2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include "atom-types.h"
29 #include "processpptables.h"
30 #include "cgs_common.h"
31 #include "smu/smu_8_0_d.h"
32 #include "smu8_fusion.h"
33 #include "smu/smu_8_0_sh_mask.h"
36 #include "hardwaremanager.h"
39 #include "power_state.h"
40 #include "cz_clockpowergating.h"
41 #include "pp_thermal.h"
43 #define ixSMUSVI_NB_CURRENTVID 0xD8230044
44 #define CURRENT_NB_VID_MASK 0xff000000
45 #define CURRENT_NB_VID__SHIFT 24
46 #define ixSMUSVI_GFX_CURRENTVID 0xD8230048
47 #define CURRENT_GFX_VID_MASK 0xff000000
48 #define CURRENT_GFX_VID__SHIFT 24
50 static const unsigned long PhwCz_Magic = (unsigned long) PHM_Cz_Magic;
52 static struct cz_power_state *cast_PhwCzPowerState(struct pp_hw_power_state *hw_ps)
54 if (PhwCz_Magic != hw_ps->magic)
57 return (struct cz_power_state *)hw_ps;
60 static const struct cz_power_state *cast_const_PhwCzPowerState(
61 const struct pp_hw_power_state *hw_ps)
63 if (PhwCz_Magic != hw_ps->magic)
66 return (struct cz_power_state *)hw_ps;
69 static uint32_t cz_get_eclk_level(struct pp_hwmgr *hwmgr,
70 uint32_t clock, uint32_t msg)
73 struct phm_vce_clock_voltage_dependency_table *ptable =
74 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
77 case PPSMC_MSG_SetEclkSoftMin:
78 case PPSMC_MSG_SetEclkHardMin:
79 for (i = 0; i < (int)ptable->count; i++) {
80 if (clock <= ptable->entries[i].ecclk)
85 case PPSMC_MSG_SetEclkSoftMax:
86 case PPSMC_MSG_SetEclkHardMax:
87 for (i = ptable->count - 1; i >= 0; i--) {
88 if (clock >= ptable->entries[i].ecclk)
100 static uint32_t cz_get_sclk_level(struct pp_hwmgr *hwmgr,
101 uint32_t clock, uint32_t msg)
104 struct phm_clock_voltage_dependency_table *table =
105 hwmgr->dyn_state.vddc_dependency_on_sclk;
108 case PPSMC_MSG_SetSclkSoftMin:
109 case PPSMC_MSG_SetSclkHardMin:
110 for (i = 0; i < (int)table->count; i++) {
111 if (clock <= table->entries[i].clk)
116 case PPSMC_MSG_SetSclkSoftMax:
117 case PPSMC_MSG_SetSclkHardMax:
118 for (i = table->count - 1; i >= 0; i--) {
119 if (clock >= table->entries[i].clk)
130 static uint32_t cz_get_uvd_level(struct pp_hwmgr *hwmgr,
131 uint32_t clock, uint32_t msg)
134 struct phm_uvd_clock_voltage_dependency_table *ptable =
135 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
138 case PPSMC_MSG_SetUvdSoftMin:
139 case PPSMC_MSG_SetUvdHardMin:
140 for (i = 0; i < (int)ptable->count; i++) {
141 if (clock <= ptable->entries[i].vclk)
146 case PPSMC_MSG_SetUvdSoftMax:
147 case PPSMC_MSG_SetUvdHardMax:
148 for (i = ptable->count - 1; i >= 0; i--) {
149 if (clock >= ptable->entries[i].vclk)
161 static uint32_t cz_get_max_sclk_level(struct pp_hwmgr *hwmgr)
163 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
165 if (cz_hwmgr->max_sclk_level == 0) {
166 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
167 cz_hwmgr->max_sclk_level = smum_get_argument(hwmgr) + 1;
170 return cz_hwmgr->max_sclk_level;
/*
 * cz_initialize_dpm_defaults() - seed the CZ hwmgr backend with default
 * DPM tuning values and set/clear the platform capability flags.
 *
 * NOTE(review): this extracted listing has lost some lines (local
 * declarations of `result`/`i`, braces around the PG-flags result check,
 * and the final return).  Code lines below are kept exactly as found.
 */
173 static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
175 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
177 struct cgs_system_info sys_info = {0};
/* SCLK ramp: step is 25% of 256; 1 us delay between ramp steps. */
180 cz_hwmgr->gfx_ramp_step = 256*25/100;
181 cz_hwmgr->gfx_ramp_delay = 1; /* by default, we delay 1us */
183 for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++)
184 cz_hwmgr->activity_target[i] = CZ_AT_DFLT;
/* Clock-gating, slow-down and thermal/power-gating default thresholds. */
186 cz_hwmgr->mgcg_cgtt_local0 = 0x00000000;
187 cz_hwmgr->mgcg_cgtt_local1 = 0x00000000;
188 cz_hwmgr->clock_slow_down_freq = 25000;
189 cz_hwmgr->skip_clock_slow_down = 1;
190 cz_hwmgr->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
191 cz_hwmgr->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
192 cz_hwmgr->voting_rights_clients = 0x00C00033;
193 cz_hwmgr->static_screen_threshold = 8;
194 cz_hwmgr->ddi_power_gating_disabled = 0;
195 cz_hwmgr->bapm_enabled = 1;
196 cz_hwmgr->voltage_drop_threshold = 0;
197 cz_hwmgr->gfx_power_gating_threshold = 500;
198 cz_hwmgr->vce_slow_sclk_threshold = 20000;
199 cz_hwmgr->dce_slow_sclk_threshold = 30000;
200 cz_hwmgr->disable_driver_thermal_policy = 1;
201 cz_hwmgr->disable_nb_ps3_in_battery = 0;
/* Capability flags: ABM handled outside PPLib; no dynamic M3 arbiter. */
203 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
204 PHM_PlatformCaps_ABM);
206 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
207 PHM_PlatformCaps_NonABMSupportInPPLib);
209 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
210 PHM_PlatformCaps_DynamicM3Arbiter);
212 cz_hwmgr->override_dynamic_mgpg = 1;
214 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
215 PHM_PlatformCaps_DynamicPatchPowerState);
217 cz_hwmgr->thermal_auto_throttling_treshold = 0;
218 cz_hwmgr->tdr_clock = 0;
219 cz_hwmgr->disable_gfx_power_gating_in_uvd = 0;
/* UVD/VCE DPM are always advertised; power gating only if PG flags say so. */
221 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
222 PHM_PlatformCaps_DynamicUVDState);
224 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
225 PHM_PlatformCaps_UVDDPM);
226 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
227 PHM_PlatformCaps_VCEDPM);
/* CC6 deep-sleep settings default to fully permissive. */
229 cz_hwmgr->cc6_settings.cpu_cc6_disable = false;
230 cz_hwmgr->cc6_settings.cpu_pstate_disable = false;
231 cz_hwmgr->cc6_settings.nb_pstate_switch_disable = false;
232 cz_hwmgr->cc6_settings.cpu_pstate_separation_time = 0;
234 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
235 PHM_PlatformCaps_DisableVoltageIsland);
/* Start with UVD/VCE power gating off, then re-enable per CGS PG flags. */
237 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
238 PHM_PlatformCaps_UVDPowerGating);
239 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
240 PHM_PlatformCaps_VCEPowerGating);
241 sys_info.size = sizeof(struct cgs_system_info);
242 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
243 result = cgs_query_system_info(hwmgr->device, &sys_info);
/* NOTE(review): the result check guarding these reads was elided here. */
245 if (sys_info.value & AMD_PG_SUPPORT_UVD)
246 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
247 PHM_PlatformCaps_UVDPowerGating);
248 if (sys_info.value & AMD_PG_SUPPORT_VCE)
249 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
250 PHM_PlatformCaps_VCEPowerGating);
/*
 * cz_convert_8Bit_index_to_voltage() - translate an 8-bit SVI voltage
 * index into a voltage value.
 *
 * The encoding is linear: voltage = 6200 - (index * 25).  The unit scale
 * is the SVI fixed-point unit used elsewhere in this driver — confirm
 * against the voltage controller spec.  @hwmgr is unused but kept for
 * signature parity with the other conversion helpers.
 */
static uint32_t cz_convert_8Bit_index_to_voltage(
			struct pp_hwmgr *hwmgr, uint16_t voltage)
{
	return 6200 - (voltage * 25);
}
262 static int cz_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
263 struct phm_clock_and_voltage_limits *table)
265 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend;
266 struct cz_sys_info *sys_info = &cz_hwmgr->sys_info;
267 struct phm_clock_voltage_dependency_table *dep_table =
268 hwmgr->dyn_state.vddc_dependency_on_sclk;
270 if (dep_table->count > 0) {
271 table->sclk = dep_table->entries[dep_table->count-1].clk;
272 table->vddc = cz_convert_8Bit_index_to_voltage(hwmgr,
273 (uint16_t)dep_table->entries[dep_table->count-1].v);
275 table->mclk = sys_info->nbp_memory_clock[0];
279 static int cz_init_dynamic_state_adjustment_rule_settings(
280 struct pp_hwmgr *hwmgr,
281 ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
283 uint32_t table_size =
284 sizeof(struct phm_clock_voltage_dependency_table) +
285 (7 * sizeof(struct phm_clock_voltage_dependency_record));
287 struct phm_clock_voltage_dependency_table *table_clk_vlt =
288 kzalloc(table_size, GFP_KERNEL);
290 if (NULL == table_clk_vlt) {
291 pr_err("Can not allocate memory!\n");
295 table_clk_vlt->count = 8;
296 table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
297 table_clk_vlt->entries[0].v = 0;
298 table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
299 table_clk_vlt->entries[1].v = 1;
300 table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
301 table_clk_vlt->entries[2].v = 2;
302 table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
303 table_clk_vlt->entries[3].v = 3;
304 table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
305 table_clk_vlt->entries[4].v = 4;
306 table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
307 table_clk_vlt->entries[5].v = 5;
308 table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
309 table_clk_vlt->entries[6].v = 6;
310 table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
311 table_clk_vlt->entries[7].v = 7;
312 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;
/*
 * cz_get_system_info_data() - read the ATOM IntegratedSystemInfo table
 * and populate the backend's sys_info (boot clocks, NB P-state data,
 * display clocks, thermal limits) plus derived limits tables.
 *
 * NOTE(review): this extracted listing has lost lines (locals such as
 * size/frev/crev/i, the frev/crev validation branch, NULL check on the
 * table pointer, and the final return).  Code lines below are kept
 * exactly as found.
 */
317 static int cz_get_system_info_data(struct pp_hwmgr *hwmgr)
319 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)hwmgr->backend;
320 ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
326 info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *) cgs_atom_get_data_table(
328 GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
329 &size, &frev, &crev);
/* NOTE(review): the revision check guarding this error path was elided. */
332 pr_err("Unsupported IGP table: %d %d\n", frev, crev);
337 pr_err("Could not retrieve the Integrated System Info Table!\n");
/* Boot-up clocks and system configuration, all little-endian in ATOM. */
341 cz_hwmgr->sys_info.bootup_uma_clock =
342 le32_to_cpu(info->ulBootUpUMAClock);
344 cz_hwmgr->sys_info.bootup_engine_clock =
345 le32_to_cpu(info->ulBootUpEngineClock);
347 cz_hwmgr->sys_info.dentist_vco_freq =
348 le32_to_cpu(info->ulDentistVCOFreq);
350 cz_hwmgr->sys_info.system_config =
351 le32_to_cpu(info->ulSystemConfig);
353 cz_hwmgr->sys_info.bootup_nb_voltage_index =
354 le16_to_cpu(info->usBootUpNBVoltage);
/* HTC thermal limits: fall back to 5 / 203 when the table reports 0. */
356 cz_hwmgr->sys_info.htc_hyst_lmt =
357 (info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;
359 cz_hwmgr->sys_info.htc_tmp_lmt =
360 (info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;
362 if (cz_hwmgr->sys_info.htc_tmp_lmt <=
363 cz_hwmgr->sys_info.htc_hyst_lmt) {
364 pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
/* NB DPM requires both the driver policy and bit 3 of ulSystemConfig. */
368 cz_hwmgr->sys_info.nb_dpm_enable =
369 cz_hwmgr->enable_nb_ps_policy &&
370 (le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);
372 for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
373 if (i < CZ_NUM_NBPMEMORYCLOCK) {
374 cz_hwmgr->sys_info.nbp_memory_clock[i] =
375 le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
377 cz_hwmgr->sys_info.nbp_n_clock[i] =
378 le32_to_cpu(info->ulNbpStateNClkFreq[i]);
381 for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
382 cz_hwmgr->sys_info.display_clock[i] =
383 le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
386 /* Here use 4 levels, make sure not exceed */
387 for (i = 0; i < CZ_NUM_NBPSTATES; i++) {
388 cz_hwmgr->sys_info.nbp_voltage_index[i] =
389 le16_to_cpu(info->usNBPStateVoltage[i]);
/* When NB DPM is off, flatten all NB P-states to the P0 values. */
392 if (!cz_hwmgr->sys_info.nb_dpm_enable) {
393 for (i = 1; i < CZ_NUM_NBPSTATES; i++) {
394 if (i < CZ_NUM_NBPMEMORYCLOCK) {
395 cz_hwmgr->sys_info.nbp_memory_clock[i] =
396 cz_hwmgr->sys_info.nbp_memory_clock[0];
398 cz_hwmgr->sys_info.nbp_n_clock[i] =
399 cz_hwmgr->sys_info.nbp_n_clock[0];
400 cz_hwmgr->sys_info.nbp_voltage_index[i] =
401 cz_hwmgr->sys_info.nbp_voltage_index[0];
405 if (le32_to_cpu(info->ulGPUCapInfo) &
406 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
407 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
408 PHM_PlatformCaps_EnableDFSBypass);
411 cz_hwmgr->sys_info.uma_channel_number = info->ucUMAChannelNumber;
/* Derive AC max limits and the DAL power-level adjustment table. */
413 cz_construct_max_power_limits_table (hwmgr,
414 &hwmgr->dyn_state.max_clock_voltage_on_ac);
416 cz_init_dynamic_state_adjustment_rule_settings(hwmgr,
417 &info->sDISPCLK_Voltage[0]);
422 static int cz_construct_boot_state(struct pp_hwmgr *hwmgr)
424 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
426 cz_hwmgr->boot_power_level.engineClock =
427 cz_hwmgr->sys_info.bootup_engine_clock;
429 cz_hwmgr->boot_power_level.vddcIndex =
430 (uint8_t)cz_hwmgr->sys_info.bootup_nb_voltage_index;
432 cz_hwmgr->boot_power_level.dsDividerIndex = 0;
433 cz_hwmgr->boot_power_level.ssDividerIndex = 0;
434 cz_hwmgr->boot_power_level.allowGnbSlow = 1;
435 cz_hwmgr->boot_power_level.forceNBPstate = 0;
436 cz_hwmgr->boot_power_level.hysteresis_up = 0;
437 cz_hwmgr->boot_power_level.numSIMDToPowerDown = 0;
438 cz_hwmgr->boot_power_level.display_wm = 0;
439 cz_hwmgr->boot_power_level.vce_wm = 0;
/*
 * cz_upload_pptable_to_smu() - download the SMU8 fusion clock table,
 * patch each breakdown table (SCLK/ACLK/VCLK/DCLK/ECLK) from the
 * powerplay dependency tables, and upload it back to the SMU.
 *
 * NOTE(review): this extracted listing has lost lines (locals such as
 * ret/i/table, early-return when no upload is needed, and the final
 * return of ret).  Code lines below are kept exactly as found.
 */
444 static int cz_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
446 struct SMU8_Fusion_ClkTable *clock_table;
450 pp_atomctrl_clock_dividers_kong dividers;
452 struct phm_clock_voltage_dependency_table *vddc_table =
453 hwmgr->dyn_state.vddc_dependency_on_sclk;
454 struct phm_clock_voltage_dependency_table *vdd_gfx_table =
455 hwmgr->dyn_state.vdd_gfx_dependency_on_sclk;
456 struct phm_acp_clock_voltage_dependency_table *acp_table =
457 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
458 struct phm_uvd_clock_voltage_dependency_table *uvd_table =
459 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
460 struct phm_vce_clock_voltage_dependency_table *vce_table =
461 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
463 if (!hwmgr->need_pp_table_upload)
466 ret = smum_download_powerplay_table(hwmgr, &table);
468 PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
469 "Fail to get clock table from SMU!", return -EINVAL;);
471 clock_table = (struct SMU8_Fusion_ClkTable *)table;
473 /* patch clock table */
/* Every dependency table must fit within the SMU's power-level array. */
474 PP_ASSERT_WITH_CODE((vddc_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
475 "Dependency table entry exceeds max limit!", return -EINVAL;);
476 PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
477 "Dependency table entry exceeds max limit!", return -EINVAL;);
478 PP_ASSERT_WITH_CODE((acp_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
479 "Dependency table entry exceeds max limit!", return -EINVAL;);
480 PP_ASSERT_WITH_CODE((uvd_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
481 "Dependency table entry exceeds max limit!", return -EINVAL;);
482 PP_ASSERT_WITH_CODE((vce_table->count <= CZ_MAX_HARDWARE_POWERLEVELS),
483 "Dependency table entry exceeds max limit!", return -EINVAL;);
485 for (i = 0; i < CZ_MAX_HARDWARE_POWERLEVELS; i++) {
/* SCLK breakdown: voltage index + frequency from the VDDC table,
 * divider computed by atomctrl; levels past count are zeroed. */
488 clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
489 (i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
490 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
491 (i < vddc_table->count) ? vddc_table->entries[i].clk : 0;
493 atomctrl_get_engine_pll_dividers_kong(hwmgr,
494 clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
497 clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
498 (uint8_t)dividers.pll_post_divider;
501 clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
502 (i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0;
/* ACLK breakdown from the ACP dependency table. */
505 clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
506 (i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
507 clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
508 (i < acp_table->count) ? acp_table->entries[i].acpclk : 0;
510 atomctrl_get_engine_pll_dividers_kong(hwmgr,
511 clock_table->AclkBreakdownTable.ClkLevel[i].Frequency,
514 clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
515 (uint8_t)dividers.pll_post_divider;
/* VCLK and DCLK breakdowns both come from the UVD dependency table. */
519 clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
520 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
521 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
522 (i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;
524 atomctrl_get_engine_pll_dividers_kong(hwmgr,
525 clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
528 clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
529 (uint8_t)dividers.pll_post_divider;
531 clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
532 (i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
533 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
534 (i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;
536 atomctrl_get_engine_pll_dividers_kong(hwmgr,
537 clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
540 clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
541 (uint8_t)dividers.pll_post_divider;
/* ECLK breakdown from the VCE dependency table. */
544 clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
545 (i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
546 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
547 (i < vce_table->count) ? vce_table->entries[i].ecclk : 0;
550 atomctrl_get_engine_pll_dividers_kong(hwmgr,
551 clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
554 clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
555 (uint8_t)dividers.pll_post_divider;
558 ret = smum_upload_powerplay_table(hwmgr);
563 static int cz_init_sclk_limit(struct pp_hwmgr *hwmgr)
565 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
566 struct phm_clock_voltage_dependency_table *table =
567 hwmgr->dyn_state.vddc_dependency_on_sclk;
568 unsigned long clock = 0, level;
570 if (NULL == table || table->count <= 0)
573 cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
574 cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk;
576 level = cz_get_max_sclk_level(hwmgr) - 1;
578 if (level < table->count)
579 clock = table->entries[level].clk;
581 clock = table->entries[table->count - 1].clk;
583 cz_hwmgr->sclk_dpm.soft_max_clk = clock;
584 cz_hwmgr->sclk_dpm.hard_max_clk = clock;
589 static int cz_init_uvd_limit(struct pp_hwmgr *hwmgr)
591 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
592 struct phm_uvd_clock_voltage_dependency_table *table =
593 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
594 unsigned long clock = 0, level;
596 if (NULL == table || table->count <= 0)
599 cz_hwmgr->uvd_dpm.soft_min_clk = 0;
600 cz_hwmgr->uvd_dpm.hard_min_clk = 0;
602 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
603 level = smum_get_argument(hwmgr);
605 if (level < table->count)
606 clock = table->entries[level].vclk;
608 clock = table->entries[table->count - 1].vclk;
610 cz_hwmgr->uvd_dpm.soft_max_clk = clock;
611 cz_hwmgr->uvd_dpm.hard_max_clk = clock;
616 static int cz_init_vce_limit(struct pp_hwmgr *hwmgr)
618 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
619 struct phm_vce_clock_voltage_dependency_table *table =
620 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
621 unsigned long clock = 0, level;
623 if (NULL == table || table->count <= 0)
626 cz_hwmgr->vce_dpm.soft_min_clk = 0;
627 cz_hwmgr->vce_dpm.hard_min_clk = 0;
629 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
630 level = smum_get_argument(hwmgr);
632 if (level < table->count)
633 clock = table->entries[level].ecclk;
635 clock = table->entries[table->count - 1].ecclk;
637 cz_hwmgr->vce_dpm.soft_max_clk = clock;
638 cz_hwmgr->vce_dpm.hard_max_clk = clock;
643 static int cz_init_acp_limit(struct pp_hwmgr *hwmgr)
645 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
646 struct phm_acp_clock_voltage_dependency_table *table =
647 hwmgr->dyn_state.acp_clock_voltage_dependency_table;
648 unsigned long clock = 0, level;
650 if (NULL == table || table->count <= 0)
653 cz_hwmgr->acp_dpm.soft_min_clk = 0;
654 cz_hwmgr->acp_dpm.hard_min_clk = 0;
656 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
657 level = smum_get_argument(hwmgr);
659 if (level < table->count)
660 clock = table->entries[level].acpclk;
662 clock = table->entries[table->count - 1].acpclk;
664 cz_hwmgr->acp_dpm.soft_max_clk = clock;
665 cz_hwmgr->acp_dpm.hard_max_clk = clock;
669 static void cz_init_power_gate_state(struct pp_hwmgr *hwmgr)
671 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
673 cz_hwmgr->uvd_power_gated = false;
674 cz_hwmgr->vce_power_gated = false;
675 cz_hwmgr->samu_power_gated = false;
676 cz_hwmgr->acp_power_gated = false;
677 cz_hwmgr->pgacpinit = true;
680 static void cz_init_sclk_threshold(struct pp_hwmgr *hwmgr)
682 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
684 cz_hwmgr->low_sclk_interrupt_threshold = 0;
/*
 * cz_update_sclk_limit() - recompute SCLK soft/hard limits from the
 * current display configuration (and Stable P-State policy) and push any
 * changed limits to the SMU.
 *
 * NOTE(review): this extracted listing has lost lines (the `level`
 * declaration, the zero-clock check around the pr_debug, the percentage
 * assignment and the tail of the stable-pstate sclk computation, and the
 * final return).  Code lines below are kept exactly as found.
 */
687 static int cz_update_sclk_limit(struct pp_hwmgr *hwmgr)
689 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
690 struct phm_clock_voltage_dependency_table *table =
691 hwmgr->dyn_state.vddc_dependency_on_sclk;
693 unsigned long clock = 0;
695 unsigned long stable_pstate_sclk;
696 unsigned long percentage;
/* Soft min defaults to the lowest table entry; soft max to the SMU's
 * top level, clamped to the table. */
698 cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
699 level = cz_get_max_sclk_level(hwmgr) - 1;
701 if (level < table->count)
702 cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[level].clk;
704 cz_hwmgr->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;
706 clock = hwmgr->display_config.min_core_set_clock;
708 pr_debug("min_core_set_clock not set\n");
/* Only message the SMU when the hard minimum actually changed. */
710 if (cz_hwmgr->sclk_dpm.hard_min_clk != clock) {
711 cz_hwmgr->sclk_dpm.hard_min_clk = clock;
713 smum_send_msg_to_smc_with_parameter(hwmgr,
714 PPSMC_MSG_SetSclkHardMin,
715 cz_get_sclk_level(hwmgr,
716 cz_hwmgr->sclk_dpm.hard_min_clk,
717 PPSMC_MSG_SetSclkHardMin));
720 clock = cz_hwmgr->sclk_dpm.soft_min_clk;
722 /* update minimum clocks for Stable P-State feature */
723 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
724 PHM_PlatformCaps_StablePState)) {
726 /*Sclk - calculate sclk value based on percentage and find FLOOR sclk from VddcDependencyOnSCLK table */
727 stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk *
730 if (clock < stable_pstate_sclk)
731 clock = stable_pstate_sclk;
734 if (cz_hwmgr->sclk_dpm.soft_min_clk != clock) {
735 cz_hwmgr->sclk_dpm.soft_min_clk = clock;
736 smum_send_msg_to_smc_with_parameter(hwmgr,
737 PPSMC_MSG_SetSclkSoftMin,
738 cz_get_sclk_level(hwmgr,
739 cz_hwmgr->sclk_dpm.soft_min_clk,
740 PPSMC_MSG_SetSclkSoftMin));
/* Under Stable P-State the soft max is pinned to the same clock. */
743 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
744 PHM_PlatformCaps_StablePState) &&
745 cz_hwmgr->sclk_dpm.soft_max_clk != clock) {
746 cz_hwmgr->sclk_dpm.soft_max_clk = clock;
747 smum_send_msg_to_smc_with_parameter(hwmgr,
748 PPSMC_MSG_SetSclkSoftMax,
749 cz_get_sclk_level(hwmgr,
750 cz_hwmgr->sclk_dpm.soft_max_clk,
751 PPSMC_MSG_SetSclkSoftMax));
757 static int cz_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
759 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
760 PHM_PlatformCaps_SclkDeepSleep)) {
761 uint32_t clks = hwmgr->display_config.min_core_set_clock_in_sr;
763 clks = CZ_MIN_DEEP_SLEEP_SCLK;
765 PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);
767 smum_send_msg_to_smc_with_parameter(hwmgr,
768 PPSMC_MSG_SetMinDeepSleepSclk,
775 static int cz_set_watermark_threshold(struct pp_hwmgr *hwmgr)
777 struct cz_hwmgr *cz_hwmgr =
778 (struct cz_hwmgr *)(hwmgr->backend);
780 smum_send_msg_to_smc_with_parameter(hwmgr,
781 PPSMC_MSG_SetWatermarkFrequency,
782 cz_hwmgr->sclk_dpm.soft_max_clk);
787 static int cz_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
789 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
791 if (hw_data->is_nb_dpm_enabled) {
793 PP_DBG_LOG("enable Low Memory PState.\n");
795 return smum_send_msg_to_smc_with_parameter(hwmgr,
796 PPSMC_MSG_EnableLowMemoryPstate,
799 PP_DBG_LOG("disable Low Memory PState.\n");
801 return smum_send_msg_to_smc_with_parameter(hwmgr,
802 PPSMC_MSG_DisableLowMemoryPstate,
810 static int cz_disable_nb_dpm(struct pp_hwmgr *hwmgr)
814 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
815 unsigned long dpm_features = 0;
817 if (cz_hwmgr->is_nb_dpm_enabled) {
818 cz_nbdpm_pstate_enable_disable(hwmgr, true, true);
819 dpm_features |= NB_DPM_MASK;
820 ret = smum_send_msg_to_smc_with_parameter(
822 PPSMC_MSG_DisableAllSmuFeatures,
825 cz_hwmgr->is_nb_dpm_enabled = false;
831 static int cz_enable_nb_dpm(struct pp_hwmgr *hwmgr)
835 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
836 unsigned long dpm_features = 0;
838 if (!cz_hwmgr->is_nb_dpm_enabled) {
839 PP_DBG_LOG("enabling ALL SMU features.\n");
840 dpm_features |= NB_DPM_MASK;
841 ret = smum_send_msg_to_smc_with_parameter(
843 PPSMC_MSG_EnableAllSmuFeatures,
846 cz_hwmgr->is_nb_dpm_enabled = true;
852 static int cz_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
855 bool enable_low_mem_state;
856 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
857 const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
858 const struct cz_power_state *pnew_state = cast_const_PhwCzPowerState(states->pnew_state);
860 if (hw_data->sys_info.nb_dpm_enable) {
861 disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
862 enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;
864 if (pnew_state->action == FORCE_HIGH)
865 cz_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
866 else if (pnew_state->action == CANCEL_FORCE_HIGH)
867 cz_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
869 cz_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
/*
 * cz_set_power_state_tasks() - apply a new power state: refresh SCLK
 * limits, deep-sleep threshold and watermark, enable NB DPM, then update
 * the low-memory P-state.  Returns the NB DPM enable error, else 0.
 */
static int cz_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int ret = 0;

	cz_update_sclk_limit(hwmgr);
	cz_set_deep_sleep_sclk_threshold(hwmgr);
	cz_set_watermark_threshold(hwmgr);
	ret = cz_enable_nb_dpm(hwmgr);
	if (ret)
		return ret;
	cz_update_low_mem_pstate(hwmgr, input);

	return 0;
}
/*
 * cz_setup_asic_task() - one-time ASIC setup: upload the powerplay table
 * to the SMU and initialize the SCLK/UVD/VCE/ACP limits plus the
 * power-gating and SCLK-threshold state.  Stops at the first failing
 * step and returns its error.
 */
static int cz_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	ret = cz_upload_pptable_to_smu(hwmgr);
	if (ret)
		return ret;
	ret = cz_init_sclk_limit(hwmgr);
	if (ret)
		return ret;
	ret = cz_init_uvd_limit(hwmgr);
	if (ret)
		return ret;
	ret = cz_init_vce_limit(hwmgr);
	if (ret)
		return ret;
	ret = cz_init_acp_limit(hwmgr);
	if (ret)
		return ret;

	cz_init_power_gate_state(hwmgr);
	cz_init_sclk_threshold(hwmgr);

	return 0;
}
916 static void cz_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
918 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
920 hw_data->disp_clk_bypass_pending = false;
921 hw_data->disp_clk_bypass = false;
924 static void cz_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
926 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
928 hw_data->is_nb_dpm_enabled = false;
931 static void cz_reset_cc6_data(struct pp_hwmgr *hwmgr)
933 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
935 hw_data->cc6_settings.cc6_setting_changed = false;
936 hw_data->cc6_settings.cpu_pstate_separation_time = 0;
937 hw_data->cc6_settings.cpu_cc6_disable = false;
938 hw_data->cc6_settings.cpu_pstate_disable = false;
/*
 * cz_power_off_asic() - prepare for ASIC power-off: restore the display
 * clock to the system PLL and reset the NB-DPM and CC6 bookkeeping.
 * Always returns 0.
 */
static int cz_power_off_asic(struct pp_hwmgr *hwmgr)
{
	cz_power_up_display_clock_sys_pll(hwmgr);
	cz_clear_nb_dpm_flag(hwmgr);
	cz_reset_cc6_data(hwmgr);
	return 0;
}
949 static void cz_program_voting_clients(struct pp_hwmgr *hwmgr)
951 PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0,
952 PPCZ_VOTINGRIGHTSCLIENTS_DFLT0);
955 static void cz_clear_voting_clients(struct pp_hwmgr *hwmgr)
957 PHMCZ_WRITE_SMC_REGISTER(hwmgr->device, CG_FREQ_TRAN_VOTING_0, 0);
960 static int cz_start_dpm(struct pp_hwmgr *hwmgr)
962 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
964 cz_hwmgr->dpm_flags |= DPMFlags_SCLK_Enabled;
966 return smum_send_msg_to_smc_with_parameter(hwmgr,
967 PPSMC_MSG_EnableAllSmuFeatures,
971 static int cz_stop_dpm(struct pp_hwmgr *hwmgr)
974 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
975 unsigned long dpm_features = 0;
977 if (cz_hwmgr->dpm_flags & DPMFlags_SCLK_Enabled) {
978 dpm_features |= SCLK_DPM_MASK;
979 cz_hwmgr->dpm_flags &= ~DPMFlags_SCLK_Enabled;
980 ret = smum_send_msg_to_smc_with_parameter(hwmgr,
981 PPSMC_MSG_DisableAllSmuFeatures,
987 static int cz_program_bootup_state(struct pp_hwmgr *hwmgr)
989 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
991 cz_hwmgr->sclk_dpm.soft_min_clk = cz_hwmgr->sys_info.bootup_engine_clock;
992 cz_hwmgr->sclk_dpm.soft_max_clk = cz_hwmgr->sys_info.bootup_engine_clock;
994 smum_send_msg_to_smc_with_parameter(hwmgr,
995 PPSMC_MSG_SetSclkSoftMin,
996 cz_get_sclk_level(hwmgr,
997 cz_hwmgr->sclk_dpm.soft_min_clk,
998 PPSMC_MSG_SetSclkSoftMin));
1000 smum_send_msg_to_smc_with_parameter(hwmgr,
1001 PPSMC_MSG_SetSclkSoftMax,
1002 cz_get_sclk_level(hwmgr,
1003 cz_hwmgr->sclk_dpm.soft_max_clk,
1004 PPSMC_MSG_SetSclkSoftMax));
1009 static void cz_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
1011 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1013 cz_hwmgr->acp_boot_level = 0xff;
1016 static bool cz_dpm_check_smu_features(struct pp_hwmgr *hwmgr,
1017 unsigned long check_feature)
1020 unsigned long features;
1022 result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetFeatureStatus, 0);
1024 features = smum_get_argument(hwmgr);
1025 if (features & check_feature)
1032 static bool cz_check_for_dpm_enabled(struct pp_hwmgr *hwmgr)
1034 if (cz_dpm_check_smu_features(hwmgr, SMU_EnabledFeatureScoreboard_SclkDpmOn))
1039 static int cz_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1041 if (!cz_check_for_dpm_enabled(hwmgr)) {
1042 pr_info("dpm has been disabled\n");
1045 cz_disable_nb_dpm(hwmgr);
1047 cz_clear_voting_clients(hwmgr);
1048 if (cz_stop_dpm(hwmgr))
1054 static int cz_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1056 if (cz_check_for_dpm_enabled(hwmgr)) {
1057 pr_info("dpm has been enabled\n");
1061 cz_program_voting_clients(hwmgr);
1062 if (cz_start_dpm(hwmgr))
1064 cz_program_bootup_state(hwmgr);
1065 cz_reset_acp_boot_level(hwmgr);
1070 static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
1071 struct pp_power_state *prequest_ps,
1072 const struct pp_power_state *pcurrent_ps)
1074 struct cz_power_state *cz_ps =
1075 cast_PhwCzPowerState(&prequest_ps->hardware);
1077 const struct cz_power_state *cz_current_ps =
1078 cast_const_PhwCzPowerState(&pcurrent_ps->hardware);
1080 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1081 struct PP_Clocks clocks = {0, 0, 0, 0};
1083 uint32_t num_of_active_displays = 0;
1084 struct cgs_display_info info = {0};
1086 cz_ps->need_dfs_bypass = true;
1088 cz_hwmgr->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
1090 clocks.memoryClock = hwmgr->display_config.min_mem_set_clock != 0 ?
1091 hwmgr->display_config.min_mem_set_clock :
1092 cz_hwmgr->sys_info.nbp_memory_clock[1];
1094 cgs_get_active_displays_info(hwmgr->device, &info);
1095 num_of_active_displays = info.display_count;
1097 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
1098 clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;
1100 force_high = (clocks.memoryClock > cz_hwmgr->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1])
1101 || (num_of_active_displays >= 3);
1103 cz_ps->action = cz_current_ps->action;
1105 if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
1106 cz_nbdpm_pstate_enable_disable(hwmgr, false, false);
1107 else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
1108 cz_nbdpm_pstate_enable_disable(hwmgr, false, true);
1109 else if (!force_high && (cz_ps->action == FORCE_HIGH))
1110 cz_ps->action = CANCEL_FORCE_HIGH;
1111 else if (force_high && (cz_ps->action != FORCE_HIGH))
1112 cz_ps->action = FORCE_HIGH;
1114 cz_ps->action = DO_NOTHING;
1119 static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
1122 struct cz_hwmgr *data;
1124 data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
1128 hwmgr->backend = data;
1130 result = cz_initialize_dpm_defaults(hwmgr);
1132 pr_err("cz_initialize_dpm_defaults failed\n");
1136 result = cz_get_system_info_data(hwmgr);
1138 pr_err("cz_get_system_info_data failed\n");
1142 cz_construct_boot_state(hwmgr);
1144 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = CZ_MAX_HARDWARE_POWERLEVELS;
1149 static int cz_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
1151 if (hwmgr != NULL) {
1152 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
1153 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
1155 kfree(hwmgr->backend);
1156 hwmgr->backend = NULL;
1161 static int cz_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
1163 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1165 smum_send_msg_to_smc_with_parameter(hwmgr,
1166 PPSMC_MSG_SetSclkSoftMin,
1167 cz_get_sclk_level(hwmgr,
1168 cz_hwmgr->sclk_dpm.soft_max_clk,
1169 PPSMC_MSG_SetSclkSoftMin));
1171 smum_send_msg_to_smc_with_parameter(hwmgr,
1172 PPSMC_MSG_SetSclkSoftMax,
1173 cz_get_sclk_level(hwmgr,
1174 cz_hwmgr->sclk_dpm.soft_max_clk,
1175 PPSMC_MSG_SetSclkSoftMax));
1180 static int cz_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
1182 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1183 struct phm_clock_voltage_dependency_table *table =
1184 hwmgr->dyn_state.vddc_dependency_on_sclk;
1185 unsigned long clock = 0, level;
1187 if (NULL == table || table->count <= 0)
1190 cz_hwmgr->sclk_dpm.soft_min_clk = table->entries[0].clk;
1191 cz_hwmgr->sclk_dpm.hard_min_clk = table->entries[0].clk;
1193 level = cz_get_max_sclk_level(hwmgr) - 1;
1195 if (level < table->count)
1196 clock = table->entries[level].clk;
1198 clock = table->entries[table->count - 1].clk;
1200 cz_hwmgr->sclk_dpm.soft_max_clk = clock;
1201 cz_hwmgr->sclk_dpm.hard_max_clk = clock;
1203 smum_send_msg_to_smc_with_parameter(hwmgr,
1204 PPSMC_MSG_SetSclkSoftMin,
1205 cz_get_sclk_level(hwmgr,
1206 cz_hwmgr->sclk_dpm.soft_min_clk,
1207 PPSMC_MSG_SetSclkSoftMin));
1209 smum_send_msg_to_smc_with_parameter(hwmgr,
1210 PPSMC_MSG_SetSclkSoftMax,
1211 cz_get_sclk_level(hwmgr,
1212 cz_hwmgr->sclk_dpm.soft_max_clk,
1213 PPSMC_MSG_SetSclkSoftMax));
1218 static int cz_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
1220 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1222 smum_send_msg_to_smc_with_parameter(hwmgr,
1223 PPSMC_MSG_SetSclkSoftMax,
1224 cz_get_sclk_level(hwmgr,
1225 cz_hwmgr->sclk_dpm.soft_min_clk,
1226 PPSMC_MSG_SetSclkSoftMax));
1228 smum_send_msg_to_smc_with_parameter(hwmgr,
1229 PPSMC_MSG_SetSclkSoftMin,
1230 cz_get_sclk_level(hwmgr,
1231 cz_hwmgr->sclk_dpm.soft_min_clk,
1232 PPSMC_MSG_SetSclkSoftMin));
1237 static int cz_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
1238 enum amd_dpm_forced_level level)
1243 case AMD_DPM_FORCED_LEVEL_HIGH:
1244 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
1245 ret = cz_phm_force_dpm_highest(hwmgr);
1247 case AMD_DPM_FORCED_LEVEL_LOW:
1248 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
1249 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
1250 ret = cz_phm_force_dpm_lowest(hwmgr);
1252 case AMD_DPM_FORCED_LEVEL_AUTO:
1253 ret = cz_phm_unforce_dpm_levels(hwmgr);
1255 case AMD_DPM_FORCED_LEVEL_MANUAL:
1256 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
1264 int cz_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
1266 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
1267 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
1271 int cz_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
1273 if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
1274 return smum_send_msg_to_smc_with_parameter(
1276 PPSMC_MSG_UVDPowerON,
1277 PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
1283 int cz_dpm_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
1285 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1286 struct phm_uvd_clock_voltage_dependency_table *ptable =
1287 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1290 /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
1291 if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1292 hwmgr->en_umd_pstate) {
1293 cz_hwmgr->uvd_dpm.hard_min_clk =
1294 ptable->entries[ptable->count - 1].vclk;
1296 smum_send_msg_to_smc_with_parameter(hwmgr,
1297 PPSMC_MSG_SetUvdHardMin,
1298 cz_get_uvd_level(hwmgr,
1299 cz_hwmgr->uvd_dpm.hard_min_clk,
1300 PPSMC_MSG_SetUvdHardMin));
1302 cz_enable_disable_uvd_dpm(hwmgr, true);
1304 cz_enable_disable_uvd_dpm(hwmgr, true);
1307 cz_enable_disable_uvd_dpm(hwmgr, false);
1313 int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1315 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1316 struct phm_vce_clock_voltage_dependency_table *ptable =
1317 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1319 /* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
1320 if (PP_CAP(PHM_PlatformCaps_StablePState) ||
1321 hwmgr->en_umd_pstate) {
1322 cz_hwmgr->vce_dpm.hard_min_clk =
1323 ptable->entries[ptable->count - 1].ecclk;
1325 smum_send_msg_to_smc_with_parameter(hwmgr,
1326 PPSMC_MSG_SetEclkHardMin,
1327 cz_get_eclk_level(hwmgr,
1328 cz_hwmgr->vce_dpm.hard_min_clk,
1329 PPSMC_MSG_SetEclkHardMin));
1332 smum_send_msg_to_smc_with_parameter(hwmgr,
1333 PPSMC_MSG_SetEclkHardMin, 0);
1334 /* disable ECLK DPM 0. Otherwise VCE could hang if
1335 * switching SCLK from DPM 0 to 6/7 */
1336 smum_send_msg_to_smc_with_parameter(hwmgr,
1337 PPSMC_MSG_SetEclkSoftMin, 1);
1342 int cz_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
1344 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1345 return smum_send_msg_to_smc(hwmgr,
1346 PPSMC_MSG_VCEPowerOFF);
1350 int cz_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
1352 if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
1353 return smum_send_msg_to_smc(hwmgr,
1354 PPSMC_MSG_VCEPowerON);
1358 static uint32_t cz_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
1360 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1362 return cz_hwmgr->sys_info.bootup_uma_clock;
1365 static uint32_t cz_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
1367 struct pp_power_state *ps;
1368 struct cz_power_state *cz_ps;
1373 ps = hwmgr->request_ps;
1378 cz_ps = cast_PhwCzPowerState(&ps->hardware);
1381 return cz_ps->levels[0].engineClock;
1383 return cz_ps->levels[cz_ps->level-1].engineClock;
1386 static int cz_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
1387 struct pp_hw_power_state *hw_ps)
1389 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1390 struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps);
1393 cz_ps->nbps_flags = 0;
1394 cz_ps->bapm_flags = 0;
1395 cz_ps->levels[0] = cz_hwmgr->boot_power_level;
1400 static int cz_dpm_get_pp_table_entry_callback(
1401 struct pp_hwmgr *hwmgr,
1402 struct pp_hw_power_state *hw_ps,
1404 const void *clock_info)
1406 struct cz_power_state *cz_ps = cast_PhwCzPowerState(hw_ps);
1408 const ATOM_PPLIB_CZ_CLOCK_INFO *cz_clock_info = clock_info;
1410 struct phm_clock_voltage_dependency_table *table =
1411 hwmgr->dyn_state.vddc_dependency_on_sclk;
1412 uint8_t clock_info_index = cz_clock_info->index;
1414 if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
1415 clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);
1417 cz_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
1418 cz_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;
1420 cz_ps->level = index + 1;
1422 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
1423 cz_ps->levels[index].dsDividerIndex = 5;
1424 cz_ps->levels[index].ssDividerIndex = 5;
/* Return the number of pp table entries, or 0 when the table query fails. */
static int cz_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
	int result;
	unsigned long ret = 0;

	result = pp_tables_get_num_of_entries(hwmgr, &ret);

	return result ? 0 : ret;
}
1440 static int cz_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
1441 unsigned long entry, struct pp_power_state *ps)
1444 struct cz_power_state *cz_ps;
1446 ps->hardware.magic = PhwCz_Magic;
1448 cz_ps = cast_PhwCzPowerState(&(ps->hardware));
1450 result = pp_tables_get_entry(hwmgr, entry, ps,
1451 cz_dpm_get_pp_table_entry_callback);
1453 cz_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
1454 cz_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;
1459 static int cz_get_power_state_size(struct pp_hwmgr *hwmgr)
1461 return sizeof(struct cz_power_state);
1464 static void cz_hw_print_display_cfg(
1465 const struct cc6_settings *cc6_settings)
1467 PP_DBG_LOG("New Display Configuration:\n");
1469 PP_DBG_LOG(" cpu_cc6_disable: %d\n",
1470 cc6_settings->cpu_cc6_disable);
1471 PP_DBG_LOG(" cpu_pstate_disable: %d\n",
1472 cc6_settings->cpu_pstate_disable);
1473 PP_DBG_LOG(" nb_pstate_switch_disable: %d\n",
1474 cc6_settings->nb_pstate_switch_disable);
1475 PP_DBG_LOG(" cpu_pstate_separation_time: %d\n\n",
1476 cc6_settings->cpu_pstate_separation_time);
1479 static int cz_set_cpu_power_state(struct pp_hwmgr *hwmgr)
1481 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
1484 if (hw_data->cc6_settings.cc6_setting_changed) {
1486 hw_data->cc6_settings.cc6_setting_changed = false;
1488 cz_hw_print_display_cfg(&hw_data->cc6_settings);
1490 data |= (hw_data->cc6_settings.cpu_pstate_separation_time
1491 & PWRMGT_SEPARATION_TIME_MASK)
1492 << PWRMGT_SEPARATION_TIME_SHIFT;
1494 data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
1495 << PWRMGT_DISABLE_CPU_CSTATES_SHIFT;
1497 data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
1498 << PWRMGT_DISABLE_CPU_PSTATES_SHIFT;
1500 PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
1503 smum_send_msg_to_smc_with_parameter(hwmgr,
1504 PPSMC_MSG_SetDisplaySizePowerParams,
1512 static int cz_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
1513 bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
1515 struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
1517 if (separation_time !=
1518 hw_data->cc6_settings.cpu_pstate_separation_time ||
1519 cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
1520 pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
1521 pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {
1523 hw_data->cc6_settings.cc6_setting_changed = true;
1525 hw_data->cc6_settings.cpu_pstate_separation_time =
1527 hw_data->cc6_settings.cpu_cc6_disable =
1529 hw_data->cc6_settings.cpu_pstate_disable =
1531 hw_data->cc6_settings.nb_pstate_switch_disable =
1532 pstate_switch_disable;
1539 static int cz_get_dal_power_level(struct pp_hwmgr *hwmgr,
1540 struct amd_pp_simple_clock_info *info)
1543 const struct phm_clock_voltage_dependency_table *table =
1544 hwmgr->dyn_state.vddc_dep_on_dal_pwrl;
1545 const struct phm_clock_and_voltage_limits *limits =
1546 &hwmgr->dyn_state.max_clock_voltage_on_ac;
1548 info->engine_max_clock = limits->sclk;
1549 info->memory_max_clock = limits->mclk;
1551 for (i = table->count - 1; i > 0; i--) {
1552 if (limits->vddc >= table->entries[i].v) {
1553 info->level = table->entries[i].clk;
1560 static int cz_force_clock_level(struct pp_hwmgr *hwmgr,
1561 enum pp_clock_type type, uint32_t mask)
1563 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
1568 smum_send_msg_to_smc_with_parameter(hwmgr,
1569 PPSMC_MSG_SetSclkSoftMin,
1571 smum_send_msg_to_smc_with_parameter(hwmgr,
1572 PPSMC_MSG_SetSclkSoftMax,
1582 static int cz_print_clock_levels(struct pp_hwmgr *hwmgr,
1583 enum pp_clock_type type, char *buf)
1585 struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend);
1586 struct phm_clock_voltage_dependency_table *sclk_table =
1587 hwmgr->dyn_state.vddc_dependency_on_sclk;
1588 int i, now, size = 0;
1592 now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
1594 ixTARGET_AND_CURRENT_PROFILE_INDEX),
1595 TARGET_AND_CURRENT_PROFILE_INDEX,
1598 for (i = 0; i < sclk_table->count; i++)
1599 size += sprintf(buf + size, "%d: %uMhz %s\n",
1600 i, sclk_table->entries[i].clk / 100,
1601 (i == now) ? "*" : "");
1604 now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
1606 ixTARGET_AND_CURRENT_PROFILE_INDEX),
1607 TARGET_AND_CURRENT_PROFILE_INDEX,
1610 for (i = CZ_NUM_NBPMEMORYCLOCK; i > 0; i--)
1611 size += sprintf(buf + size, "%d: %uMhz %s\n",
1612 CZ_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
1613 (CZ_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
1621 static int cz_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
1622 PHM_PerformanceLevelDesignation designation, uint32_t index,
1623 PHM_PerformanceLevel *level)
1625 const struct cz_power_state *ps;
1626 struct cz_hwmgr *data;
1627 uint32_t level_index;
1630 if (level == NULL || hwmgr == NULL || state == NULL)
1633 data = (struct cz_hwmgr *)(hwmgr->backend);
1634 ps = cast_const_PhwCzPowerState(state);
1636 level_index = index > ps->level - 1 ? ps->level - 1 : index;
1637 level->coreClock = ps->levels[level_index].engineClock;
1639 if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
1640 for (i = 1; i < ps->level; i++) {
1641 if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) {
1642 level->coreClock = ps->levels[i].engineClock;
1648 if (level_index == 0)
1649 level->memory_clock = data->sys_info.nbp_memory_clock[CZ_NUM_NBPMEMORYCLOCK - 1];
1651 level->memory_clock = data->sys_info.nbp_memory_clock[0];
1653 level->vddc = (cz_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
1654 level->nonLocalMemoryFreq = 0;
1655 level->nonLocalMemoryWidth = 0;
1660 static int cz_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
1661 const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
1663 const struct cz_power_state *ps = cast_const_PhwCzPowerState(state);
1665 clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
1666 clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));
1671 static int cz_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
1672 struct amd_pp_clocks *clocks)
1674 struct cz_hwmgr *data = (struct cz_hwmgr *)(hwmgr->backend);
1676 struct phm_clock_voltage_dependency_table *table;
1678 clocks->count = cz_get_max_sclk_level(hwmgr);
1680 case amd_pp_disp_clock:
1681 for (i = 0; i < clocks->count; i++)
1682 clocks->clock[i] = data->sys_info.display_clock[i];
1684 case amd_pp_sys_clock:
1685 table = hwmgr->dyn_state.vddc_dependency_on_sclk;
1686 for (i = 0; i < clocks->count; i++)
1687 clocks->clock[i] = table->entries[i].clk;
1689 case amd_pp_mem_clock:
1690 clocks->count = CZ_NUM_NBPMEMORYCLOCK;
1691 for (i = 0; i < clocks->count; i++)
1692 clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i];
1701 static int cz_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
1703 struct phm_clock_voltage_dependency_table *table =
1704 hwmgr->dyn_state.vddc_dependency_on_sclk;
1705 unsigned long level;
1706 const struct phm_clock_and_voltage_limits *limits =
1707 &hwmgr->dyn_state.max_clock_voltage_on_ac;
1709 if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
1712 level = cz_get_max_sclk_level(hwmgr) - 1;
1714 if (level < table->count)
1715 clocks->engine_max_clock = table->entries[level].clk;
1717 clocks->engine_max_clock = table->entries[table->count - 1].clk;
1719 clocks->memory_max_clock = limits->mclk;
1724 static int cz_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1726 int actual_temp = 0;
1727 uint32_t val = cgs_read_ind_register(hwmgr->device,
1728 CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
1729 uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);
1731 if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
1732 actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1734 actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1739 static int cz_read_sensor(struct pp_hwmgr *hwmgr, int idx,
1740 void *value, int *size)
1742 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1744 struct phm_clock_voltage_dependency_table *table =
1745 hwmgr->dyn_state.vddc_dependency_on_sclk;
1747 struct phm_vce_clock_voltage_dependency_table *vce_table =
1748 hwmgr->dyn_state.vce_clock_voltage_dependency_table;
1750 struct phm_uvd_clock_voltage_dependency_table *uvd_table =
1751 hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
1753 uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
1754 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
1755 uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
1756 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
1757 uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
1758 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
1760 uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
1761 uint16_t vddnb, vddgfx;
1764 /* size must be at least 4 bytes for all sensors */
1770 case AMDGPU_PP_SENSOR_GFX_SCLK:
1771 if (sclk_index < NUM_SCLK_LEVELS) {
1772 sclk = table->entries[sclk_index].clk;
1773 *((uint32_t *)value) = sclk;
1777 case AMDGPU_PP_SENSOR_VDDNB:
1778 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
1779 CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
1780 vddnb = cz_convert_8Bit_index_to_voltage(hwmgr, tmp);
1781 *((uint32_t *)value) = vddnb;
1783 case AMDGPU_PP_SENSOR_VDDGFX:
1784 tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
1785 CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
1786 vddgfx = cz_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
1787 *((uint32_t *)value) = vddgfx;
1789 case AMDGPU_PP_SENSOR_UVD_VCLK:
1790 if (!cz_hwmgr->uvd_power_gated) {
1791 if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
1794 vclk = uvd_table->entries[uvd_index].vclk;
1795 *((uint32_t *)value) = vclk;
1799 *((uint32_t *)value) = 0;
1801 case AMDGPU_PP_SENSOR_UVD_DCLK:
1802 if (!cz_hwmgr->uvd_power_gated) {
1803 if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
1806 dclk = uvd_table->entries[uvd_index].dclk;
1807 *((uint32_t *)value) = dclk;
1811 *((uint32_t *)value) = 0;
1813 case AMDGPU_PP_SENSOR_VCE_ECCLK:
1814 if (!cz_hwmgr->vce_power_gated) {
1815 if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
1818 ecclk = vce_table->entries[vce_index].ecclk;
1819 *((uint32_t *)value) = ecclk;
1823 *((uint32_t *)value) = 0;
1825 case AMDGPU_PP_SENSOR_GPU_LOAD:
1826 result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
1828 activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
1829 activity_percent = activity_percent > 100 ? 100 : activity_percent;
1831 activity_percent = 50;
1833 *((uint32_t *)value) = activity_percent;
1835 case AMDGPU_PP_SENSOR_UVD_POWER:
1836 *((uint32_t *)value) = cz_hwmgr->uvd_power_gated ? 0 : 1;
1838 case AMDGPU_PP_SENSOR_VCE_POWER:
1839 *((uint32_t *)value) = cz_hwmgr->vce_power_gated ? 0 : 1;
1841 case AMDGPU_PP_SENSOR_GPU_TEMP:
1842 *((uint32_t *)value) = cz_thermal_get_temperature(hwmgr);
1849 static int cz_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
1850 uint32_t virtual_addr_low,
1851 uint32_t virtual_addr_hi,
1852 uint32_t mc_addr_low,
1853 uint32_t mc_addr_hi,
1856 smum_send_msg_to_smc_with_parameter(hwmgr,
1857 PPSMC_MSG_DramAddrHiVirtual,
1859 smum_send_msg_to_smc_with_parameter(hwmgr,
1860 PPSMC_MSG_DramAddrLoVirtual,
1862 smum_send_msg_to_smc_with_parameter(hwmgr,
1863 PPSMC_MSG_DramAddrHiPhysical,
1865 smum_send_msg_to_smc_with_parameter(hwmgr,
1866 PPSMC_MSG_DramAddrLoPhysical,
1869 smum_send_msg_to_smc_with_parameter(hwmgr,
1870 PPSMC_MSG_DramBufferSize,
1875 static int cz_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
1876 struct PP_TemperatureRange *thermal_data)
1878 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
1880 memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
1882 thermal_data->max = (cz_hwmgr->thermal_auto_throttling_treshold +
1883 cz_hwmgr->sys_info.htc_hyst_lmt) *
1884 PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1889 static const struct pp_hwmgr_func cz_hwmgr_funcs = {
1890 .backend_init = cz_hwmgr_backend_init,
1891 .backend_fini = cz_hwmgr_backend_fini,
1892 .apply_state_adjust_rules = cz_apply_state_adjust_rules,
1893 .force_dpm_level = cz_dpm_force_dpm_level,
1894 .get_power_state_size = cz_get_power_state_size,
1895 .powerdown_uvd = cz_dpm_powerdown_uvd,
1896 .powergate_uvd = cz_dpm_powergate_uvd,
1897 .powergate_vce = cz_dpm_powergate_vce,
1898 .get_mclk = cz_dpm_get_mclk,
1899 .get_sclk = cz_dpm_get_sclk,
1900 .patch_boot_state = cz_dpm_patch_boot_state,
1901 .get_pp_table_entry = cz_dpm_get_pp_table_entry,
1902 .get_num_of_pp_table_entries = cz_dpm_get_num_of_pp_table_entries,
1903 .set_cpu_power_state = cz_set_cpu_power_state,
1904 .store_cc6_data = cz_store_cc6_data,
1905 .force_clock_level = cz_force_clock_level,
1906 .print_clock_levels = cz_print_clock_levels,
1907 .get_dal_power_level = cz_get_dal_power_level,
1908 .get_performance_level = cz_get_performance_level,
1909 .get_current_shallow_sleep_clocks = cz_get_current_shallow_sleep_clocks,
1910 .get_clock_by_type = cz_get_clock_by_type,
1911 .get_max_high_clocks = cz_get_max_high_clocks,
1912 .get_temperature = cz_thermal_get_temperature,
1913 .read_sensor = cz_read_sensor,
1914 .power_off_asic = cz_power_off_asic,
1915 .asic_setup = cz_setup_asic_task,
1916 .dynamic_state_management_enable = cz_enable_dpm_tasks,
1917 .power_state_set = cz_set_power_state_tasks,
1918 .dynamic_state_management_disable = cz_disable_dpm_tasks,
1919 .notify_cac_buffer_info = cz_notify_cac_buffer_info,
1920 .get_thermal_temperature_range = cz_get_thermal_temperature_range,
1923 int cz_init_function_pointers(struct pp_hwmgr *hwmgr)
1925 hwmgr->hwmgr_func = &cz_hwmgr_funcs;
1926 hwmgr->pptable_func = &pptable_funcs;