/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dccg.h"
#include "clk_mgr_internal.h"

// For dce12_get_dp_ref_freq_khz
#include "dce100/dce_clk_mgr.h"

// For dcn20_update_clocks_update_dpp_dto
#include "dcn20/dcn20_clk_mgr.h"

#include "dcn31_clk_mgr.h"

#include "reg_helper.h"
#include "core_types.h"
#include "dcn31_smu.h"
#include "dm_helpers.h"

/* TODO: remove this include once the remaining clk mgr functions are ported over */
#include "dcn30/dcn30_clk_mgr.h"

#include "dc_dmub_srv.h"

#define TO_CLK_MGR_DCN31(clk_mgr)\
	container_of(clk_mgr, struct clk_mgr_dcn31, base)
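
/*
 * Count active displays for the display-off workarounds. Walks the stream
 * list to detect TMDS (HDMI/DVI) signals, then counts links whose DIG is
 * enabled. Reports at least one display when a TMDS signal is present, to
 * avoid a hang when HDMI is turned off and back on.
 */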
int dcn31_get_active_display_cnt_wa(
		struct dc *dc,
		struct dc_state *context)
{
	int i, display_count = 0;
	bool tmds_present = false;

	for (i = 0; i < context->stream_count; i++) {
		const struct dc_stream_state *stream = context->streams[i];

		if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
				stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
				stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
			tmds_present = true;
	}

	for (i = 0; i < dc->link_count; i++) {
		const struct dc_link *link = dc->links[i];

		/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
		if (link->link_enc->funcs->is_dig_enabled &&
				link->link_enc->funcs->is_dig_enabled(link->link_enc))
			display_count++;
	}

	/* WA for hang on HDMI after the display is turned off and back on */
	if (display_count == 0 && tmds_present)
		display_count = 1;

	return display_count;
}
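
/*
 * Workaround used around DISPCLK changes: immediately disable (or re-enable)
 * the CRTC on any pipe that is otherwise idle (dpms_off or a virtual signal)
 * so the OTG is not running while DISPCLK is reprogrammed.
 */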
static void dcn31_disable_otg_wa(struct clk_mgr *clk_mgr_base, bool disable)
{
	struct dc *dc = clk_mgr_base->ctx->dc;
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; ++i) {
		struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];

		if (pipe->top_pipe || pipe->prev_odm_pipe)
			continue;
		if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))) {
			if (disable)
				pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
			else
				pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
		}
	}
}
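
/*
 * Main clock-update entry point: applies the Z9/DTBCLK/power-state policy,
 * programs DCFCLK, deep-sleep DCFCLK, DPPCLK and DISPCLK through the SMU,
 * then notifies DMCUB of the latest clock values.
 */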
static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
			struct dc_state *context,
			bool safe_to_lower)
{
	union dmub_rb_cmd cmd;
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
	struct dc *dc = clk_mgr_base->ctx->dc;
	int display_count;
	bool update_dppclk = false;
	bool update_dispclk = false;
	bool dpp_clock_lowered = false;

	if (dc->work_arounds.skip_clock_update)
		return;

	/*
	 * If it is safe to lower but we are already in the lower state, we don't have to do anything.
	 * Also, if safe to lower is false, we just go to the higher state.
	 */
	if (safe_to_lower) {
		if (new_clocks->z9_support == DCN_Z9_SUPPORT_ALLOW &&
				new_clocks->z9_support != clk_mgr_base->clks.z9_support) {
			dcn31_smu_set_Z9_support(clk_mgr, true);
			clk_mgr_base->clks.z9_support = new_clocks->z9_support;
		}

		if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
			dcn31_smu_set_dtbclk(clk_mgr, false);
			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
		}

		/* check that we're not already in lower */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
			display_count = dcn31_get_active_display_cnt_wa(dc, context);
			/* if we can go lower, go lower */
			if (display_count == 0) {
				union display_idle_optimization_u idle_info = { 0 };
				idle_info.idle_info.df_request_disabled = 1;
				idle_info.idle_info.phy_ref_clk_off = 1;
				dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
				/* update power state */
				clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
			}
		}
	} else {
		if (new_clocks->z9_support == DCN_Z9_SUPPORT_DISALLOW &&
				new_clocks->z9_support != clk_mgr_base->clks.z9_support) {
			dcn31_smu_set_Z9_support(clk_mgr, false);
			clk_mgr_base->clks.z9_support = new_clocks->z9_support;
		}

		if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
			dcn31_smu_set_dtbclk(clk_mgr, true);
			clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
		}

		/* check that we're not already in D0 */
		if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
			union display_idle_optimization_u idle_info = { 0 };
			dcn31_smu_set_display_idle_optimization(clk_mgr, idle_info.data);
			/* update power state */
			clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE;
		}
	}

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz)) {
		clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
		dcn31_smu_set_hard_min_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_khz);
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz)) {
		clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
		dcn31_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
	}

	// workaround: limit DPPCLK to a 100 MHz floor to avoid underflow when a low-clock eDP panel config switches to eDP plus a 4K monitor
	if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
		if (new_clocks->dppclk_khz < 100000)
			new_clocks->dppclk_khz = 100000;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
		if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
			dpp_clock_lowered = true;
		clk_mgr_base->clks.dppclk_khz = new_clocks->dppclk_khz;
		update_dppclk = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
		dcn31_disable_otg_wa(clk_mgr_base, true);

		clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
		dcn31_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
		dcn31_disable_otg_wa(clk_mgr_base, false);

		update_dispclk = true;
	}

	/* TODO: add back DTO programming when DPPCLK restore is fixed in FSDL */
	if (dpp_clock_lowered) {
		// increase per-DPP DTO before lowering global dppclk
		dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
	} else {
		// increase global DPPCLK before lowering per-DPP DTO
		if (update_dppclk || update_dispclk)
			dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
	}

	// notify DMCUB of latest clocks
	memset(&cmd, 0, sizeof(cmd));
	cmd.notify_clocks.header.type = DMUB_CMD__CLK_MGR;
	cmd.notify_clocks.header.sub_type = DMUB_CMD__CLK_MGR_NOTIFY_CLOCKS;
	cmd.notify_clocks.clocks.dcfclk_khz = clk_mgr_base->clks.dcfclk_khz;
	cmd.notify_clocks.clocks.dcfclk_deep_sleep_khz =
		clk_mgr_base->clks.dcfclk_deep_sleep_khz;
	cmd.notify_clocks.clocks.dispclk_khz = clk_mgr_base->clks.dispclk_khz;
	cmd.notify_clocks.clocks.dppclk_khz = clk_mgr_base->clks.dppclk_khz;

	dc_dmub_srv_cmd_queue(dc->ctx->dmub_srv, &cmd);
	dc_dmub_srv_cmd_execute(dc->ctx->dmub_srv);
	dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}

static int get_vco_frequency_from_reg(struct clk_mgr_internal *clk_mgr)
{
	/* TODO: read the DENTIST VCO frequency from the CLK registers */
	return 0;
}

static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);

	dcn31_smu_enable_pme_wa(clk_mgr);
}
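
/*
 * Reset the cached clock state. The boot state is assumed to support p-state
 * changes; power state and Z9 support start out unknown until the first
 * update_clocks call.
 */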
static void dcn31_init_clocks(struct clk_mgr *clk_mgr)
{
	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));

	// Assumption is that boot state always supports pstate
	clk_mgr->clks.p_state_change_support = true;
	clk_mgr->clks.prev_p_state_change_support = true;
	clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
	clk_mgr->clks.z9_support = DCN_Z9_SUPPORT_UNKNOWN;
}

static bool dcn31_are_clock_states_equal(struct dc_clocks *a,
		struct dc_clocks *b)
{
	return a->dispclk_khz == b->dispclk_khz &&
		a->dppclk_khz == b->dppclk_khz &&
		a->dcfclk_khz == b->dcfclk_khz &&
		a->dcfclk_deep_sleep_khz == b->dcfclk_deep_sleep_khz &&
		a->z9_support == b->z9_support &&
		a->dtbclk_en == b->dtbclk_en;
}

static void dcn31_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
		struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
{
	/* TODO: dump the boot-time CLK register state into regs_and_bypass */
	return;
}

static struct clk_bw_params dcn31_bw_params = {
	.vram_type = Ddr4MemType,
};
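
/*
 * Hardcoded watermark tables, selected at construct time based on the
 * memory type reported by the BIOS (DDR4 vs LPDDR5). Latencies are in
 * microseconds.
 */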
static struct wm_table ddr4_wm_table = {
	.entries = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 6.09,
			.sr_enter_plus_exit_time_us = 7.14,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 10.12,
			.sr_enter_plus_exit_time_us = 11.48,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 10.12,
			.sr_enter_plus_exit_time_us = 11.48,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.72,
			.sr_exit_time_us = 10.12,
			.sr_enter_plus_exit_time_us = 11.48,
			.valid = true,
		},
	}
};

static struct wm_table lpddr5_wm_table = {
	.entries = {
		{
			.wm_inst = WM_A,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 5.32,
			.sr_enter_plus_exit_time_us = 6.38,
			.valid = true,
		},
		{
			.wm_inst = WM_B,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 9.82,
			.sr_enter_plus_exit_time_us = 11.196,
			.valid = true,
		},
		{
			.wm_inst = WM_C,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 9.89,
			.sr_enter_plus_exit_time_us = 11.24,
			.valid = true,
		},
		{
			.wm_inst = WM_D,
			.wm_type = WM_TYPE_PSTATE_CHG,
			.pstate_latency_us = 11.65333,
			.sr_exit_time_us = 9.748,
			.sr_enter_plus_exit_time_us = 11.102,
			.valid = true,
		},
	}
};

static DpmClocks_t dummy_clocks;

static struct dcn31_watermarks dummy_wms = { 0 };
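
/*
 * Translate the driver watermark table into the SMU's WatermarkRow layout.
 * Ranges are keyed on DCFCLK (the MinMclk/MaxMclk fields carry DCFCLK bounds
 * here), left unconstrained on the clock axis, and stitched so consecutive
 * sets do not overlap and the full range is covered.
 */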
static void dcn31_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn31_watermarks *table)
{
	int i, num_valid_sets;

	num_valid_sets = 0;

	for (i = 0; i < WM_SET_COUNT; i++) {
		/* skip empty entries, the smu array has no holes */
		if (!bw_params->wm_table.entries[i].valid)
			continue;

		table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmSetting = bw_params->wm_table.entries[i].wm_inst;
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType = bw_params->wm_table.entries[i].wm_type;
		/* We will not select WM based on fclk, so leave it as unconstrained */
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
		table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;

		if (table->WatermarkRow[WM_DCFCLK][num_valid_sets].WmType == WM_TYPE_PSTATE_CHG) {
			if (i == 0)
				table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk = 0;
			else {
				/* add 1 to make it non-overlapping with next lvl */
				table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinMclk =
						bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
			}
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxMclk =
					bw_params->clk_table.entries[i].dcfclk_mhz;
		} else {
			/* unconstrained for memory retraining */
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MinClock = 0;
			table->WatermarkRow[WM_DCFCLK][num_valid_sets].MaxClock = 0xFFFF;

			/* Modify previous watermark range to cover up to max */
			table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;
		}
		num_valid_sets++;
	}

	ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */

	/* modify the min and max to make sure we cover the whole range */
	table->WatermarkRow[WM_DCFCLK][0].MinMclk = 0;
	table->WatermarkRow[WM_DCFCLK][0].MinClock = 0;
	table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxMclk = 0xFFFF;
	table->WatermarkRow[WM_DCFCLK][num_valid_sets - 1].MaxClock = 0xFFFF;

	/* This is for writeback only; it does not matter currently as there is no writeback support */
	table->WatermarkRow[WM_SOCCLK][0].WmSetting = WM_A;
	table->WatermarkRow[WM_SOCCLK][0].MinClock = 0;
	table->WatermarkRow[WM_SOCCLK][0].MaxClock = 0xFFFF;
	table->WatermarkRow[WM_SOCCLK][0].MinMclk = 0;
	table->WatermarkRow[WM_SOCCLK][0].MaxMclk = 0xFFFF;
}
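
/*
 * Send the watermark ranges to the SMU: build the table in the shared
 * frame-buffer allocation, hand the SMU its GPU address, then trigger the
 * DRAM-to-SMU transfer. Skipped when no SMU is present or the allocation
 * failed (dummy table with a null address).
 */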
static void dcn31_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
	struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
	struct clk_mgr_dcn31 *clk_mgr_dcn31 = TO_CLK_MGR_DCN31(clk_mgr);
	struct dcn31_watermarks *table = clk_mgr_dcn31->smu_wm_set.wm_set;

	if (!clk_mgr->smu_ver)
		return;

	if (!table || clk_mgr_dcn31->smu_wm_set.mc_address.quad_part == 0)
		return;

	memset(table, 0, sizeof(*table));

	dcn31_build_watermark_ranges(clk_mgr_base->bw_params, table);

	dcn31_smu_set_dram_addr_high(clk_mgr,
			clk_mgr_dcn31->smu_wm_set.mc_address.high_part);
	dcn31_smu_set_dram_addr_low(clk_mgr,
			clk_mgr_dcn31->smu_wm_set.mc_address.low_part);
	dcn31_smu_transfer_wm_table_dram_2_smu(clk_mgr);
}
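
/*
 * Pull the DPM clock table from the SMU into the shared DRAM buffer
 * (the inverse direction of the watermark transfer above).
 */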
static void dcn31_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
		struct dcn31_smu_dpm_clks *smu_dpm_clks)
{
	DpmClocks_t *table = smu_dpm_clks->dpm_clks;

	if (!clk_mgr->smu_ver)
		return;

	if (!table || smu_dpm_clks->mc_address.quad_part == 0)
		return;

	memset(table, 0, sizeof(*table));

	dcn31_smu_set_dram_addr_high(clk_mgr,
			smu_dpm_clks->mc_address.high_part);
	dcn31_smu_set_dram_addr_low(clk_mgr,
			smu_dpm_clks->mc_address.low_part);
	dcn31_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}

static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
	uint32_t max = 0;
	int i;

	for (i = 0; i < num_clocks; ++i) {
		if (clocks[i] > max)
			max = clocks[i];
	}

	return max;
}

static unsigned int find_clk_for_voltage(
		const DpmClocks_t *clock_table,
		const uint32_t clocks[],
		unsigned int voltage)
{
	int i;

	for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
		if (clock_table->SocVoltage[i] == voltage)
			return clocks[i];
	}

	ASSERT(0);	/* voltage not found in the SMU table */
	return 0;
}
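
/*
 * Build bw_params from the SMU DPM table. The DF p-state table is filled
 * in reverse order (lowest level last), so scan backwards for the last
 * populated level, then fill the driver clock table lowest-level-first,
 * matching DCFCLK/SOCCLK to each level's voltage and using the maximum
 * DISPCLK/DPPCLK for every entry.
 */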
void dcn31_clk_mgr_helper_populate_bw_params(
		struct clk_mgr_internal *clk_mgr,
		struct integrated_info *bios_info,
		const DpmClocks_t *clock_table)
{
	int i, j;
	struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
	uint32_t max_dispclk = 0, max_dppclk = 0;

	j = -1;

	ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);

	/* Find the lowest DPM level; FCLK is filled in reverse order */
	for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
		if (clock_table->DfPstateTable[i].FClk != 0) {
			j = i;
			break;
		}
	}

	if (j == -1) {
		/* clock table is all 0s, just use our own hardcode */
		ASSERT(0);
		return;
	}

	bw_params->clk_table.num_entries = j + 1;

	/* dispclk and dppclk can be max at any voltage, same number of levels for both */
	if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
	    clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
		max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
		max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
	} else {
		ASSERT(0);
	}

	for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
		bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
		bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
		switch (clock_table->DfPstateTable[j].WckRatio) {
		case WCK_RATIO_1_2:
			bw_params->clk_table.entries[i].wck_ratio = 2;
			break;
		case WCK_RATIO_1_4:
			bw_params->clk_table.entries[i].wck_ratio = 4;
			break;
		default:
			bw_params->clk_table.entries[i].wck_ratio = 1;
		}
		bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
		bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
		bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
		bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
	}

	bw_params->vram_type = bios_info->memory_type;
	bw_params->num_channels = bios_info->ma_channel_number;

	for (i = 0; i < WM_SET_COUNT; i++) {
		bw_params->wm_table.entries[i].wm_inst = i;

		if (i >= bw_params->clk_table.num_entries) {
			bw_params->wm_table.entries[i].valid = false;
			continue;
		}

		bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
		bw_params->wm_table.entries[i].valid = true;
	}
}

static struct clk_mgr_funcs dcn31_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.update_clocks = dcn31_update_clocks,
	.init_clocks = dcn31_init_clocks,
	.enable_pme_wa = dcn31_enable_pme_wa,
	.are_clock_states_equal = dcn31_are_clock_states_equal,
	.notify_wm_ranges = dcn31_notify_wm_ranges
};

extern struct clk_mgr_funcs dcn3_fpga_funcs;
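
/*
 * Construct the DCN3.1 clock manager: wire up the vtable, allocate the
 * SMU-shared watermark and DPM-clock tables in frame-buffer memory (falling
 * back to dummy CPU copies on failure), query the SMU version, pick the
 * DDR4 or LPDDR5 watermark table, and populate bw_params from the SMU DPM
 * table when p-state support is enabled.
 */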
void dcn31_clk_mgr_construct(
		struct dc_context *ctx,
		struct clk_mgr_dcn31 *clk_mgr,
		struct pp_smu_funcs *pp_smu,
		struct dccg *dccg)
{
	struct dcn31_smu_dpm_clks smu_dpm_clks = { 0 };

	clk_mgr->base.base.ctx = ctx;
	clk_mgr->base.base.funcs = &dcn31_funcs;

	clk_mgr->base.pp_smu = pp_smu;

	clk_mgr->base.dccg = dccg;
	clk_mgr->base.dfs_bypass_disp_clk = 0;

	clk_mgr->base.dprefclk_ss_percentage = 0;
	clk_mgr->base.dprefclk_ss_divider = 1000;
	clk_mgr->base.ss_on_dprefclk = false;

	clk_mgr->smu_wm_set.wm_set = (struct dcn31_watermarks *)dm_helpers_allocate_gpu_mem(
				clk_mgr->base.base.ctx,
				DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				sizeof(struct dcn31_watermarks),
				&clk_mgr->smu_wm_set.mc_address.quad_part);

	if (clk_mgr->smu_wm_set.wm_set == NULL) {
		clk_mgr->smu_wm_set.wm_set = &dummy_wms;
		clk_mgr->smu_wm_set.mc_address.quad_part = 0;
	}
	ASSERT(clk_mgr->smu_wm_set.wm_set);

	smu_dpm_clks.dpm_clks = (DpmClocks_t *)dm_helpers_allocate_gpu_mem(
				clk_mgr->base.base.ctx,
				DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				sizeof(DpmClocks_t),
				&smu_dpm_clks.mc_address.quad_part);

	if (smu_dpm_clks.dpm_clks == NULL) {
		smu_dpm_clks.dpm_clks = &dummy_clocks;
		smu_dpm_clks.mc_address.quad_part = 0;
	}
	ASSERT(smu_dpm_clks.dpm_clks);

	if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
		clk_mgr->base.base.funcs = &dcn3_fpga_funcs;
	} else {
		struct clk_log_info log_info = {0};

		clk_mgr->base.smu_ver = dcn31_smu_get_smu_version(&clk_mgr->base);

		if (clk_mgr->base.smu_ver)
			clk_mgr->base.smu_present = true;

		/* TODO: Check we get what we expect during bringup */
		clk_mgr->base.base.dentist_vco_freq_khz = get_vco_frequency_from_reg(&clk_mgr->base);

		if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType)
			dcn31_bw_params.wm_table = lpddr5_wm_table;
		else
			dcn31_bw_params.wm_table = ddr4_wm_table;

		/* Saved clocks configured at boot for debug purposes */
		dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
	}

	clk_mgr->base.base.dprefclk_khz = 600000;
	clk_mgr->base.dccg->ref_dtbclk_khz = 600000;
	dce_clock_read_ss_info(&clk_mgr->base);

	clk_mgr->base.base.bw_params = &dcn31_bw_params;

	if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
		dcn31_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);

		if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
			dcn31_clk_mgr_helper_populate_bw_params(
					&clk_mgr->base,
					ctx->dc_bios->integrated_info,
					smu_dpm_clks.dpm_clks);
		}
	}

	if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
		dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				smu_dpm_clks.dpm_clks);
}
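
/*
 * Free the SMU-shared watermark table allocated in construct; the dummy
 * fallback (null GPU address) is static and is not freed.
 */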
void dcn31_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int)
{
	struct clk_mgr_dcn31 *clk_mgr = TO_CLK_MGR_DCN31(clk_mgr_int);

	if (clk_mgr->smu_wm_set.wm_set && clk_mgr->smu_wm_set.mc_address.quad_part != 0)
		dm_helpers_free_gpu_mem(clk_mgr_int->base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
				clk_mgr->smu_wm_set.wm_set);
}