Merge tag 'for-airlie-tda998x' of git://git.armlinux.org.uk/~rmk/linux-arm into drm...
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm_pp_smu.c
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  */
24 #include <linux/string.h>
25 #include <linux/acpi.h>
26
27 #include <drm/drm_probe_helper.h>
28 #include <drm/amdgpu_drm.h>
29 #include "dm_services.h"
30 #include "amdgpu.h"
31 #include "amdgpu_dm.h"
32 #include "amdgpu_dm_irq.h"
33 #include "amdgpu_pm.h"
34 #include "dm_pp_smu.h"
35 #include "amdgpu_smu.h"
36
37
/*
 * Push the current display configuration from DC down to powerplay/SMU so
 * power management can size clocks and p-states for the active displays.
 *
 * When DPM is disabled this is a no-op; the function always returns true.
 */
bool dm_pp_apply_display_requirements(
		const struct dc_context *ctx,
		const struct dm_pp_display_configuration *pp_display_cfg)
{
	struct amdgpu_device *adev = ctx->driver_context;
	struct smu_context *smu = &adev->smu;
	int i;

	if (adev->pm.dpm_enabled) {

		/* Start from a clean slate; fields not set below stay zero. */
		memset(&adev->pm.pm_display_cfg, 0,
				sizeof(adev->pm.pm_display_cfg));

		adev->pm.pm_display_cfg.cpu_cc6_disable =
			pp_display_cfg->cpu_cc6_disable;

		adev->pm.pm_display_cfg.cpu_pstate_disable =
			pp_display_cfg->cpu_pstate_disable;

		adev->pm.pm_display_cfg.cpu_pstate_separation_time =
			pp_display_cfg->cpu_pstate_separation_time;

		adev->pm.pm_display_cfg.nb_pstate_switch_disable =
			pp_display_cfg->nb_pstate_switch_disable;

		adev->pm.pm_display_cfg.num_display =
				pp_display_cfg->display_count;
		adev->pm.pm_display_cfg.num_path_including_non_display =
				pp_display_cfg->display_count;

		/*
		 * DC supplies clocks in kHz; the /10 below apparently converts
		 * to the 10 kHz units pplib uses (consistent with the "*10"
		 * translation in dm_pp_get_clock_levels_by_type()) — confirm
		 * against pm_display_cfg's definition.
		 */
		adev->pm.pm_display_cfg.min_core_set_clock =
				pp_display_cfg->min_engine_clock_khz/10;
		adev->pm.pm_display_cfg.min_core_set_clock_in_sr =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_mem_set_clock =
				pp_display_cfg->min_memory_clock_khz/10;

		adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk =
				pp_display_cfg->min_engine_clock_deep_sleep_khz/10;
		adev->pm.pm_display_cfg.min_dcef_set_clk =
				pp_display_cfg->min_dcfclock_khz/10;

		adev->pm.pm_display_cfg.multi_monitor_in_sync =
				pp_display_cfg->all_displays_in_sync;
		adev->pm.pm_display_cfg.min_vblank_time =
				pp_display_cfg->avail_mclk_switch_time_us;

		adev->pm.pm_display_cfg.display_clk =
				pp_display_cfg->disp_clk_khz/10;

		adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency =
				pp_display_cfg->avail_mclk_switch_time_in_disp_active_us;

		adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index;
		adev->pm.pm_display_cfg.line_time_in_us =
				pp_display_cfg->line_time_in_us;

		/* Refresh rate is taken from the first display config only. */
		adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh;
		adev->pm.pm_display_cfg.crossfire_display_index = -1;
		adev->pm.pm_display_cfg.min_bus_bandwidth = 0;

		/* Controller ids are 1-based on the powerplay side. */
		for (i = 0; i < pp_display_cfg->display_count; i++) {
			const struct dm_pp_single_disp_config *dc_cfg =
						&pp_display_cfg->disp_configs[i];
			adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1;
		}

		/* Prefer the pplib hook; fall back to the SW SMU path. */
		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change)
			adev->powerplay.pp_funcs->display_configuration_change(
				adev->powerplay.pp_handle,
				&adev->pm.pm_display_cfg);
		else
			smu_display_configuration_change(smu,
							 &adev->pm.pm_display_cfg);

		amdgpu_pm_compute_clocks(adev);
	}

	return true;
}
118
119 static void get_default_clock_levels(
120                 enum dm_pp_clock_type clk_type,
121                 struct dm_pp_clock_levels *clks)
122 {
123         uint32_t disp_clks_in_khz[6] = {
124                         300000, 400000, 496560, 626090, 685720, 757900 };
125         uint32_t sclks_in_khz[6] = {
126                         300000, 360000, 423530, 514290, 626090, 720000 };
127         uint32_t mclks_in_khz[2] = { 333000, 800000 };
128
129         switch (clk_type) {
130         case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
131                 clks->num_levels = 6;
132                 memmove(clks->clocks_in_khz, disp_clks_in_khz,
133                                 sizeof(disp_clks_in_khz));
134                 break;
135         case DM_PP_CLOCK_TYPE_ENGINE_CLK:
136                 clks->num_levels = 6;
137                 memmove(clks->clocks_in_khz, sclks_in_khz,
138                                 sizeof(sclks_in_khz));
139                 break;
140         case DM_PP_CLOCK_TYPE_MEMORY_CLK:
141                 clks->num_levels = 2;
142                 memmove(clks->clocks_in_khz, mclks_in_khz,
143                                 sizeof(mclks_in_khz));
144                 break;
145         default:
146                 clks->num_levels = 0;
147                 break;
148         }
149 }
150
151 static enum amd_pp_clock_type dc_to_pp_clock_type(
152                 enum dm_pp_clock_type dm_pp_clk_type)
153 {
154         enum amd_pp_clock_type amd_pp_clk_type = 0;
155
156         switch (dm_pp_clk_type) {
157         case DM_PP_CLOCK_TYPE_DISPLAY_CLK:
158                 amd_pp_clk_type = amd_pp_disp_clock;
159                 break;
160         case DM_PP_CLOCK_TYPE_ENGINE_CLK:
161                 amd_pp_clk_type = amd_pp_sys_clock;
162                 break;
163         case DM_PP_CLOCK_TYPE_MEMORY_CLK:
164                 amd_pp_clk_type = amd_pp_mem_clock;
165                 break;
166         case DM_PP_CLOCK_TYPE_DCEFCLK:
167                 amd_pp_clk_type  = amd_pp_dcef_clock;
168                 break;
169         case DM_PP_CLOCK_TYPE_DCFCLK:
170                 amd_pp_clk_type = amd_pp_dcf_clock;
171                 break;
172         case DM_PP_CLOCK_TYPE_PIXELCLK:
173                 amd_pp_clk_type = amd_pp_pixel_clock;
174                 break;
175         case DM_PP_CLOCK_TYPE_FCLK:
176                 amd_pp_clk_type = amd_pp_f_clock;
177                 break;
178         case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK:
179                 amd_pp_clk_type = amd_pp_phy_clock;
180                 break;
181         case DM_PP_CLOCK_TYPE_DPPCLK:
182                 amd_pp_clk_type = amd_pp_dpp_clock;
183                 break;
184         default:
185                 DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n",
186                                 dm_pp_clk_type);
187                 break;
188         }
189
190         return amd_pp_clk_type;
191 }
192
/*
 * Map a pplib PP_DAL_POWERLEVEL value onto the matching DC DPM-state enum.
 * Unknown values are logged and reported as DM_PP_CLOCKS_STATE_INVALID.
 */
static enum dm_pp_clocks_state pp_to_dc_powerlevel_state(
			enum PP_DAL_POWERLEVEL max_clocks_state)
{
	switch (max_clocks_state) {
	case PP_DAL_POWERLEVEL_0:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_0;
	case PP_DAL_POWERLEVEL_1:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_1;
	case PP_DAL_POWERLEVEL_2:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_2;
	case PP_DAL_POWERLEVEL_3:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_3;
	case PP_DAL_POWERLEVEL_4:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_4;
	case PP_DAL_POWERLEVEL_5:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_5;
	case PP_DAL_POWERLEVEL_6:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_6;
	case PP_DAL_POWERLEVEL_7:
		return DM_PP_CLOCKS_DPM_STATE_LEVEL_7;
	default:
		DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n",
				max_clocks_state);
		return DM_PP_CLOCKS_STATE_INVALID;
	}
}
219
220 static void pp_to_dc_clock_levels(
221                 const struct amd_pp_clocks *pp_clks,
222                 struct dm_pp_clock_levels *dc_clks,
223                 enum dm_pp_clock_type dc_clk_type)
224 {
225         uint32_t i;
226
227         if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) {
228                 DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
229                                 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
230                                 pp_clks->count,
231                                 DM_PP_MAX_CLOCK_LEVELS);
232
233                 dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS;
234         } else
235                 dc_clks->num_levels = pp_clks->count;
236
237         DRM_INFO("DM_PPLIB: values for %s clock\n",
238                         DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
239
240         for (i = 0; i < dc_clks->num_levels; i++) {
241                 DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]);
242                 dc_clks->clocks_in_khz[i] = pp_clks->clock[i];
243         }
244 }
245
246 static void pp_to_dc_clock_levels_with_latency(
247                 const struct pp_clock_levels_with_latency *pp_clks,
248                 struct dm_pp_clock_levels_with_latency *clk_level_info,
249                 enum dm_pp_clock_type dc_clk_type)
250 {
251         uint32_t i;
252
253         if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
254                 DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
255                                 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
256                                 pp_clks->num_levels,
257                                 DM_PP_MAX_CLOCK_LEVELS);
258
259                 clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
260         } else
261                 clk_level_info->num_levels = pp_clks->num_levels;
262
263         DRM_DEBUG("DM_PPLIB: values for %s clock\n",
264                         DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
265
266         for (i = 0; i < clk_level_info->num_levels; i++) {
267                 DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
268                 clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
269                 clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
270         }
271 }
272
273 static void pp_to_dc_clock_levels_with_voltage(
274                 const struct pp_clock_levels_with_voltage *pp_clks,
275                 struct dm_pp_clock_levels_with_voltage *clk_level_info,
276                 enum dm_pp_clock_type dc_clk_type)
277 {
278         uint32_t i;
279
280         if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) {
281                 DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n",
282                                 DC_DECODE_PP_CLOCK_TYPE(dc_clk_type),
283                                 pp_clks->num_levels,
284                                 DM_PP_MAX_CLOCK_LEVELS);
285
286                 clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS;
287         } else
288                 clk_level_info->num_levels = pp_clks->num_levels;
289
290         DRM_INFO("DM_PPLIB: values for %s clock\n",
291                         DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
292
293         for (i = 0; i < clk_level_info->num_levels; i++) {
294                 DRM_INFO("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
295                 clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
296                 clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
297         }
298 }
299
300 bool dm_pp_get_clock_levels_by_type(
301                 const struct dc_context *ctx,
302                 enum dm_pp_clock_type clk_type,
303                 struct dm_pp_clock_levels *dc_clks)
304 {
305         struct amdgpu_device *adev = ctx->driver_context;
306         void *pp_handle = adev->powerplay.pp_handle;
307         struct amd_pp_clocks pp_clks = { 0 };
308         struct amd_pp_simple_clock_info validation_clks = { 0 };
309         uint32_t i;
310
311         if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) {
312                 if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
313                         dc_to_pp_clock_type(clk_type), &pp_clks)) {
314                 /* Error in pplib. Provide default values. */
315                         return true;
316                 }
317         } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
318                 if (smu_get_clock_by_type(&adev->smu,
319                                           dc_to_pp_clock_type(clk_type),
320                                           &pp_clks)) {
321                         get_default_clock_levels(clk_type, dc_clks);
322                         return true;
323                 }
324         }
325
326         pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type);
327
328         if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) {
329                 if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks(
330                                                 pp_handle, &validation_clks)) {
331                         /* Error in pplib. Provide default values. */
332                         DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
333                         validation_clks.engine_max_clock = 72000;
334                         validation_clks.memory_max_clock = 80000;
335                         validation_clks.level = 0;
336                 }
337         } else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) {
338                 if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
339                         DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
340                         validation_clks.engine_max_clock = 72000;
341                         validation_clks.memory_max_clock = 80000;
342                         validation_clks.level = 0;
343                 }
344         }
345
346         DRM_INFO("DM_PPLIB: Validation clocks:\n");
347         DRM_INFO("DM_PPLIB:    engine_max_clock: %d\n",
348                         validation_clks.engine_max_clock);
349         DRM_INFO("DM_PPLIB:    memory_max_clock: %d\n",
350                         validation_clks.memory_max_clock);
351         DRM_INFO("DM_PPLIB:    level           : %d\n",
352                         validation_clks.level);
353
354         /* Translate 10 kHz to kHz. */
355         validation_clks.engine_max_clock *= 10;
356         validation_clks.memory_max_clock *= 10;
357
358         /* Determine the highest non-boosted level from the Validation Clocks */
359         if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) {
360                 for (i = 0; i < dc_clks->num_levels; i++) {
361                         if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) {
362                                 /* This clock is higher the validation clock.
363                                  * Than means the previous one is the highest
364                                  * non-boosted one. */
365                                 DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n",
366                                                 dc_clks->num_levels, i);
367                                 dc_clks->num_levels = i > 0 ? i : 1;
368                                 break;
369                         }
370                 }
371         } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) {
372                 for (i = 0; i < dc_clks->num_levels; i++) {
373                         if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) {
374                                 DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n",
375                                                 dc_clks->num_levels, i);
376                                 dc_clks->num_levels = i > 0 ? i : 1;
377                                 break;
378                         }
379                 }
380         }
381
382         return true;
383 }
384
385 bool dm_pp_get_clock_levels_by_type_with_latency(
386         const struct dc_context *ctx,
387         enum dm_pp_clock_type clk_type,
388         struct dm_pp_clock_levels_with_latency *clk_level_info)
389 {
390         struct amdgpu_device *adev = ctx->driver_context;
391         void *pp_handle = adev->powerplay.pp_handle;
392         struct pp_clock_levels_with_latency pp_clks = { 0 };
393         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
394         int ret;
395
396         if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
397                 ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
398                                                 dc_to_pp_clock_type(clk_type),
399                                                 &pp_clks);
400                 if (ret)
401                         return false;
402         } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
403                 if (smu_get_clock_by_type_with_latency(&adev->smu,
404                                                        dc_to_pp_clock_type(clk_type),
405                                                        &pp_clks))
406                         return false;
407         }
408
409
410         pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
411
412         return true;
413 }
414
415 bool dm_pp_get_clock_levels_by_type_with_voltage(
416         const struct dc_context *ctx,
417         enum dm_pp_clock_type clk_type,
418         struct dm_pp_clock_levels_with_voltage *clk_level_info)
419 {
420         struct amdgpu_device *adev = ctx->driver_context;
421         void *pp_handle = adev->powerplay.pp_handle;
422         struct pp_clock_levels_with_voltage pp_clk_info = {0};
423         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
424         int ret;
425
426         if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) {
427                 ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle,
428                                                 dc_to_pp_clock_type(clk_type),
429                                                 &pp_clk_info);
430                 if (ret)
431                         return false;
432         } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_voltage) {
433                 if (smu_get_clock_by_type_with_voltage(&adev->smu,
434                                                        dc_to_pp_clock_type(clk_type),
435                                                        &pp_clk_info))
436                         return false;
437         }
438
439         pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
440
441         return true;
442 }
443
/*
 * Watermark clock-range change notification hook.
 * Not implemented yet; always reports failure to the caller.
 */
bool dm_pp_notify_wm_clock_changes(
	const struct dc_context *ctx,
	struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges)
{
	/* TODO: to be implemented */
	return false;
}
451
/*
 * Power-level change request hook.
 * Not implemented yet; always reports failure to the caller.
 */
bool dm_pp_apply_power_level_change_request(
	const struct dc_context *ctx,
	struct dm_pp_power_level_change_request *level_change_req)
{
	/* TODO: to be implemented */
	return false;
}
459
460 bool dm_pp_apply_clock_for_voltage_request(
461         const struct dc_context *ctx,
462         struct dm_pp_clock_for_voltage_req *clock_for_voltage_req)
463 {
464         struct amdgpu_device *adev = ctx->driver_context;
465         struct pp_display_clock_request pp_clock_request = {0};
466         int ret = 0;
467
468         pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type);
469         pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz;
470
471         if (!pp_clock_request.clock_type)
472                 return false;
473
474         if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request)
475                 ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
476                         adev->powerplay.pp_handle,
477                         &pp_clock_request);
478         else if (adev->smu.funcs &&
479                  adev->smu.funcs->display_clock_voltage_request)
480                 ret = smu_display_clock_voltage_request(&adev->smu,
481                                                         &pp_clock_request);
482         if (ret)
483                 return false;
484         return true;
485 }
486
487 bool dm_pp_get_static_clocks(
488         const struct dc_context *ctx,
489         struct dm_pp_static_clock_info *static_clk_info)
490 {
491         struct amdgpu_device *adev = ctx->driver_context;
492         struct amd_pp_clock_info pp_clk_info = {0};
493         int ret = 0;
494
495         if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks)
496                 ret = adev->powerplay.pp_funcs->get_current_clocks(
497                         adev->powerplay.pp_handle,
498                         &pp_clk_info);
499         else if (adev->smu.funcs)
500                 ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
501         if (ret)
502                 return false;
503
504         static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
505         static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
506         static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
507
508         return true;
509 }
510
/*
 * Translate DC's reader/writer watermark range sets (clocks in MHz) into
 * the SoC15 DMIF/MCIF watermark tables (clocks in kHz) and hand them to
 * pplib, or to the SW SMU when pplib does not provide the hook.
 */
void pp_rv_set_wm_ranges(struct pp_smu *pp,
		struct pp_smu_wm_range_sets *ranges)
{
	const struct dc_context *ctx = pp->dm;
	struct amdgpu_device *adev = ctx->driver_context;
	void *pp_handle = adev->powerplay.pp_handle;
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
	struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges;
	struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges;
	int32_t i;

	wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
	wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;

	/* Reader (DMIF) sets: drain = DCFCLK, fill = memory clock. */
	for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
		/* wm_inst above 3 is out of range; fall back to set A. */
		if (ranges->reader_wm_sets[i].wm_inst > 3)
			wm_dce_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_dce_clocks[i].wm_set_id =
					ranges->reader_wm_sets[i].wm_inst;
		wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
				ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
		wm_dce_clocks[i].wm_max_mem_clk_in_khz =
				ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_dce_clocks[i].wm_min_mem_clk_in_khz =
				ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
	}

	/* Writer (MCIF) sets: fill = SOCCLK, drain = memory clock. */
	for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
		/* Same out-of-range fallback as the reader sets. */
		if (ranges->writer_wm_sets[i].wm_inst > 3)
			wm_soc_clocks[i].wm_set_id = WM_SET_A;
		else
			wm_soc_clocks[i].wm_set_id =
					ranges->writer_wm_sets[i].wm_inst;
		wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
				ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
		wm_soc_clocks[i].wm_max_mem_clk_in_khz =
				ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
		wm_soc_clocks[i].wm_min_mem_clk_in_khz =
				ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
	}

	if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
		pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
							   &wm_with_clock_ranges);
	else if (adev->smu.funcs &&
		 adev->smu.funcs->set_watermarks_for_clock_ranges)
		smu_set_watermarks_for_clock_ranges(&adev->smu,
						    &wm_with_clock_ranges);
}
566
567 void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
568 {
569         const struct dc_context *ctx = pp->dm;
570         struct amdgpu_device *adev = ctx->driver_context;
571         void *pp_handle = adev->powerplay.pp_handle;
572         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
573
574         if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
575                 pp_funcs->notify_smu_enable_pwe(pp_handle);
576         else if (adev->smu.funcs)
577                 smu_notify_smu_enable_pwe(&adev->smu);
578 }
579
580 void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
581 {
582         const struct dc_context *ctx = pp->dm;
583         struct amdgpu_device *adev = ctx->driver_context;
584         void *pp_handle = adev->powerplay.pp_handle;
585         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
586
587         if (!pp_funcs || !pp_funcs->set_active_display_count)
588                 return;
589
590         pp_funcs->set_active_display_count(pp_handle, count);
591 }
592
593 void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock)
594 {
595         const struct dc_context *ctx = pp->dm;
596         struct amdgpu_device *adev = ctx->driver_context;
597         void *pp_handle = adev->powerplay.pp_handle;
598         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
599
600         if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk)
601                 return;
602
603         pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock);
604 }
605
606 void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock)
607 {
608         const struct dc_context *ctx = pp->dm;
609         struct amdgpu_device *adev = ctx->driver_context;
610         void *pp_handle = adev->powerplay.pp_handle;
611         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
612
613         if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq)
614                 return;
615
616         pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock);
617 }
618
619 void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
620 {
621         const struct dc_context *ctx = pp->dm;
622         struct amdgpu_device *adev = ctx->driver_context;
623         void *pp_handle = adev->powerplay.pp_handle;
624         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
625
626         if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq)
627                 return;
628
629         pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
630 }
631
/*
 * Populate the Raven-family pp_smu function table used by DC, binding each
 * hook to its amdgpu-side implementation and stashing the DC context so
 * the hooks can find the device again.
 */
void dm_pp_get_funcs(
		struct dc_context *ctx,
		struct pp_smu_funcs *funcs)
{
	funcs->rv_funcs.pp_smu.dm = ctx;
	funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
	funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
	funcs->rv_funcs.set_display_count = pp_rv_set_active_display_count;
	funcs->rv_funcs.set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk;
	funcs->rv_funcs.set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq;
	funcs->rv_funcs.set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq;
}
644