27ca0e6b724fe72b1d424ef7ee2e842ae60e4d4e
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / pm / powerplay / hwmgr / smu7_hwmgr.c
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23 #include "pp_debug.h"
24 #include <linux/delay.h>
25 #include <linux/fb.h>
26 #include <linux/module.h>
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <asm/div64.h>
30 #include <drm/amdgpu_drm.h>
31 #include "ppatomctrl.h"
32 #include "atombios.h"
33 #include "pptable_v1_0.h"
34 #include "pppcielanes.h"
35 #include "amd_pcie_helpers.h"
36 #include "hardwaremanager.h"
37 #include "process_pptables_v1_0.h"
38 #include "cgs_common.h"
39
40 #include "smu7_common.h"
41
42 #include "hwmgr.h"
43 #include "smu7_hwmgr.h"
44 #include "smu_ucode_xfer_vi.h"
45 #include "smu7_powertune.h"
46 #include "smu7_dyn_defaults.h"
47 #include "smu7_thermal.h"
48 #include "smu7_clockpowergating.h"
49 #include "processpptables.h"
50 #include "pp_thermal.h"
51 #include "smu7_baco.h"
52
53 #include "ivsrcid/ivsrcid_vislands30.h"
54
55 #define MC_CG_ARB_FREQ_F0           0x0a
56 #define MC_CG_ARB_FREQ_F1           0x0b
57 #define MC_CG_ARB_FREQ_F2           0x0c
58 #define MC_CG_ARB_FREQ_F3           0x0d
59
60 #define MC_CG_SEQ_DRAMCONF_S0       0x05
61 #define MC_CG_SEQ_DRAMCONF_S1       0x06
62 #define MC_CG_SEQ_YCLK_SUSPEND      0x04
63 #define MC_CG_SEQ_YCLK_RESUME       0x0a
64
65 #define SMC_CG_IND_START            0xc0030000
66 #define SMC_CG_IND_END              0xc0040000
67
68 #define MEM_FREQ_LOW_LATENCY        25000
69 #define MEM_FREQ_HIGH_LATENCY       80000
70
71 #define MEM_LATENCY_HIGH            45
72 #define MEM_LATENCY_LOW             35
73 #define MEM_LATENCY_ERR             0xFFFF
74
75 #define MC_SEQ_MISC0_GDDR5_SHIFT 28
76 #define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
77 #define MC_SEQ_MISC0_GDDR5_VALUE 5
78
79 #define PCIE_BUS_CLK                10000
80 #define TCLK                        (PCIE_BUS_CLK / 10)
81
/* Predefined per-profile-mode parameter rows; rows 0 and 6 are all-zero
 * placeholders. Each row carries two 4-value groups, one per clock domain
 * (SCLK first, then MCLK) — presumably {update-flag, up_hyst, down_hyst,
 * activity_level} per struct profile_mode_setting; TODO confirm against
 * the struct declaration (not visible in this file). */
static struct profile_mode_setting smu7_profiling[7] =
                                        {{0, 0, 0, 0, 0, 0, 0, 0},
                                         {1, 0, 100, 30, 1, 0, 100, 10},
                                         {1, 10, 0, 30, 0, 0, 0, 0},
                                         {0, 0, 0, 0, 1, 10, 16, 31},
                                         {1, 0, 11, 50, 1, 0, 100, 10},
                                         {1, 0, 5, 30, 0, 0, 0, 0},
                                         {0, 0, 0, 0, 0, 0, 0, 0},
                                        };
91
92 #define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)
93
94 #define ixPWR_SVI2_PLANE1_LOAD                     0xC0200280
95 #define PWR_SVI2_PLANE1_LOAD__PSI1_MASK                    0x00000020L
96 #define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK                 0x00000040L
97 #define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT                  0x00000005
98 #define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT               0x00000006
99
100 #define STRAP_EVV_REVISION_MSB          2211
101 #define STRAP_EVV_REVISION_LSB          2208
102
103 /** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
        DPM_EVENT_SRC_ANALOG = 0,                /* analog source only */
        DPM_EVENT_SRC_EXTERNAL = 1,              /* external source only */
        DPM_EVENT_SRC_DIGITAL = 2,               /* digital source only */
        DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,    /* analog or external */
        DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4    /* digital or external */
};
111
112 #define ixDIDT_SQ_EDC_CTRL                         0x0013
113 #define ixDIDT_SQ_EDC_THRESHOLD                    0x0014
114 #define ixDIDT_SQ_EDC_STALL_PATTERN_1_2            0x0015
115 #define ixDIDT_SQ_EDC_STALL_PATTERN_3_4            0x0016
116 #define ixDIDT_SQ_EDC_STALL_PATTERN_5_6            0x0017
117 #define ixDIDT_SQ_EDC_STALL_PATTERN_7              0x0018
118
119 #define ixDIDT_TD_EDC_CTRL                         0x0053
120 #define ixDIDT_TD_EDC_THRESHOLD                    0x0054
121 #define ixDIDT_TD_EDC_STALL_PATTERN_1_2            0x0055
122 #define ixDIDT_TD_EDC_STALL_PATTERN_3_4            0x0056
123 #define ixDIDT_TD_EDC_STALL_PATTERN_5_6            0x0057
124 #define ixDIDT_TD_EDC_STALL_PATTERN_7              0x0058
125
126 #define ixDIDT_TCP_EDC_CTRL                        0x0073
127 #define ixDIDT_TCP_EDC_THRESHOLD                   0x0074
128 #define ixDIDT_TCP_EDC_STALL_PATTERN_1_2           0x0075
129 #define ixDIDT_TCP_EDC_STALL_PATTERN_3_4           0x0076
130 #define ixDIDT_TCP_EDC_STALL_PATTERN_5_6           0x0077
131 #define ixDIDT_TCP_EDC_STALL_PATTERN_7             0x0078
132
133 #define ixDIDT_DB_EDC_CTRL                         0x0033
134 #define ixDIDT_DB_EDC_THRESHOLD                    0x0034
135 #define ixDIDT_DB_EDC_STALL_PATTERN_1_2            0x0035
136 #define ixDIDT_DB_EDC_STALL_PATTERN_3_4            0x0036
137 #define ixDIDT_DB_EDC_STALL_PATTERN_5_6            0x0037
138 #define ixDIDT_DB_EDC_STALL_PATTERN_7              0x0038
139
/* Ordered list of DIDT EDC register offsets (SQ, TD, TCP and DB blocks,
 * defined just above) to be programmed as a group; the list is walked
 * until the 0xFFFFFFFF sentinel.
 * NOTE(review): symbol has external linkage and is mutable — if no other
 * translation unit references it, it should be `static const`; verify
 * before changing linkage. */
uint32_t DIDTEDCConfig_P12[] = {
    ixDIDT_SQ_EDC_STALL_PATTERN_1_2,
    ixDIDT_SQ_EDC_STALL_PATTERN_3_4,
    ixDIDT_SQ_EDC_STALL_PATTERN_5_6,
    ixDIDT_SQ_EDC_STALL_PATTERN_7,
    ixDIDT_SQ_EDC_THRESHOLD,
    ixDIDT_SQ_EDC_CTRL,
    ixDIDT_TD_EDC_STALL_PATTERN_1_2,
    ixDIDT_TD_EDC_STALL_PATTERN_3_4,
    ixDIDT_TD_EDC_STALL_PATTERN_5_6,
    ixDIDT_TD_EDC_STALL_PATTERN_7,
    ixDIDT_TD_EDC_THRESHOLD,
    ixDIDT_TD_EDC_CTRL,
    ixDIDT_TCP_EDC_STALL_PATTERN_1_2,
    ixDIDT_TCP_EDC_STALL_PATTERN_3_4,
    ixDIDT_TCP_EDC_STALL_PATTERN_5_6,
    ixDIDT_TCP_EDC_STALL_PATTERN_7,
    ixDIDT_TCP_EDC_THRESHOLD,
    ixDIDT_TCP_EDC_CTRL,
    ixDIDT_DB_EDC_STALL_PATTERN_1_2,
    ixDIDT_DB_EDC_STALL_PATTERN_3_4,
    ixDIDT_DB_EDC_STALL_PATTERN_5_6,
    ixDIDT_DB_EDC_STALL_PATTERN_7,
    ixDIDT_DB_EDC_THRESHOLD,
    ixDIDT_DB_EDC_CTRL,
    0xFFFFFFFF // End of list
};
167
168 static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
169 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
170                 enum pp_clock_type type, uint32_t mask);
171
/* Downcast a generic pp_hw_power_state to the SMU7-specific power state
 * after validating its magic number; returns NULL (with an error logged
 * by PP_ASSERT_WITH_CODE) when the magic does not match. */
static struct smu7_power_state *cast_phw_smu7_power_state(
                                  struct pp_hw_power_state *hw_ps)
{
        PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
                                "Invalid Powerstate Type!",
                                 return NULL);

        return (struct smu7_power_state *)hw_ps;
}
181
/* Const-qualified counterpart of cast_phw_smu7_power_state(): validates
 * the magic number and returns NULL on a mismatch. */
static const struct smu7_power_state *cast_const_phw_smu7_power_state(
                                 const struct pp_hw_power_state *hw_ps)
{
        PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
                                "Invalid Powerstate Type!",
                                 return NULL);

        return (const struct smu7_power_state *)hw_ps;
}
191
192 /**
193  * Find the MC microcode version and store it in the HwMgr struct
194  *
195  * @param    hwmgr  the address of the powerplay hardware manager.
196  * @return   always 0
197  */
198 static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
199 {
200         cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);
201
202         hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
203
204         return 0;
205 }
206
207 static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
208 {
209         uint32_t speedCntl = 0;
210
211         /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
212         speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
213                         ixPCIE_LC_SPEED_CNTL);
214         return((uint16_t)PHM_GET_FIELD(speedCntl,
215                         PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
216 }
217
218 static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
219 {
220         uint32_t link_width;
221
222         /* mmPCIE_PORT_INDEX rename as mmPCIE_INDEX */
223         link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
224                         PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
225
226         PP_ASSERT_WITH_CODE((7 >= link_width),
227                         "Invalid PCIe lane width!", return 0);
228
229         return decode_pcie_lane_width(link_width);
230 }
231
/**
* Enable SMC voltage control
*
* @param    hwmgr  the address of the powerplay hardware manager.
* @return   always 0
*/
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
        /* On VEGAM, clear the PSI1 and PSI0_EN bits of the SVI2 plane-1
         * load register before handing voltage control to the SMC. */
        if (hwmgr->chip_id == CHIP_VEGAM) {
                PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
                                CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
                PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
                                CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
        }

        /* Only tell the SMC to take over when the driver feature mask
         * allows SMC voltage control. */
        if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);

        return 0;
}
252
253 /**
254 * Checks if we want to support voltage control
255 *
256 * @param    hwmgr  the address of the powerplay hardware manager.
257 */
258 static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
259 {
260         const struct smu7_hwmgr *data =
261                         (const struct smu7_hwmgr *)(hwmgr->backend);
262
263         return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
264 }
265
266 /**
267 * Enable voltage control
268 *
269 * @param    hwmgr  the address of the powerplay hardware manager.
270 * @return   always 0
271 */
/* Set GENERAL_PWRMGT.VOLT_PWRMGT_EN so the hardware voltage power
 * management path is active; always returns 0. */
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
        /* enable voltage control */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

        return 0;
}
280
281 static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
282                 struct phm_clock_voltage_dependency_table *voltage_dependency_table
283                 )
284 {
285         uint32_t i;
286
287         PP_ASSERT_WITH_CODE((NULL != voltage_table),
288                         "Voltage Dependency Table empty.", return -EINVAL;);
289
290         voltage_table->mask_low = 0;
291         voltage_table->phase_delay = 0;
292         voltage_table->count = voltage_dependency_table->count;
293
294         for (i = 0; i < voltage_dependency_table->count; i++) {
295                 voltage_table->entries[i].value =
296                         voltage_dependency_table->entries[i].v;
297                 voltage_table->entries[i].smio_low = 0;
298         }
299
300         return 0;
301 }
302
303
304 /**
305 * Create Voltage Tables.
306 *
307 * @param    hwmgr  the address of the powerplay hardware manager.
308 * @return   always 0
309 */
310 static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
311 {
312         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
313         struct phm_ppt_v1_information *table_info =
314                         (struct phm_ppt_v1_information *)hwmgr->pptable;
315         int result = 0;
316         uint32_t tmp;
317
318         if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
319                 result = atomctrl_get_voltage_table_v3(hwmgr,
320                                 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
321                                 &(data->mvdd_voltage_table));
322                 PP_ASSERT_WITH_CODE((0 == result),
323                                 "Failed to retrieve MVDD table.",
324                                 return result);
325         } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
326                 if (hwmgr->pp_table_version == PP_TABLE_V1)
327                         result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
328                                         table_info->vdd_dep_on_mclk);
329                 else if (hwmgr->pp_table_version == PP_TABLE_V0)
330                         result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
331                                         hwmgr->dyn_state.mvdd_dependency_on_mclk);
332
333                 PP_ASSERT_WITH_CODE((0 == result),
334                                 "Failed to retrieve SVI2 MVDD table from dependency table.",
335                                 return result;);
336         }
337
338         if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
339                 result = atomctrl_get_voltage_table_v3(hwmgr,
340                                 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
341                                 &(data->vddci_voltage_table));
342                 PP_ASSERT_WITH_CODE((0 == result),
343                                 "Failed to retrieve VDDCI table.",
344                                 return result);
345         } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
346                 if (hwmgr->pp_table_version == PP_TABLE_V1)
347                         result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
348                                         table_info->vdd_dep_on_mclk);
349                 else if (hwmgr->pp_table_version == PP_TABLE_V0)
350                         result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
351                                         hwmgr->dyn_state.vddci_dependency_on_mclk);
352                 PP_ASSERT_WITH_CODE((0 == result),
353                                 "Failed to retrieve SVI2 VDDCI table from dependency table.",
354                                 return result);
355         }
356
357         if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
358                 /* VDDGFX has only SVI2 voltage control */
359                 result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
360                                         table_info->vddgfx_lookup_table);
361                 PP_ASSERT_WITH_CODE((0 == result),
362                         "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
363         }
364
365
366         if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
367                 result = atomctrl_get_voltage_table_v3(hwmgr,
368                                         VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
369                                         &data->vddc_voltage_table);
370                 PP_ASSERT_WITH_CODE((0 == result),
371                         "Failed to retrieve VDDC table.", return result;);
372         } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {
373
374                 if (hwmgr->pp_table_version == PP_TABLE_V0)
375                         result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
376                                         hwmgr->dyn_state.vddc_dependency_on_mclk);
377                 else if (hwmgr->pp_table_version == PP_TABLE_V1)
378                         result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
379                                 table_info->vddc_lookup_table);
380
381                 PP_ASSERT_WITH_CODE((0 == result),
382                         "Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
383         }
384
385         tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
386         PP_ASSERT_WITH_CODE(
387                         (data->vddc_voltage_table.count <= tmp),
388                 "Too many voltage values for VDDC. Trimming to fit state table.",
389                         phm_trim_voltage_table_to_fit_state_table(tmp,
390                                                 &(data->vddc_voltage_table)));
391
392         tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
393         PP_ASSERT_WITH_CODE(
394                         (data->vddgfx_voltage_table.count <= tmp),
395                 "Too many voltage values for VDDC. Trimming to fit state table.",
396                         phm_trim_voltage_table_to_fit_state_table(tmp,
397                                                 &(data->vddgfx_voltage_table)));
398
399         tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
400         PP_ASSERT_WITH_CODE(
401                         (data->vddci_voltage_table.count <= tmp),
402                 "Too many voltage values for VDDCI. Trimming to fit state table.",
403                         phm_trim_voltage_table_to_fit_state_table(tmp,
404                                         &(data->vddci_voltage_table)));
405
406         tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
407         PP_ASSERT_WITH_CODE(
408                         (data->mvdd_voltage_table.count <= tmp),
409                 "Too many voltage values for MVDD. Trimming to fit state table.",
410                         phm_trim_voltage_table_to_fit_state_table(tmp,
411                                                 &(data->mvdd_voltage_table)));
412
413         return 0;
414 }
415
416 /**
* Programs static screen threshold parameters
418 *
419 * @param    hwmgr  the address of the powerplay hardware manager.
420 * @return   always 0
421 */
static int smu7_program_static_screen_threshold_parameters(
                                                        struct pp_hwmgr *hwmgr)
{
        /* Both values were decided earlier and cached in the backend. */
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        /* Set static screen threshold unit */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
                        data->static_screen_threshold_unit);
        /* Set static screen threshold */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
                        data->static_screen_threshold);

        return 0;
}
438
439 /**
440 * Setup display gap for glitch free memory clock switching.
441 *
442 * @param    hwmgr  the address of the powerplay hardware manager.
443 * @return   always  0
444 */
445 static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
446 {
447         uint32_t display_gap =
448                         cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
449                                         ixCG_DISPLAY_GAP_CNTL);
450
451         display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
452                         DISP_GAP, DISPLAY_GAP_IGNORE);
453
454         display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
455                         DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);
456
457         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
458                         ixCG_DISPLAY_GAP_CNTL, display_gap);
459
460         return 0;
461 }
462
463 /**
464 * Programs activity state transition voting clients
465 *
466 * @param    hwmgr  the address of the powerplay hardware manager.
467 * @return   always  0
468 */
469 static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
470 {
471         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
472         int i;
473
474         /* Clear reset for voting clients before enabling DPM */
475         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
476                         SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
477         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
478                         SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);
479
480         for (i = 0; i < 8; i++)
481                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
482                                         ixCG_FREQ_TRAN_VOTING_0 + i * 4,
483                                         data->voting_rights_clients[i]);
484         return 0;
485 }
486
487 static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
488 {
489         int i;
490
491         /* Reset voting clients before disabling DPM */
492         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
493                         SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
494         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
495                         SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
496
497         for (i = 0; i < 8; i++)
498                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
499                                 ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);
500
501         return 0;
502 }
503
504 /* Copy one arb setting to another and then switch the active set.
505  * arb_src and arb_dest is one of the MC_CG_ARB_FREQ_Fx constants.
506  */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
                uint32_t arb_src, uint32_t arb_dest)
{
        uint32_t mc_arb_dram_timing;
        uint32_t mc_arb_dram_timing2;
        uint32_t burst_time;
        uint32_t mc_cg_config;

        /* Capture the DRAM timing registers and burst time of the
         * source set; only F0 and F1 are valid sources here. */
        switch (arb_src) {
        case MC_CG_ARB_FREQ_F0:
                mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
                mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
                burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
                break;
        case MC_CG_ARB_FREQ_F1:
                mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
                mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
                burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
                break;
        default:
                return -EINVAL;
        }

        /* Replay the captured values into the destination set. */
        switch (arb_dest) {
        case MC_CG_ARB_FREQ_F0:
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
                PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
                break;
        case MC_CG_ARB_FREQ_F1:
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
                PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
                break;
        default:
                return -EINVAL;
        }

        /* Set the low four bits of MC_CG_CONFIG and request the
         * destination set as the active ARB set. */
        mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
        mc_cg_config |= 0x0000000F;
        cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
        PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

        return 0;
}
552
/* Ask the SMC to restore all power-management settings to defaults. */
static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
        return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
}
557
558 /**
559 * Initial switch from ARB F0->F1
560 *
561 * @param    hwmgr  the address of the powerplay hardware manager.
562 * @return   always 0
563 * This function is to be called from the SetPowerState table.
564 */
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
        /* Copy the F0 ARB register set into F1 and make F1 active. */
        return smu7_copy_and_switch_arb_sets(hwmgr,
                        MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
570
571 static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
572 {
573         uint32_t tmp;
574
575         tmp = (cgs_read_ind_register(hwmgr->device,
576                         CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
577                         0x0000ff00) >> 8;
578
579         if (tmp == MC_CG_ARB_FREQ_F0)
580                 return 0;
581
582         return smu7_copy_and_switch_arb_sets(hwmgr,
583                         tmp, MC_CG_ARB_FREQ_F0);
584 }
585
/* Build the default PCIe speed DPM table, either from the V1 pptable's
 * PCIe table or from hardcoded gen/lane defaults, and append a boot
 * level entry past the counted range. */
static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        struct phm_ppt_v1_pcie_table *pcie_table = NULL;

        uint32_t i, max_entry;
        uint32_t tmp;

        PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
                        data->use_pcie_power_saving_levels), "No pcie performance levels!",
                        return -EINVAL);

        /* table_info is NULL for V0 pptables; then the hardcoded
         * fallback below is used. */
        if (table_info != NULL)
                pcie_table = table_info->pcie_table;

        /* If only one of the two level sets is in use, mirror it into
         * the other so both gen/lane pairs are populated. */
        if (data->use_pcie_performance_levels &&
                        !data->use_pcie_power_saving_levels) {
                data->pcie_gen_power_saving = data->pcie_gen_performance;
                data->pcie_lane_power_saving = data->pcie_lane_performance;
        } else if (!data->use_pcie_performance_levels &&
                        data->use_pcie_power_saving_levels) {
                data->pcie_gen_performance = data->pcie_gen_power_saving;
                data->pcie_lane_performance = data->pcie_lane_power_saving;
        }
        /* tmp = SMU's max number of link levels. */
        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
        phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
                                        tmp,
                                        MAX_REGULAR_DPM_NUMBER);

        if (pcie_table != NULL) {
                /* max_entry is used to make sure we reserve one PCIE level
                 * for boot level (fix for A+A PSPP issue).
                 * If PCIE table from PPTable have ULV entry + 8 entries,
                 * then ignore the last entry.*/
                max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
                /* Entry 0 of the pptable is skipped (reserved for ULV);
                 * table levels are shifted down by one. */
                for (i = 1; i < max_entry; i++) {
                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
                                        get_pcie_gen_support(data->pcie_gen_cap,
                                                        pcie_table->entries[i].gen_speed),
                                        get_pcie_lane_support(data->pcie_lane_cap,
                                                        pcie_table->entries[i].lane_width));
                }
                data->dpm_table.pcie_speed_table.count = max_entry - 1;
                smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
        } else {
                /* Hardcode Pcie Table */
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Min_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Min_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));

                data->dpm_table.pcie_speed_table.count = 6;
        }
        /* Populate last level for boot PCIE level, but do not increment count. */
        if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
                /* CI: overwrite every level (<= count, so including the
                 * extra boot slot) with max gen and the VBIOS boot lane
                 * width. */
                for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                data->vbios_boot_state.pcie_lane_bootup_value);
        } else {
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
                        data->dpm_table.pcie_speed_table.count,
                        get_pcie_gen_support(data->pcie_gen_cap,
                                        PP_Min_PCIEGen),
                        get_pcie_lane_support(data->pcie_lane_cap,
                                        PP_Max_PCIELane));
        }
        return 0;
}
685
686 static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
687 {
688         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
689
690         memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));
691
692         phm_reset_single_dpm_table(
693                         &data->dpm_table.sclk_table,
694                                 smum_get_mac_definition(hwmgr,
695                                         SMU_MAX_LEVELS_GRAPHICS),
696                                         MAX_REGULAR_DPM_NUMBER);
697         phm_reset_single_dpm_table(
698                         &data->dpm_table.mclk_table,
699                         smum_get_mac_definition(hwmgr,
700                                 SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);
701
702         phm_reset_single_dpm_table(
703                         &data->dpm_table.vddc_table,
704                                 smum_get_mac_definition(hwmgr,
705                                         SMU_MAX_LEVELS_VDDC),
706                                         MAX_REGULAR_DPM_NUMBER);
707         phm_reset_single_dpm_table(
708                         &data->dpm_table.vddci_table,
709                         smum_get_mac_definition(hwmgr,
710                                 SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);
711
712         phm_reset_single_dpm_table(
713                         &data->dpm_table.mvdd_table,
714                                 smum_get_mac_definition(hwmgr,
715                                         SMU_MAX_LEVELS_MVDD),
716                                         MAX_REGULAR_DPM_NUMBER);
717         return 0;
718 }
719 /*
720  * This function is to initialize all DPM state tables
721  * for SMU7 based on the dependency table.
722  * Dynamic state patching function will then trim these
723  * state tables to the allowed range based
724  * on the power policy or external client requests,
725  * such as UVD request, etc.
726  */
727
728 static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
729 {
730         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
731         struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
732                 hwmgr->dyn_state.vddc_dependency_on_sclk;
733         struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
734                 hwmgr->dyn_state.vddc_dependency_on_mclk;
735         struct phm_cac_leakage_table *std_voltage_table =
736                 hwmgr->dyn_state.cac_leakage_table;
737         uint32_t i;
738
739         PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
740                 "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
741         PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
742                 "SCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
743
744         PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
745                 "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
746         PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
747                 "VMCLK dependency table has to have is missing. This table is mandatory", return -EINVAL);
748
749
750         /* Initialize Sclk DPM table based on allow Sclk values*/
751         data->dpm_table.sclk_table.count = 0;
752
753         for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
754                 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
755                                 allowed_vdd_sclk_table->entries[i].clk) {
756                         data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
757                                 allowed_vdd_sclk_table->entries[i].clk;
758                         data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
759                         data->dpm_table.sclk_table.count++;
760                 }
761         }
762
763         PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
764                 "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
765         /* Initialize Mclk DPM table based on allow Mclk values */
766         data->dpm_table.mclk_table.count = 0;
767         for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
768                 if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
769                         allowed_vdd_mclk_table->entries[i].clk) {
770                         data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
771                                 allowed_vdd_mclk_table->entries[i].clk;
772                         data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
773                         data->dpm_table.mclk_table.count++;
774                 }
775         }
776
777         /* Initialize Vddc DPM table based on allow Vddc values.  And populate corresponding std values. */
778         for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
779                 data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
780                 data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
781                 /* param1 is for corresponding std voltage */
782                 data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
783         }
784
785         data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
786         allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
787
788         if (NULL != allowed_vdd_mclk_table) {
789                 /* Initialize Vddci DPM table based on allow Mclk values */
790                 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
791                         data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
792                         data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
793                 }
794                 data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
795         }
796
797         allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;
798
799         if (NULL != allowed_vdd_mclk_table) {
800                 /*
801                  * Initialize MVDD DPM table based on allow Mclk
802                  * values
803                  */
804                 for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
805                         data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
806                         data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
807                 }
808                 data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
809         }
810
811         return 0;
812 }
813
814 static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
815 {
816         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
817         struct phm_ppt_v1_information *table_info =
818                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
819         uint32_t i;
820
821         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
822         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
823
824         if (table_info == NULL)
825                 return -EINVAL;
826
827         dep_sclk_table = table_info->vdd_dep_on_sclk;
828         dep_mclk_table = table_info->vdd_dep_on_mclk;
829
830         PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
831                         "SCLK dependency table is missing.",
832                         return -EINVAL);
833         PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
834                         "SCLK dependency table count is 0.",
835                         return -EINVAL);
836
837         PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
838                         "MCLK dependency table is missing.",
839                         return -EINVAL);
840         PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
841                         "MCLK dependency table count is 0",
842                         return -EINVAL);
843
844         /* Initialize Sclk DPM table based on allow Sclk values */
845         data->dpm_table.sclk_table.count = 0;
846         for (i = 0; i < dep_sclk_table->count; i++) {
847                 if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
848                                                 dep_sclk_table->entries[i].clk) {
849
850                         data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
851                                         dep_sclk_table->entries[i].clk;
852
853                         data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
854                                         (i == 0) ? true : false;
855                         data->dpm_table.sclk_table.count++;
856                 }
857         }
858         if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
859                 hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
860         /* Initialize Mclk DPM table based on allow Mclk values */
861         data->dpm_table.mclk_table.count = 0;
862         for (i = 0; i < dep_mclk_table->count; i++) {
863                 if (i == 0 || data->dpm_table.mclk_table.dpm_levels
864                                 [data->dpm_table.mclk_table.count - 1].value !=
865                                                 dep_mclk_table->entries[i].clk) {
866                         data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
867                                                         dep_mclk_table->entries[i].clk;
868                         data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
869                                                         (i == 0) ? true : false;
870                         data->dpm_table.mclk_table.count++;
871                 }
872         }
873
874         if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
875                 hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
876         return 0;
877 }
878
879 static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
880 {
881         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
882         struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
883         struct phm_ppt_v1_information *table_info =
884                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
885         uint32_t i;
886
887         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
888         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
889         struct phm_odn_performance_level *entries;
890
891         if (table_info == NULL)
892                 return -EINVAL;
893
894         dep_sclk_table = table_info->vdd_dep_on_sclk;
895         dep_mclk_table = table_info->vdd_dep_on_mclk;
896
897         odn_table->odn_core_clock_dpm_levels.num_of_pl =
898                                                 data->golden_dpm_table.sclk_table.count;
899         entries = odn_table->odn_core_clock_dpm_levels.entries;
900         for (i=0; i<data->golden_dpm_table.sclk_table.count; i++) {
901                 entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
902                 entries[i].enabled = true;
903                 entries[i].vddc = dep_sclk_table->entries[i].vddc;
904         }
905
906         smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
907                 (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));
908
909         odn_table->odn_memory_clock_dpm_levels.num_of_pl =
910                                                 data->golden_dpm_table.mclk_table.count;
911         entries = odn_table->odn_memory_clock_dpm_levels.entries;
912         for (i=0; i<data->golden_dpm_table.mclk_table.count; i++) {
913                 entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
914                 entries[i].enabled = true;
915                 entries[i].vddc = dep_mclk_table->entries[i].vddc;
916         }
917
918         smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
919                 (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));
920
921         return 0;
922 }
923
924 static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
925 {
926         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
927         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
928         struct phm_ppt_v1_information *table_info =
929                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
930         uint32_t min_vddc = 0;
931         uint32_t max_vddc = 0;
932
933         if (!table_info)
934                 return;
935
936         dep_sclk_table = table_info->vdd_dep_on_sclk;
937
938         atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);
939
940         if (min_vddc == 0 || min_vddc > 2000
941                 || min_vddc > dep_sclk_table->entries[0].vddc)
942                 min_vddc = dep_sclk_table->entries[0].vddc;
943
944         if (max_vddc == 0 || max_vddc > 2000
945                 || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
946                 max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;
947
948         data->odn_dpm_table.min_vddc = min_vddc;
949         data->odn_dpm_table.max_vddc = max_vddc;
950 }
951
/*
 * Compare the live DPM tables and voltage dependency tables against the
 * user's ODN (overdrive) copies and set the matching DPMTABLE_OD_UPDATE_*
 * bits in need_update_smu7_dpm_table so the affected SMU tables get
 * re-uploaded.  Silently does nothing when no V1 pptable is present.
 */
static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	uint32_t i;

	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;

	if (table_info == NULL)
		return;

	/* Any SCLK level differing from its ODN copy forces an SCLK update. */
	for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
		if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.sclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			break;
		}
	}

	/* Likewise for MCLK levels. */
	for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
		if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
					data->dpm_table.mclk_table.dpm_levels[i].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			break;
		}
	}

	/*
	 * A user-edited voltage in either dependency table forces a VDDC
	 * update plus the corresponding clock update.  The early returns
	 * matter: they deliberately skip the VDDC-flag clearing below so the
	 * voltage change is actually applied.
	 */
	dep_table = table_info->vdd_dep_on_mclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
			return;
		}
	}

	dep_table = table_info->vdd_dep_on_sclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
			return;
		}
	}

	/*
	 * No voltage differences remain: drop a stale VDDC flag but refresh
	 * both clock tables once so the reverted voltages take effect.
	 */
	if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
	}
}
1005
1006 static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
1007 {
1008         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1009
1010         smu7_reset_dpm_tables(hwmgr);
1011
1012         if (hwmgr->pp_table_version == PP_TABLE_V1)
1013                 smu7_setup_dpm_tables_v1(hwmgr);
1014         else if (hwmgr->pp_table_version == PP_TABLE_V0)
1015                 smu7_setup_dpm_tables_v0(hwmgr);
1016
1017         smu7_setup_default_pcie_table(hwmgr);
1018
1019         /* save a copy of the default DPM table */
1020         memcpy(&(data->golden_dpm_table), &(data->dpm_table),
1021                         sizeof(struct smu7_dpm_table));
1022
1023         /* initialize ODN table */
1024         if (hwmgr->od_enabled) {
1025                 if (data->odn_dpm_table.max_vddc) {
1026                         smu7_check_dpm_table_updated(hwmgr);
1027                 } else {
1028                         smu7_setup_voltage_range_from_vbios(hwmgr);
1029                         smu7_odn_initial_default_setting(hwmgr);
1030                 }
1031         }
1032         return 0;
1033 }
1034
1035 static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
1036 {
1037
1038         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1039                         PHM_PlatformCaps_RegulatorHot))
1040                 return smum_send_msg_to_smc(hwmgr,
1041                                 PPSMC_MSG_EnableVRHotGPIOInterrupt,
1042                                 NULL);
1043
1044         return 0;
1045 }
1046
/* Allow SCLK power management by clearing the SCLK_PWRMGT_OFF bit. */
static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			SCLK_PWRMGT_OFF, 0);
	return 0;
}
1053
1054 static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
1055 {
1056         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1057
1058         if (data->ulv_supported)
1059                 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);
1060
1061         return 0;
1062 }
1063
1064 static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
1065 {
1066         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1067
1068         if (data->ulv_supported)
1069                 return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);
1070
1071         return 0;
1072 }
1073
1074 static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1075 {
1076         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1077                         PHM_PlatformCaps_SclkDeepSleep)) {
1078                 if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
1079                         PP_ASSERT_WITH_CODE(false,
1080                                         "Attempt to enable Master Deep Sleep switch failed!",
1081                                         return -EINVAL);
1082         } else {
1083                 if (smum_send_msg_to_smc(hwmgr,
1084                                 PPSMC_MSG_MASTER_DeepSleep_OFF,
1085                                 NULL)) {
1086                         PP_ASSERT_WITH_CODE(false,
1087                                         "Attempt to disable Master Deep Sleep switch failed!",
1088                                         return -EINVAL);
1089                 }
1090         }
1091
1092         return 0;
1093 }
1094
1095 static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
1096 {
1097         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1098                         PHM_PlatformCaps_SclkDeepSleep)) {
1099                 if (smum_send_msg_to_smc(hwmgr,
1100                                 PPSMC_MSG_MASTER_DeepSleep_OFF,
1101                                 NULL)) {
1102                         PP_ASSERT_WITH_CODE(false,
1103                                         "Attempt to disable Master Deep Sleep switch failed!",
1104                                         return -EINVAL);
1105                 }
1106         }
1107
1108         return 0;
1109 }
1110
1111 static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
1112 {
1113         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1114         uint32_t soft_register_value = 0;
1115         uint32_t handshake_disables_offset = data->soft_regs_start
1116                                 + smum_get_offsetof(hwmgr,
1117                                         SMU_SoftRegisters, HandshakeDisables);
1118
1119         soft_register_value = cgs_read_ind_register(hwmgr->device,
1120                                 CGS_IND_REG__SMC, handshake_disables_offset);
1121         soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
1122         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1123                         handshake_disables_offset, soft_register_value);
1124         return 0;
1125 }
1126
1127 static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
1128 {
1129         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1130         uint32_t soft_register_value = 0;
1131         uint32_t handshake_disables_offset = data->soft_regs_start
1132                                 + smum_get_offsetof(hwmgr,
1133                                         SMU_SoftRegisters, HandshakeDisables);
1134
1135         soft_register_value = cgs_read_ind_register(hwmgr->device,
1136                                 CGS_IND_REG__SMC, handshake_disables_offset);
1137         soft_register_value |= smum_get_mac_definition(hwmgr,
1138                                         SMU_UVD_MCLK_HANDSHAKE_DISABLE);
1139         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1140                         handshake_disables_offset, soft_register_value);
1141         return 0;
1142 }
1143
1144 static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1145 {
1146         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1147
1148         /* enable SCLK dpm */
1149         if (!data->sclk_dpm_key_disabled) {
1150                 if (hwmgr->chip_id == CHIP_VEGAM)
1151                         smu7_disable_sclk_vce_handshake(hwmgr);
1152
1153                 PP_ASSERT_WITH_CODE(
1154                 (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
1155                 "Failed to enable SCLK DPM during DPM Start Function!",
1156                 return -EINVAL);
1157         }
1158
1159         /* enable MCLK dpm */
1160         if (0 == data->mclk_dpm_key_disabled) {
1161                 if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
1162                         smu7_disable_handshake_uvd(hwmgr);
1163
1164                 PP_ASSERT_WITH_CODE(
1165                                 (0 == smum_send_msg_to_smc(hwmgr,
1166                                                 PPSMC_MSG_MCLKDPM_Enable,
1167                                                 NULL)),
1168                                 "Failed to enable MCLK DPM during DPM Start Function!",
1169                                 return -EINVAL);
1170
1171                 if (hwmgr->chip_family != CHIP_VEGAM)
1172                         PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
1173
1174
1175                 if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1176                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
1177                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
1178                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
1179                         udelay(10);
1180                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
1181                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
1182                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
1183                 } else {
1184                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
1185                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
1186                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
1187                         udelay(10);
1188                         if (hwmgr->chip_id == CHIP_VEGAM) {
1189                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
1190                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
1191                         } else {
1192                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
1193                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
1194                         }
1195                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
1196                 }
1197         }
1198
1199         return 0;
1200 }
1201
/*
 * Bring up the SMU7 DPM machinery in order: global power management,
 * sclk deep sleep, PCIe DPM preparation, SCLK/MCLK DPM, PCIe DPM, and
 * finally the AC/DC transition interrupt.  Returns 0 on success,
 * -EINVAL if a mandatory enable step fails.
 */
static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/*enable general power management */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
			GLOBAL_PWRMGT_EN, 1);

	/* enable sclk deep sleep */

	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
			DYNAMIC_PM_EN, 1);

	/* prepare for PCIE DPM */

	/* Give the SMU a voltage-change timeout before link switching starts. */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start +
			smum_get_offsetof(hwmgr, SMU_SoftRegisters,
						VoltageChangeTimeout), 0x1000);
	/* Release the link-controller soft reset. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
			SWRST_COMMAND_1, RESETLC, 0x0);

	/*
	 * NOTE(review): 0x1488 is an undocumented register poked only on
	 * CI-family parts; the write clears its lowest bit. Confirm its
	 * meaning against the CI register headers before touching this.
	 */
	if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
		cgs_write_register(hwmgr->device, 0x1488,
			(cgs_read_register(hwmgr->device, 0x1488) & ~0x1));

	if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
		pr_err("Failed to enable Sclk DPM and Mclk DPM!");
		return -EINVAL;
	}

	/* enable PCIE dpm */
	if (0 == data->pcie_dpm_key_disabled) {
		PP_ASSERT_WITH_CODE(
				(0 == smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_PCIeDPM_Enable,
						NULL)),
				"Failed to enable pcie DPM during DPM Start Function!",
				return -EINVAL);
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition)) {
		/*
		 * Best effort: failure only loses the AC/DC GPIO interrupt,
		 * so the empty code argument means no error is propagated.
		 */
		PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_EnableACDCGPIOInterrupt,
				NULL)),
				"Failed to enable AC DC GPIO Interrupt!",
				);
	}

	return 0;
}
1255
1256 static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1257 {
1258         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1259
1260         /* disable SCLK dpm */
1261         if (!data->sclk_dpm_key_disabled) {
1262                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1263                                 "Trying to disable SCLK DPM when DPM is disabled",
1264                                 return 0);
1265                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
1266         }
1267
1268         /* disable MCLK dpm */
1269         if (!data->mclk_dpm_key_disabled) {
1270                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1271                                 "Trying to disable MCLK DPM when DPM is disabled",
1272                                 return 0);
1273                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
1274         }
1275
1276         return 0;
1277 }
1278
1279 static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
1280 {
1281         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1282
1283         /* disable general power management */
1284         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1285                         GLOBAL_PWRMGT_EN, 0);
1286         /* disable sclk deep sleep */
1287         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1288                         DYNAMIC_PM_EN, 0);
1289
1290         /* disable PCIE dpm */
1291         if (!data->pcie_dpm_key_disabled) {
1292                 PP_ASSERT_WITH_CODE(
1293                                 (smum_send_msg_to_smc(hwmgr,
1294                                                 PPSMC_MSG_PCIeDPM_Disable,
1295                                                 NULL) == 0),
1296                                 "Failed to disable pcie DPM during DPM Stop Function!",
1297                                 return -EINVAL);
1298         }
1299
1300         smu7_disable_sclk_mclk_dpm(hwmgr);
1301
1302         PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1303                         "Trying to disable voltage DPM when DPM is disabled",
1304                         return 0);
1305
1306         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);
1307
1308         return 0;
1309 }
1310
1311 static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
1312 {
1313         bool protection;
1314         enum DPM_EVENT_SRC src;
1315
1316         switch (sources) {
1317         default:
1318                 pr_err("Unknown throttling event sources.");
1319                 fallthrough;
1320         case 0:
1321                 protection = false;
1322                 /* src is unused */
1323                 break;
1324         case (1 << PHM_AutoThrottleSource_Thermal):
1325                 protection = true;
1326                 src = DPM_EVENT_SRC_DIGITAL;
1327                 break;
1328         case (1 << PHM_AutoThrottleSource_External):
1329                 protection = true;
1330                 src = DPM_EVENT_SRC_EXTERNAL;
1331                 break;
1332         case (1 << PHM_AutoThrottleSource_External) |
1333                         (1 << PHM_AutoThrottleSource_Thermal):
1334                 protection = true;
1335                 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
1336                 break;
1337         }
1338         /* Order matters - don't enable thermal protection for the wrong source. */
1339         if (protection) {
1340                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
1341                                 DPM_EVENT_SRC, src);
1342                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1343                                 THERMAL_PROTECTION_DIS,
1344                                 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1345                                                 PHM_PlatformCaps_ThermalController));
1346         } else
1347                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1348                                 THERMAL_PROTECTION_DIS, 1);
1349 }
1350
1351 static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1352                 PHM_AutoThrottleSource source)
1353 {
1354         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1355
1356         if (!(data->active_auto_throttle_sources & (1 << source))) {
1357                 data->active_auto_throttle_sources |= 1 << source;
1358                 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1359         }
1360         return 0;
1361 }
1362
1363 static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1364 {
1365         return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1366 }
1367
1368 static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1369                 PHM_AutoThrottleSource source)
1370 {
1371         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1372
1373         if (data->active_auto_throttle_sources & (1 << source)) {
1374                 data->active_auto_throttle_sources &= ~(1 << source);
1375                 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1376         }
1377         return 0;
1378 }
1379
1380 static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1381 {
1382         return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1383 }
1384
1385 static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
1386 {
1387         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1388         data->pcie_performance_request = true;
1389
1390         return 0;
1391 }
1392
1393 static int smu7_program_edc_didt_registers(struct pp_hwmgr *hwmgr,
1394                                            uint32_t *cac_config_regs,
1395                                            AtomCtrl_EDCLeakgeTable *edc_leakage_table)
1396 {
1397         uint32_t data, i = 0;
1398
1399         while (cac_config_regs[i] != 0xFFFFFFFF) {
1400                 data = edc_leakage_table->DIDT_REG[i];
1401                 cgs_write_ind_register(hwmgr->device,
1402                                        CGS_IND_REG__DIDT,
1403                                        cac_config_regs[i],
1404                                        data);
1405                 i++;
1406         }
1407
1408         return 0;
1409 }
1410
1411 static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
1412 {
1413         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1414         int ret = 0;
1415
1416         if (!data->disable_edc_leakage_controller &&
1417             data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
1418             data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
1419                 ret = smu7_program_edc_didt_registers(hwmgr,
1420                                                       DIDTEDCConfig_P12,
1421                                                       &data->edc_leakage_table);
1422                 if (ret)
1423                         return ret;
1424
1425                 ret = smum_send_msg_to_smc(hwmgr,
1426                                            (PPSMC_Msg)PPSMC_MSG_EnableEDCController,
1427                                            NULL);
1428         } else {
1429                 ret = smum_send_msg_to_smc(hwmgr,
1430                                            (PPSMC_Msg)PPSMC_MSG_DisableEDCController,
1431                                            NULL);
1432         }
1433
1434         return ret;
1435 }
1436
1437 static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1438 {
1439         int tmp_result = 0;
1440         int result = 0;
1441
1442         if (smu7_voltage_control(hwmgr)) {
1443                 tmp_result = smu7_enable_voltage_control(hwmgr);
1444                 PP_ASSERT_WITH_CODE(tmp_result == 0,
1445                                 "Failed to enable voltage control!",
1446                                 result = tmp_result);
1447
1448                 tmp_result = smu7_construct_voltage_tables(hwmgr);
1449                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1450                                 "Failed to construct voltage tables!",
1451                                 result = tmp_result);
1452         }
1453         smum_initialize_mc_reg_table(hwmgr);
1454
1455         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1456                         PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1457                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1458                                 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1459
1460         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1461                         PHM_PlatformCaps_ThermalController))
1462                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1463                                 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1464
1465         tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1466         PP_ASSERT_WITH_CODE((0 == tmp_result),
1467                         "Failed to program static screen threshold parameters!",
1468                         result = tmp_result);
1469
1470         tmp_result = smu7_enable_display_gap(hwmgr);
1471         PP_ASSERT_WITH_CODE((0 == tmp_result),
1472                         "Failed to enable display gap!", result = tmp_result);
1473
1474         tmp_result = smu7_program_voting_clients(hwmgr);
1475         PP_ASSERT_WITH_CODE((0 == tmp_result),
1476                         "Failed to program voting clients!", result = tmp_result);
1477
1478         tmp_result = smum_process_firmware_header(hwmgr);
1479         PP_ASSERT_WITH_CODE((0 == tmp_result),
1480                         "Failed to process firmware header!", result = tmp_result);
1481
1482         if (hwmgr->chip_id != CHIP_VEGAM) {
1483                 tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1484                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1485                                 "Failed to initialize switch from ArbF0 to F1!",
1486                                 result = tmp_result);
1487         }
1488
1489         result = smu7_setup_default_dpm_tables(hwmgr);
1490         PP_ASSERT_WITH_CODE(0 == result,
1491                         "Failed to setup default DPM tables!", return result);
1492
1493         tmp_result = smum_init_smc_table(hwmgr);
1494         PP_ASSERT_WITH_CODE((0 == tmp_result),
1495                         "Failed to initialize SMC table!", result = tmp_result);
1496
1497         tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1498         PP_ASSERT_WITH_CODE((0 == tmp_result),
1499                         "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1500
1501         smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
1502
1503         if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1504             hwmgr->chip_id <= CHIP_VEGAM) {
1505                 tmp_result = smu7_populate_edc_leakage_registers(hwmgr);
1506                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1507                                 "Failed to populate edc leakage registers!", result = tmp_result);
1508         }
1509
1510         tmp_result = smu7_enable_sclk_control(hwmgr);
1511         PP_ASSERT_WITH_CODE((0 == tmp_result),
1512                         "Failed to enable SCLK control!", result = tmp_result);
1513
1514         tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1515         PP_ASSERT_WITH_CODE((0 == tmp_result),
1516                         "Failed to enable voltage control!", result = tmp_result);
1517
1518         tmp_result = smu7_enable_ulv(hwmgr);
1519         PP_ASSERT_WITH_CODE((0 == tmp_result),
1520                         "Failed to enable ULV!", result = tmp_result);
1521
1522         tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1523         PP_ASSERT_WITH_CODE((0 == tmp_result),
1524                         "Failed to enable deep sleep master switch!", result = tmp_result);
1525
1526         tmp_result = smu7_enable_didt_config(hwmgr);
1527         PP_ASSERT_WITH_CODE((tmp_result == 0),
1528                         "Failed to enable deep sleep master switch!", result = tmp_result);
1529
1530         tmp_result = smu7_start_dpm(hwmgr);
1531         PP_ASSERT_WITH_CODE((0 == tmp_result),
1532                         "Failed to start DPM!", result = tmp_result);
1533
1534         tmp_result = smu7_enable_smc_cac(hwmgr);
1535         PP_ASSERT_WITH_CODE((0 == tmp_result),
1536                         "Failed to enable SMC CAC!", result = tmp_result);
1537
1538         tmp_result = smu7_enable_power_containment(hwmgr);
1539         PP_ASSERT_WITH_CODE((0 == tmp_result),
1540                         "Failed to enable power containment!", result = tmp_result);
1541
1542         tmp_result = smu7_power_control_set_level(hwmgr);
1543         PP_ASSERT_WITH_CODE((0 == tmp_result),
1544                         "Failed to power control set level!", result = tmp_result);
1545
1546         tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1547         PP_ASSERT_WITH_CODE((0 == tmp_result),
1548                         "Failed to enable thermal auto throttle!", result = tmp_result);
1549
1550         tmp_result = smu7_pcie_performance_request(hwmgr);
1551         PP_ASSERT_WITH_CODE((0 == tmp_result),
1552                         "pcie performance request failed!", result = tmp_result);
1553
1554         return 0;
1555 }
1556
/*
 * smu7_avfs_control - enable or disable AVFS (Adaptive Voltage and
 * Frequency Scaling) via the SMC.
 *
 * FEATURE_STATUS.AVS_ON is checked first so an Enable/DisableAvfs
 * message is only sent when the hardware state must actually change.
 * Returns 0 on success (or when AVFS is unsupported), -EINVAL if the
 * SMC rejects the message.
 */
static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
{
	if (!hwmgr->avfs_supported)
		return 0;

	if (enable) {
		/* Only enable when AVS is currently off. */
		if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
				CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
			PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
					hwmgr, PPSMC_MSG_EnableAvfs, NULL),
					"Failed to enable AVFS!",
					return -EINVAL);
		}
	} else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
			CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
		/* Disable requested and AVS is currently on. */
		PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
				hwmgr, PPSMC_MSG_DisableAvfs, NULL),
				"Failed to disable AVFS!",
				return -EINVAL);
	}

	return 0;
}
1580
1581 static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
1582 {
1583         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1584
1585         if (!hwmgr->avfs_supported)
1586                 return 0;
1587
1588         if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1589                 smu7_avfs_control(hwmgr, false);
1590         } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
1591                 smu7_avfs_control(hwmgr, false);
1592                 smu7_avfs_control(hwmgr, true);
1593         } else {
1594                 smu7_avfs_control(hwmgr, true);
1595         }
1596
1597         return 0;
1598 }
1599
/*
 * smu7_disable_dpm_tasks - tear down dynamic power management.
 *
 * Reverses smu7_enable_dpm_tasks: re-asserts thermal-protection
 * disable, then disables power containment, CAC, DIDT, spread
 * spectrum, thermal auto-throttle, AVFS, DPM itself, deep sleep and
 * ULV, clears the voting clients and finally resets the SMC and
 * arbiter back to defaults.
 *
 * Step failures are latched into @result but the teardown continues so
 * as much state as possible is cleaned up.
 */
static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController))
		PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
				GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);

	tmp_result = smu7_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = smu7_disable_smc_cac(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable SMC CAC!", result = tmp_result);

	tmp_result = smu7_disable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable DIDT!", result = tmp_result);

	/* Turn off engine spread spectrum. */
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
	PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
			GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);

	tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable thermal auto throttle!", result = tmp_result);

	tmp_result = smu7_avfs_control(hwmgr, false);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable AVFS!", result = tmp_result);

	tmp_result = smu7_stop_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep master switch!", result = tmp_result);

	tmp_result = smu7_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ULV!", result = tmp_result);

	tmp_result = smu7_clear_voting_clients(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to clear voting clients!", result = tmp_result);

	tmp_result = smu7_reset_to_default(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to reset to default!", result = tmp_result);

	tmp_result = smu7_force_switch_to_arbf0(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to force to switch arbf0!", result = tmp_result);

	return result;
}
1660
/*
 * smu7_init_dpm_defaults - seed the smu7 backend with default DPM
 * settings before pptable/VBIOS derived values are applied.
 *
 * Populates voting-rights clients, voltage-control methods (probed via
 * atomctrl GPIO/SVID2 queries), activity/hysteresis profile defaults
 * (scaled by VRAM bus width), thermal trip points, PCIe gen/lane
 * bounds and power-gating caps.
 */
static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct amdgpu_device *adev = hwmgr->adev;

	data->dll_default_on = false;
	data->mclk_dpm0_activity_target = 0xa;
	data->vddc_vddgfx_delta = 300;
	data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
	data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
	data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
	data->voting_rights_clients[1]= SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
	data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
	data->voting_rights_clients[3]= SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
	data->voting_rights_clients[4]= SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
	data->voting_rights_clients[5]= SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
	data->voting_rights_clients[6]= SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
	data->voting_rights_clients[7]= SMU7_VOTINGRIGHTSCLIENTS_DFLT7;

	/* The per-clock DPM key features follow the user feature mask. */
	data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
	data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
	data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
	/* need to set voltage control types before EVV patching */
	data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
	data->enable_tdc_limit_feature = true;
	data->enable_pkg_pwr_tracking_feature = true;
	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
	data->current_profile_setting.bupdate_sclk = 1;
	data->current_profile_setting.sclk_up_hyst = 0;
	data->current_profile_setting.sclk_down_hyst = 100;
	data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
	data->current_profile_setting.bupdate_mclk = 1;
	/* MCLK hysteresis/activity defaults scale with the VRAM bus width. */
	if (adev->gmc.vram_width == 256) {
		data->current_profile_setting.mclk_up_hyst = 10;
		data->current_profile_setting.mclk_down_hyst = 60;
		data->current_profile_setting.mclk_activity = 25;
	} else if (adev->gmc.vram_width == 128) {
		data->current_profile_setting.mclk_up_hyst = 5;
		data->current_profile_setting.mclk_down_hyst = 16;
		data->current_profile_setting.mclk_activity = 20;
	} else if (adev->gmc.vram_width == 64) {
		data->current_profile_setting.mclk_up_hyst = 3;
		data->current_profile_setting.mclk_down_hyst = 16;
		data->current_profile_setting.mclk_activity = 20;
	}
	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;

	/* Derive VDDC phase-shed control from SVI2 info (2-bit field,
	 * bits swapped via ((tmp3 << 1) | (tmp3 >> 1)) & 0x3).
	 */
	if (hwmgr->chip_id == CHIP_POLARIS12 || hwmgr->is_kicker) {
		uint8_t tmp1, tmp2;
		uint16_t tmp3 = 0;
		atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
						&tmp3);
		tmp3 = (tmp3 >> 5) & 0x3;
		data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
	} else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
		data->vddc_phase_shed_control = 1;
	} else {
		data->vddc_phase_shed_control = 0;
	}

	/* Thermal trip points in millidegrees Celsius. */
	if (hwmgr->chip_id  == CHIP_HAWAII) {
		data->thermal_temp_setting.temperature_low = 94500;
		data->thermal_temp_setting.temperature_high = 95000;
		data->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		data->thermal_temp_setting.temperature_low = 99500;
		data->thermal_temp_setting.temperature_high = 100000;
		data->thermal_temp_setting.temperature_shutdown = 104000;
	}

	data->fast_watermark_threshold = 100;
	/* Probe how each voltage rail is controlled (SVID2 vs GPIO LUT). */
	if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDGFX)) {
		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
			VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
			data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
		}
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableMVDDControl)) {
		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}

	/* Drop caps for rails that turned out not to be controllable. */
	if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDGFX);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ControlVDDCI)) {
		if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
		else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
	}

	if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_EnableMVDDControl);

	if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	/* NOTE(review): table_info is assumed non-NULL whenever the table
	 * version is not V0 - confirm against the pptable init path.
	 */
	if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
		&& (table_info->cac_dtp_table->usClockStretchAmount != 0))
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ClockStretcher);

	/* NOTE(review): max < min here looks intentional (seed values that
	 * later get widened), but confirm before "fixing" the inversion.
	 */
	data->pcie_gen_performance.max = PP_PCIEGen1;
	data->pcie_gen_performance.min = PP_PCIEGen3;
	data->pcie_gen_power_saving.max = PP_PCIEGen1;
	data->pcie_gen_power_saving.min = PP_PCIEGen3;
	data->pcie_lane_performance.max = 0;
	data->pcie_lane_performance.min = 16;
	data->pcie_lane_power_saving.max = 0;
	data->pcie_lane_power_saving.min = 16;


	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_UVDPowerGating);
	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			      PHM_PlatformCaps_VCEPowerGating);

	/* EDC leakage control is only used on kicker Polaris10/11,
	 * Polaris12 and VegaM parts.
	 */
	data->disable_edc_leakage_controller = true;
	if (((adev->asic_type == CHIP_POLARIS10) && hwmgr->is_kicker) ||
	    ((adev->asic_type == CHIP_POLARIS11) && hwmgr->is_kicker) ||
	    (adev->asic_type == CHIP_POLARIS12) ||
	    (adev->asic_type == CHIP_VEGAM))
		data->disable_edc_leakage_controller = false;
}
1815
1816 static int smu7_calculate_ro_range(struct pp_hwmgr *hwmgr)
1817 {
1818         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1819         struct amdgpu_device *adev = hwmgr->adev;
1820         uint32_t asicrev1, evv_revision, max, min;
1821
1822         atomctrl_read_efuse(hwmgr, STRAP_EVV_REVISION_LSB, STRAP_EVV_REVISION_MSB,
1823                         &evv_revision);
1824
1825         atomctrl_read_efuse(hwmgr, 568, 579, &asicrev1);
1826
1827         if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) ||
1828             ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
1829                 min = 1200;
1830                 max = 2500;
1831         } else if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
1832                    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
1833                 min = 900;
1834                 max= 2100;
1835         } else if (hwmgr->chip_id == CHIP_POLARIS10) {
1836                 if (adev->pdev->subsystem_vendor == 0x106B) {
1837                         min = 1000;
1838                         max = 2300;
1839                 } else {
1840                         if (evv_revision == 0) {
1841                                 min = 1000;
1842                                 max = 2300;
1843                         } else if (evv_revision == 1) {
1844                                 if (asicrev1 == 326) {
1845                                         min = 1200;
1846                                         max = 2500;
1847                                         /* TODO: PATCH RO in VBIOS */
1848                                 } else {
1849                                         min = 1200;
1850                                         max = 2000;
1851                                 }
1852                         } else if (evv_revision == 2) {
1853                                 min = 1200;
1854                                 max = 2500;
1855                         }
1856                 }
1857         } else if ((hwmgr->chip_id == CHIP_POLARIS11) ||
1858                    (hwmgr->chip_id == CHIP_POLARIS12)) {
1859                 min = 1100;
1860                 max = 2100;
1861         }
1862
1863         data->ro_range_minimum = min;
1864         data->ro_range_maximum = max;
1865
1866         /* TODO: PATCH RO in VBIOS here */
1867
1868         return 0;
1869 }
1870
/**
 * smu7_get_evv_voltages - get leakage VDDC/VDDGFX based on leakage ID
 * @hwmgr: the address of the powerplay hardware manager
 *
 * Return: 0 on success, -EINVAL on an out-of-range EVV voltage or a
 * missing pptable.
 */
static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint16_t vv_id;
	uint16_t vddc = 0;
	uint16_t vddgfx = 0;
	uint16_t i, j;
	uint32_t sclk = 0;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;

	/* Polaris parts need the RO fuse range before evaluating EVV. */
	if (hwmgr->chip_id == CHIP_POLARIS10 ||
	    hwmgr->chip_id == CHIP_POLARIS11 ||
	    hwmgr->chip_id == CHIP_POLARIS12)
		smu7_calculate_ro_range(hwmgr);

	/* Resolve every virtual (leakage) voltage ID to a real voltage. */
	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;

		if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
			/* Split-rail (VDDGFX) path; only taken for V1 tables. */
			if ((hwmgr->pp_table_version == PP_TABLE_V1)
			    && !phm_get_sclk_for_voltage_evv(hwmgr,
						table_info->vddgfx_lookup_table, vv_id, &sclk)) {
				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
							PHM_PlatformCaps_ClockStretcher)) {
					sclk_table = table_info->vdd_dep_on_sclk;

					/* Bump sclk past a stretcher-disabled entry. */
					for (j = 1; j < sclk_table->count; j++) {
						if (sclk_table->entries[j].clk == sclk &&
								sclk_table->entries[j].cks_enable == 0) {
							sclk += 5000;
							break;
						}
					}
				}
				if (0 == atomctrl_get_voltage_evv_on_sclk
				    (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
				     vv_id, &vddgfx)) {
					/* need to make sure vddgfx is less than 2v or else, it could burn the ASIC. */
					PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);

					/* the voltage should not be zero nor equal to leakage ID */
					if (vddgfx != 0 && vddgfx != vv_id) {
						data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
						data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
						data->vddcgfx_leakage.count++;
					}
				} else {
					pr_info("Error retrieving EVV voltage value!\n");
				}
			}
		} else {
			/* Single-rail (VDDC) path; V0 tables always land here. */
			if ((hwmgr->pp_table_version == PP_TABLE_V0)
				|| !phm_get_sclk_for_voltage_evv(hwmgr,
					table_info->vddc_lookup_table, vv_id, &sclk)) {
				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
						PHM_PlatformCaps_ClockStretcher)) {
					if (table_info == NULL)
						return -EINVAL;
					sclk_table = table_info->vdd_dep_on_sclk;

					/* Bump sclk past a stretcher-disabled entry. */
					for (j = 1; j < sclk_table->count; j++) {
						if (sclk_table->entries[j].clk == sclk &&
								sclk_table->entries[j].cks_enable == 0) {
							sclk += 5000;
							break;
						}
					}
				}

				if (phm_get_voltage_evv_on_sclk(hwmgr,
							VOLTAGE_TYPE_VDDC,
							sclk, vv_id, &vddc) == 0) {
					/* reject voltages that could damage the ASIC (>= 2V) */
					if (vddc >= 2000 || vddc == 0)
						return -EINVAL;
				} else {
					pr_debug("failed to retrieving EVV voltage!\n");
					continue;
				}

				/* the voltage should not be zero nor equal to leakage ID */
				if (vddc != 0 && vddc != vv_id) {
					data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
					data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
					data->vddc_leakage.count++;
				}
			}
		}
	}

	return 0;
}
1970
/**
 * smu7_patch_ppt_v1_with_vdd_leakage - change virtual leakage voltage to actual value
 * @hwmgr: the address of the powerplay hardware manager
 * @voltage: pointer to the voltage to be patched in place
 * @leakage_table: pointer to the leakage table
 */
1978 static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
1979                 uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
1980 {
1981         uint32_t index;
1982
1983         /* search for leakage voltage ID 0xff01 ~ 0xff08 */
1984         for (index = 0; index < leakage_table->count; index++) {
1985                 /* if this voltage matches a leakage voltage ID */
1986                 /* patch with actual leakage voltage */
1987                 if (leakage_table->leakage_id[index] == *voltage) {
1988                         *voltage = leakage_table->actual_voltage[index];
1989                         break;
1990                 }
1991         }
1992
1993         if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
1994                 pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
1995 }
1996
1997 /**
1998 * Patch voltage lookup table by EVV leakages.
1999 *
2000 * @param     hwmgr  the address of the powerplay hardware manager.
2001 * @param     pointer to voltage lookup table
2002 * @param     pointer to leakage table
2003 * @return     always 0
2004 */
2005 static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
2006                 phm_ppt_v1_voltage_lookup_table *lookup_table,
2007                 struct smu7_leakage_voltage *leakage_table)
2008 {
2009         uint32_t i;
2010
2011         for (i = 0; i < lookup_table->count; i++)
2012                 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2013                                 &lookup_table->entries[i].us_vdd, leakage_table);
2014
2015         return 0;
2016 }
2017
2018 static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
2019                 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
2020                 uint16_t *vddc)
2021 {
2022         struct phm_ppt_v1_information *table_info =
2023                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2024         smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
2025         hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
2026                         table_info->max_clock_voltage_on_dc.vddc;
2027         return 0;
2028 }
2029
2030 static int smu7_patch_voltage_dependency_tables_with_lookup_table(
2031                 struct pp_hwmgr *hwmgr)
2032 {
2033         uint8_t entry_id;
2034         uint8_t voltage_id;
2035         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2036         struct phm_ppt_v1_information *table_info =
2037                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2038
2039         struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
2040                         table_info->vdd_dep_on_sclk;
2041         struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
2042                         table_info->vdd_dep_on_mclk;
2043         struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2044                         table_info->mm_dep_table;
2045
2046         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2047                 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2048                         voltage_id = sclk_table->entries[entry_id].vddInd;
2049                         sclk_table->entries[entry_id].vddgfx =
2050                                 table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
2051                 }
2052         } else {
2053                 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2054                         voltage_id = sclk_table->entries[entry_id].vddInd;
2055                         sclk_table->entries[entry_id].vddc =
2056                                 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2057                 }
2058         }
2059
2060         for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
2061                 voltage_id = mclk_table->entries[entry_id].vddInd;
2062                 mclk_table->entries[entry_id].vddc =
2063                         table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2064         }
2065
2066         for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
2067                 voltage_id = mm_table->entries[entry_id].vddcInd;
2068                 mm_table->entries[entry_id].vddc =
2069                         table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2070         }
2071
2072         return 0;
2073
2074 }
2075
2076 static int phm_add_voltage(struct pp_hwmgr *hwmgr,
2077                         phm_ppt_v1_voltage_lookup_table *look_up_table,
2078                         phm_ppt_v1_voltage_lookup_record *record)
2079 {
2080         uint32_t i;
2081
2082         PP_ASSERT_WITH_CODE((NULL != look_up_table),
2083                 "Lookup Table empty.", return -EINVAL);
2084         PP_ASSERT_WITH_CODE((0 != look_up_table->count),
2085                 "Lookup Table empty.", return -EINVAL);
2086
2087         i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
2088         PP_ASSERT_WITH_CODE((i >= look_up_table->count),
2089                 "Lookup Table is full.", return -EINVAL);
2090
2091         /* This is to avoid entering duplicate calculated records. */
2092         for (i = 0; i < look_up_table->count; i++) {
2093                 if (look_up_table->entries[i].us_vdd == record->us_vdd) {
2094                         if (look_up_table->entries[i].us_calculated == 1)
2095                                 return 0;
2096                         break;
2097                 }
2098         }
2099
2100         look_up_table->entries[i].us_calculated = 1;
2101         look_up_table->entries[i].us_vdd = record->us_vdd;
2102         look_up_table->entries[i].us_cac_low = record->us_cac_low;
2103         look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
2104         look_up_table->entries[i].us_cac_high = record->us_cac_high;
2105         /* Only increment the count when we're appending, not replacing duplicate entry. */
2106         if (i == look_up_table->count)
2107                 look_up_table->count++;
2108
2109         return 0;
2110 }
2111
2112
2113 static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
2114 {
2115         uint8_t entry_id;
2116         struct phm_ppt_v1_voltage_lookup_record v_record;
2117         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2118         struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2119
2120         phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
2121         phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
2122
2123         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2124                 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2125                         if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
2126                                 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
2127                                         sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
2128                         else
2129                                 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
2130                                         sclk_table->entries[entry_id].vdd_offset;
2131
2132                         sclk_table->entries[entry_id].vddc =
2133                                 v_record.us_cac_low = v_record.us_cac_mid =
2134                                 v_record.us_cac_high = v_record.us_vdd;
2135
2136                         phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
2137                 }
2138
2139                 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
2140                         if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
2141                                 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
2142                                         mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
2143                         else
2144                                 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
2145                                         mclk_table->entries[entry_id].vdd_offset;
2146
2147                         mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
2148                                 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
2149                         phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
2150                 }
2151         }
2152         return 0;
2153 }
2154
2155 static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
2156 {
2157         uint8_t entry_id;
2158         struct phm_ppt_v1_voltage_lookup_record v_record;
2159         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2160         struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2161         phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
2162
2163         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2164                 for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
2165                         if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
2166                                 v_record.us_vdd = mm_table->entries[entry_id].vddc +
2167                                         mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
2168                         else
2169                                 v_record.us_vdd = mm_table->entries[entry_id].vddc +
2170                                         mm_table->entries[entry_id].vddgfx_offset;
2171
2172                         /* Add the calculated VDDGFX to the VDDGFX lookup table */
2173                         mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
2174                                 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
2175                         phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
2176                 }
2177         }
2178         return 0;
2179 }
2180
2181 static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
2182                 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
2183 {
2184         uint32_t table_size, i, j;
2185         table_size = lookup_table->count;
2186
2187         PP_ASSERT_WITH_CODE(0 != lookup_table->count,
2188                 "Lookup table is empty", return -EINVAL);
2189
2190         /* Sorting voltages */
2191         for (i = 0; i < table_size - 1; i++) {
2192                 for (j = i + 1; j > 0; j--) {
2193                         if (lookup_table->entries[j].us_vdd <
2194                                         lookup_table->entries[j - 1].us_vdd) {
2195                                 swap(lookup_table->entries[j - 1],
2196                                      lookup_table->entries[j]);
2197                         }
2198                 }
2199         }
2200
2201         return 0;
2202 }
2203
2204 static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
2205 {
2206         int result = 0;
2207         int tmp_result;
2208         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2209         struct phm_ppt_v1_information *table_info =
2210                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2211
2212         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2213                 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2214                         table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
2215                 if (tmp_result != 0)
2216                         result = tmp_result;
2217
2218                 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2219                         &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
2220         } else {
2221
2222                 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2223                                 table_info->vddc_lookup_table, &(data->vddc_leakage));
2224                 if (tmp_result)
2225                         result = tmp_result;
2226
2227                 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
2228                                 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
2229                 if (tmp_result)
2230                         result = tmp_result;
2231         }
2232
2233         tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
2234         if (tmp_result)
2235                 result = tmp_result;
2236
2237         tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
2238         if (tmp_result)
2239                 result = tmp_result;
2240
2241         tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
2242         if (tmp_result)
2243                 result = tmp_result;
2244
2245         tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
2246         if (tmp_result)
2247                 result = tmp_result;
2248
2249         tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
2250         if (tmp_result)
2251                 result = tmp_result;
2252
2253         return result;
2254 }
2255
2256 static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
2257 {
2258         struct phm_ppt_v1_information *table_info =
2259                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2260
2261         struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2262                                                 table_info->vdd_dep_on_sclk;
2263         struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
2264                                                 table_info->vdd_dep_on_mclk;
2265
2266         PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
2267                 "VDD dependency on SCLK table is missing.",
2268                 return -EINVAL);
2269         PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
2270                 "VDD dependency on SCLK table has to have is missing.",
2271                 return -EINVAL);
2272
2273         PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
2274                 "VDD dependency on MCLK table is missing",
2275                 return -EINVAL);
2276         PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
2277                 "VDD dependency on MCLK table has to have is missing.",
2278                 return -EINVAL);
2279
2280         table_info->max_clock_voltage_on_ac.sclk =
2281                 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
2282         table_info->max_clock_voltage_on_ac.mclk =
2283                 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
2284         table_info->max_clock_voltage_on_ac.vddc =
2285                 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2286         table_info->max_clock_voltage_on_ac.vddci =
2287                 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
2288
2289         hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
2290         hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
2291         hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
2292         hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
2293
2294         return 0;
2295 }
2296
2297 static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
2298 {
2299         struct phm_ppt_v1_information *table_info =
2300                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
2301         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
2302         struct phm_ppt_v1_voltage_lookup_table *lookup_table;
2303         uint32_t i;
2304         uint32_t hw_revision, sub_vendor_id, sub_sys_id;
2305         struct amdgpu_device *adev = hwmgr->adev;
2306
2307         if (table_info != NULL) {
2308                 dep_mclk_table = table_info->vdd_dep_on_mclk;
2309                 lookup_table = table_info->vddc_lookup_table;
2310         } else
2311                 return 0;
2312
2313         hw_revision = adev->pdev->revision;
2314         sub_sys_id = adev->pdev->subsystem_device;
2315         sub_vendor_id = adev->pdev->subsystem_vendor;
2316
2317         if (adev->pdev->device == 0x67DF && hw_revision == 0xC7 &&
2318             ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
2319              (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
2320              (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
2321
2322                 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
2323                                               CGS_IND_REG__SMC,
2324                                               PWR_CKS_CNTL,
2325                                               CKS_STRETCH_AMOUNT,
2326                                               0x3);
2327
2328                 if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
2329                         return 0;
2330
2331                 for (i = 0; i < lookup_table->count; i++) {
2332                         if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
2333                                 dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
2334                                 return 0;
2335                         }
2336                 }
2337         }
2338         return 0;
2339 }
2340
/*
 * smu7_thermal_parameter_init - program the VDDC PCC GPIO field and seed
 * fan/thermal parameters from the pptable into hwmgr state.
 *
 * Returns 0 in all cases: a missing GPIO pin assignment or pptable simply
 * skips the corresponding initialization.
 */
static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
{
	struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
	uint32_t temp_reg;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);


	/* If the VDDC PCC GPIO pin is assigned, map its bit shift onto the
	 * corresponding CNB_PWRMGT_CNTL field via read-modify-write.
	 */
	if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
		temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
		switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
		case 0:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
			break;
		case 1:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
			break;
		case 2:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
			break;
		case 3:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
			break;
		case 4:
			temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
			break;
		default:
			/* unsupported bit shift: write the register back unchanged */
			break;
		}
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
	}

	if (table_info == NULL)
		return 0;

	/* Populate fan limits only when a target temperature is set and a
	 * fan control mode is configured.
	 */
	if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
		hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
			(uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;

		hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;

		/* Derate the default target operating temp by 50, clamped at 0. */
		table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
								(table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;

		table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
		table_info->cac_dtp_table->usOperatingTempStep = 1;
		table_info->cac_dtp_table->usOperatingTempHyst = 1;

		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;

		hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
			       hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;

		/* Mirror the (derated) pptable DTP values into dyn_state. */
		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
			       table_info->cac_dtp_table->usOperatingTempMinLimit;

		hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
			       table_info->cac_dtp_table->usOperatingTempMaxLimit;

		hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
			       table_info->cac_dtp_table->usDefaultTargetOperatingTemp;

		hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
			       table_info->cac_dtp_table->usOperatingTempStep;

		hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
			       table_info->cac_dtp_table->usTargetOperatingTemp;
		/* Fuzzy fan control is opt-in via the OD feature mask. */
		if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_ODFuzzyFanControlSupport);
	}

	return 0;
}
2427
2428 /**
2429  * Change virtual leakage voltage to actual value.
2430  *
2431  * @param     hwmgr  the address of the powerplay hardware manager.
2432  * @param     pointer to changing voltage
2433  * @param     pointer to leakage table
2434  */
2435 static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2436                 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2437 {
2438         uint32_t index;
2439
2440         /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2441         for (index = 0; index < leakage_table->count; index++) {
2442                 /* if this voltage matches a leakage voltage ID */
2443                 /* patch with actual leakage voltage */
2444                 if (leakage_table->leakage_id[index] == *voltage) {
2445                         *voltage = leakage_table->actual_voltage[index];
2446                         break;
2447                 }
2448         }
2449
2450         if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2451                 pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
2452 }
2453
2454
2455 static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2456                               struct phm_clock_voltage_dependency_table *tab)
2457 {
2458         uint16_t i;
2459         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2460
2461         if (tab)
2462                 for (i = 0; i < tab->count; i++)
2463                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2464                                                 &data->vddc_leakage);
2465
2466         return 0;
2467 }
2468
2469 static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2470                                struct phm_clock_voltage_dependency_table *tab)
2471 {
2472         uint16_t i;
2473         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2474
2475         if (tab)
2476                 for (i = 0; i < tab->count; i++)
2477                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2478                                                         &data->vddci_leakage);
2479
2480         return 0;
2481 }
2482
2483 static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2484                                   struct phm_vce_clock_voltage_dependency_table *tab)
2485 {
2486         uint16_t i;
2487         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2488
2489         if (tab)
2490                 for (i = 0; i < tab->count; i++)
2491                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2492                                                         &data->vddc_leakage);
2493
2494         return 0;
2495 }
2496
2497
2498 static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2499                                   struct phm_uvd_clock_voltage_dependency_table *tab)
2500 {
2501         uint16_t i;
2502         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2503
2504         if (tab)
2505                 for (i = 0; i < tab->count; i++)
2506                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2507                                                         &data->vddc_leakage);
2508
2509         return 0;
2510 }
2511
2512 static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2513                                          struct phm_phase_shedding_limits_table *tab)
2514 {
2515         uint16_t i;
2516         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2517
2518         if (tab)
2519                 for (i = 0; i < tab->count; i++)
2520                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2521                                                         &data->vddc_leakage);
2522
2523         return 0;
2524 }
2525
2526 static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2527                                    struct phm_samu_clock_voltage_dependency_table *tab)
2528 {
2529         uint16_t i;
2530         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2531
2532         if (tab)
2533                 for (i = 0; i < tab->count; i++)
2534                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2535                                                         &data->vddc_leakage);
2536
2537         return 0;
2538 }
2539
2540 static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2541                                   struct phm_acp_clock_voltage_dependency_table *tab)
2542 {
2543         uint16_t i;
2544         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2545
2546         if (tab)
2547                 for (i = 0; i < tab->count; i++)
2548                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2549                                         &data->vddc_leakage);
2550
2551         return 0;
2552 }
2553
2554 static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2555                                   struct phm_clock_and_voltage_limits *tab)
2556 {
2557         uint32_t vddc, vddci;
2558         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2559
2560         if (tab) {
2561                 vddc = tab->vddc;
2562                 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2563                                                    &data->vddc_leakage);
2564                 tab->vddc = vddc;
2565                 vddci = tab->vddci;
2566                 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2567                                                    &data->vddci_leakage);
2568                 tab->vddci = vddci;
2569         }
2570
2571         return 0;
2572 }
2573
2574 static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2575 {
2576         uint32_t i;
2577         uint32_t vddc;
2578         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2579
2580         if (tab) {
2581                 for (i = 0; i < tab->count; i++) {
2582                         vddc = (uint32_t)(tab->entries[i].Vddc);
2583                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2584                         tab->entries[i].Vddc = (uint16_t)vddc;
2585                 }
2586         }
2587
2588         return 0;
2589 }
2590
2591 static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2592 {
2593         int tmp;
2594
2595         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2596         if (tmp)
2597                 return -EINVAL;
2598
2599         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2600         if (tmp)
2601                 return -EINVAL;
2602
2603         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2604         if (tmp)
2605                 return -EINVAL;
2606
2607         tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2608         if (tmp)
2609                 return -EINVAL;
2610
2611         tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2612         if (tmp)
2613                 return -EINVAL;
2614
2615         tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2616         if (tmp)
2617                 return -EINVAL;
2618
2619         tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2620         if (tmp)
2621                 return -EINVAL;
2622
2623         tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2624         if (tmp)
2625                 return -EINVAL;
2626
2627         tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2628         if (tmp)
2629                 return -EINVAL;
2630
2631         tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2632         if (tmp)
2633                 return -EINVAL;
2634
2635         tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2636         if (tmp)
2637                 return -EINVAL;
2638
2639         tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2640         if (tmp)
2641                 return -EINVAL;
2642
2643         return 0;
2644 }
2645
2646
2647 static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2648 {
2649         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2650
2651         struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2652         struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2653         struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2654
2655         PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2656                 "VDDC dependency on SCLK table is missing. This table is mandatory",
2657                 return -EINVAL);
2658         PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2659                 "VDDC dependency on SCLK table has to have is missing. This table is mandatory",
2660                 return -EINVAL);
2661
2662         PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2663                 "VDDC dependency on MCLK table is missing. This table is mandatory",
2664                 return -EINVAL);
2665         PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2666                 "VDD dependency on MCLK table has to have is missing. This table is mandatory",
2667                 return -EINVAL);
2668
2669         data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2670         data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2671
2672         hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2673                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2674         hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2675                 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2676         hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2677                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2678
2679         if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2680                 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2681                 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2682         }
2683
2684         if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
2685                 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2686
2687         return 0;
2688 }
2689
2690 static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
2691 {
2692         kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2693         hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
2694         kfree(hwmgr->backend);
2695         hwmgr->backend = NULL;
2696
2697         return 0;
2698 }
2699
2700 static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
2701 {
2702         uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
2703         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2704         int i;
2705
2706         if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
2707                 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
2708                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2709                         if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
2710                                                                 virtual_voltage_id,
2711                                                                 efuse_voltage_id) == 0) {
2712                                 if (vddc != 0 && vddc != virtual_voltage_id) {
2713                                         data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
2714                                         data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
2715                                         data->vddc_leakage.count++;
2716                                 }
2717                                 if (vddci != 0 && vddci != virtual_voltage_id) {
2718                                         data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
2719                                         data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
2720                                         data->vddci_leakage.count++;
2721                                 }
2722                         }
2723                 }
2724         }
2725         return 0;
2726 }
2727
2728 #define LEAKAGE_ID_MSB                  463
2729 #define LEAKAGE_ID_LSB                  454
2730
2731 static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
2732 {
2733         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2734         uint32_t efuse;
2735         uint16_t offset;
2736         int ret = 0;
2737
2738         if (data->disable_edc_leakage_controller)
2739                 return 0;
2740
2741         ret = atomctrl_get_edc_hilo_leakage_offset_table(hwmgr,
2742                                                          &data->edc_hilo_leakage_offset_from_vbios);
2743         if (ret)
2744                 return ret;
2745
2746         if (data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
2747             data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
2748                 atomctrl_read_efuse(hwmgr, LEAKAGE_ID_LSB, LEAKAGE_ID_MSB, &efuse);
2749                 if (efuse < data->edc_hilo_leakage_offset_from_vbios.usHiLoLeakageThreshold)
2750                         offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset;
2751                 else
2752                         offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset;
2753
2754                 ret = atomctrl_get_edc_leakage_table(hwmgr,
2755                                                      &data->edc_leakage_table,
2756                                                      offset);
2757                 if (ret)
2758                         return ret;
2759         }
2760
2761         return ret;
2762 }
2763
2764 static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2765 {
2766         struct smu7_hwmgr *data;
2767         int result = 0;
2768
2769         data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2770         if (data == NULL)
2771                 return -ENOMEM;
2772
2773         hwmgr->backend = data;
2774         smu7_patch_voltage_workaround(hwmgr);
2775         smu7_init_dpm_defaults(hwmgr);
2776
2777         /* Get leakage voltage based on leakage ID. */
2778         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2779                         PHM_PlatformCaps_EVV)) {
2780                 result = smu7_get_evv_voltages(hwmgr);
2781                 if (result) {
2782                         pr_info("Get EVV Voltage Failed.  Abort Driver loading!\n");
2783                         return -EINVAL;
2784                 }
2785         } else {
2786                 smu7_get_elb_voltages(hwmgr);
2787         }
2788
2789         if (hwmgr->pp_table_version == PP_TABLE_V1) {
2790                 smu7_complete_dependency_tables(hwmgr);
2791                 smu7_set_private_data_based_on_pptable_v1(hwmgr);
2792         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2793                 smu7_patch_dependency_tables_with_leakage(hwmgr);
2794                 smu7_set_private_data_based_on_pptable_v0(hwmgr);
2795         }
2796
2797         /* Initalize Dynamic State Adjustment Rule Settings */
2798         result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2799
2800         if (0 == result) {
2801                 struct amdgpu_device *adev = hwmgr->adev;
2802
2803                 data->is_tlu_enabled = false;
2804
2805                 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2806                                                         SMU7_MAX_HARDWARE_POWERLEVELS;
2807                 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2808                 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2809
2810                 data->pcie_gen_cap = adev->pm.pcie_gen_mask;
2811                 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2812                         data->pcie_spc_cap = 20;
2813                 data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
2814
2815                 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2816 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
2817                 hwmgr->platform_descriptor.clockStep.engineClock = 500;
2818                 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2819                 smu7_thermal_parameter_init(hwmgr);
2820         } else {
2821                 /* Ignore return value in here, we are cleaning up a mess. */
2822                 smu7_hwmgr_backend_fini(hwmgr);
2823         }
2824
2825         result = smu7_update_edc_leakage_table(hwmgr);
2826         if (result)
2827                 return result;
2828
2829         return 0;
2830 }
2831
2832 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2833 {
2834         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2835         uint32_t level, tmp;
2836
2837         if (!data->pcie_dpm_key_disabled) {
2838                 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2839                         level = 0;
2840                         tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2841                         while (tmp >>= 1)
2842                                 level++;
2843
2844                         if (level)
2845                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2846                                                 PPSMC_MSG_PCIeDPM_ForceLevel, level,
2847                                                 NULL);
2848                 }
2849         }
2850
2851         if (!data->sclk_dpm_key_disabled) {
2852                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2853                         level = 0;
2854                         tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2855                         while (tmp >>= 1)
2856                                 level++;
2857
2858                         if (level)
2859                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2860                                                 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2861                                                 (1 << level),
2862                                                 NULL);
2863                 }
2864         }
2865
2866         if (!data->mclk_dpm_key_disabled) {
2867                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2868                         level = 0;
2869                         tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
2870                         while (tmp >>= 1)
2871                                 level++;
2872
2873                         if (level)
2874                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2875                                                 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2876                                                 (1 << level),
2877                                                 NULL);
2878                 }
2879         }
2880
2881         return 0;
2882 }
2883
2884 static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2885 {
2886         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2887
2888         if (hwmgr->pp_table_version == PP_TABLE_V1)
2889                 phm_apply_dal_min_voltage_request(hwmgr);
2890 /* TO DO  for v0 iceland and Ci*/
2891
2892         if (!data->sclk_dpm_key_disabled) {
2893                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2894                         smum_send_msg_to_smc_with_parameter(hwmgr,
2895                                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
2896                                         data->dpm_level_enable_mask.sclk_dpm_enable_mask,
2897                                         NULL);
2898         }
2899
2900         if (!data->mclk_dpm_key_disabled) {
2901                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2902                         smum_send_msg_to_smc_with_parameter(hwmgr,
2903                                         PPSMC_MSG_MCLKDPM_SetEnabledMask,
2904                                         data->dpm_level_enable_mask.mclk_dpm_enable_mask,
2905                                         NULL);
2906         }
2907
2908         return 0;
2909 }
2910
2911 static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2912 {
2913         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2914
2915         if (!smum_is_dpm_running(hwmgr))
2916                 return -EINVAL;
2917
2918         if (!data->pcie_dpm_key_disabled) {
2919                 smum_send_msg_to_smc(hwmgr,
2920                                 PPSMC_MSG_PCIeDPM_UnForceLevel,
2921                                 NULL);
2922         }
2923
2924         return smu7_upload_dpm_level_enable_mask(hwmgr);
2925 }
2926
2927 static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2928 {
2929         struct smu7_hwmgr *data =
2930                         (struct smu7_hwmgr *)(hwmgr->backend);
2931         uint32_t level;
2932
2933         if (!data->sclk_dpm_key_disabled)
2934                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2935                         level = phm_get_lowest_enabled_level(hwmgr,
2936                                                               data->dpm_level_enable_mask.sclk_dpm_enable_mask);
2937                         smum_send_msg_to_smc_with_parameter(hwmgr,
2938                                                             PPSMC_MSG_SCLKDPM_SetEnabledMask,
2939                                                             (1 << level),
2940                                                             NULL);
2941
2942         }
2943
2944         if (!data->mclk_dpm_key_disabled) {
2945                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2946                         level = phm_get_lowest_enabled_level(hwmgr,
2947                                                               data->dpm_level_enable_mask.mclk_dpm_enable_mask);
2948                         smum_send_msg_to_smc_with_parameter(hwmgr,
2949                                                             PPSMC_MSG_MCLKDPM_SetEnabledMask,
2950                                                             (1 << level),
2951                                                             NULL);
2952                 }
2953         }
2954
2955         if (!data->pcie_dpm_key_disabled) {
2956                 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2957                         level = phm_get_lowest_enabled_level(hwmgr,
2958                                                               data->dpm_level_enable_mask.pcie_dpm_enable_mask);
2959                         smum_send_msg_to_smc_with_parameter(hwmgr,
2960                                                             PPSMC_MSG_PCIeDPM_ForceLevel,
2961                                                             (level),
2962                                                             NULL);
2963                 }
2964         }
2965
2966         return 0;
2967 }
2968
/*
 * Choose sclk/mclk/pcie DPM level indices for the PROFILE_* forced-DPM
 * modes, based on the golden (unmodified) DPM tables, and record the
 * chosen clocks as hwmgr->pstate_sclk/pstate_mclk.
 *
 * @level:     the requested forced level (MIN_SCLK / MIN_MCLK / PEAK / ...).
 * @sclk_mask: out, index into the sclk DPM table.
 * @mclk_mask: out, index into the mclk DPM table.
 * @pcie_mask: out, always the top pcie speed level.
 *
 * Returns 0 on success, -EINVAL if the golden mclk table is empty.
 */
static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
{
	uint32_t percentage;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
	int32_t tmp_mclk;
	int32_t tmp_sclk;
	int32_t count;

	if (golden_dpm_table->mclk_table.count < 1)
		return -EINVAL;

	/* Ratio of top sclk to top mclk; used to derive a target sclk
	 * from the chosen mclk below.
	 */
	percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
			golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;

	if (golden_dpm_table->mclk_table.count == 1) {
		/* Single mclk level: override the ratio with a fixed 70%. */
		percentage = 70;
		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
		*mclk_mask = golden_dpm_table->mclk_table.count - 1;
	} else {
		/* Otherwise profile at the second-highest mclk level. */
		tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
		*mclk_mask = golden_dpm_table->mclk_table.count - 2;
	}

	tmp_sclk = tmp_mclk * percentage / 100;

	if (hwmgr->pp_table_version == PP_TABLE_V0) {
		/* Snap tmp_sclk down to the highest table entry not above it. */
		for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
			count >= 0; count--) {
			if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
				tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
				*sclk_mask = count;
				break;
			}
		}
		/* No entry matched, or caller asked for minimum sclk: use entry 0. */
		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			*sclk_mask = 0;
			tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
		}

		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			*sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
	} else if (hwmgr->pp_table_version == PP_TABLE_V1) {
		struct phm_ppt_v1_information *table_info =
				(struct phm_ppt_v1_information *)(hwmgr->pptable);

		/* Same snapping logic against the v1 dependency table. */
		for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
			if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
				tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
				*sclk_mask = count;
				break;
			}
		}
		if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			*sclk_mask = 0;
			tmp_sclk =  table_info->vdd_dep_on_sclk->entries[0].clk;
		}

		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
	}

	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
		*mclk_mask = 0;
	else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
		*mclk_mask = golden_dpm_table->mclk_table.count - 1;

	*pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
	/* NOTE(review): pstate_sclk/mclk are the snapped values, which may
	 * not match *sclk_mask/*mclk_mask after the PEAK/MIN overrides above
	 * — confirm this is intended.
	 */
	hwmgr->pstate_sclk = tmp_sclk;
	hwmgr->pstate_mclk = tmp_mclk;

	return 0;
}
3043
/*
 * Apply an AMD_DPM_FORCED_LEVEL_* policy: force highest/lowest levels,
 * return to auto, or pin the profiling clocks computed by
 * smu7_get_profiling_clk(). Entering PROFILE_PEAK also drives the fan to
 * 100%; leaving it restores the default fan control.
 * Returns 0 on success or the first error encountered.
 */
static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask = 0;
	uint32_t mclk_mask = 0;
	uint32_t pcie_mask = 0;

	/* Lazily populate hwmgr->pstate_sclk/pstate_mclk on first use. */
	if (hwmgr->pstate_sclk == 0)
		smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = smu7_force_dpm_highest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = smu7_force_dpm_lowest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		ret = smu7_unforce_dpm_levels(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		/* Pin each domain to the single profiling level. */
		ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
		if (ret)
			return ret;
		smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
		smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
		smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}

	/* Adjust the fan only on transitions into or out of PROFILE_PEAK;
	 * hwmgr->dpm_level still holds the previous level here.
	 */
	if (!ret) {
		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
		else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
	}
	return ret;
}
3090
/* Size the framework must allocate for one smu7 hardware power state. */
static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct smu7_power_state);
}
3095
3096 static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
3097                                  uint32_t vblank_time_us)
3098 {
3099         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3100         uint32_t switch_limit_us;
3101
3102         switch (hwmgr->chip_id) {
3103         case CHIP_POLARIS10:
3104         case CHIP_POLARIS11:
3105         case CHIP_POLARIS12:
3106                 if (hwmgr->is_kicker)
3107                         switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
3108                 else
3109                         switch_limit_us = data->is_memory_gddr5 ? 200 : 150;
3110                 break;
3111         case CHIP_VEGAM:
3112                 switch_limit_us = 30;
3113                 break;
3114         default:
3115                 switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
3116                 break;
3117         }
3118
3119         if (vblank_time_us < switch_limit_us)
3120                 return true;
3121         else
3122                 return false;
3123 }
3124
/*
 * Adjust the requested power state in place before it is programmed:
 * clamp clocks to AC/DC limits, raise them to the display's minimum
 * requirements, flatten both levels onto one mclk when mclk switching
 * must be disabled, and pin everything when StablePState is enabled.
 * The state is expected to carry exactly two performance levels
 * (low = [0], high = [1]).
 */
static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *request_ps,
			const struct pp_power_state *current_ps)
{
	struct amdgpu_device *adev = hwmgr->adev;
	struct smu7_power_state *smu7_ps =
				cast_phw_smu7_power_state(&request_ps->hardware);
	uint32_t sclk;
	uint32_t mclk;
	struct PP_Clocks minimum_clocks = {0};
	bool disable_mclk_switching;
	bool disable_mclk_switching_for_frame_lock;
	const struct phm_clock_and_voltage_limits *max_limits;
	uint32_t i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	int32_t count;
	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;

	data->battery_state = (PP_StateUILabel_Battery ==
			request_ps->classification.ui_label);

	PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
				 "VI should always have 2 performance levels",
				);

	max_limits = adev->pm.ac_power ?
			&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
			&(hwmgr->dyn_state.max_clock_voltage_on_dc);

	/* Cap clock DPM tables at DC MAX if it is in DC. */
	if (!adev->pm.ac_power) {
		for (i = 0; i < smu7_ps->performance_level_count; i++) {
			if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
				smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
			if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
				smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
		}
	}

	minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
	minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StablePState)) {
		/* Stable pstate targets 75% of the max sclk, snapped down to
		 * the nearest entry in the sclk dependency table, and the max
		 * mclk. NOTE(review): this path indexes table_info, so it
		 * presumably only triggers with v1 pptables — confirm.
		 */
		max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
		stable_pstate_sclk = (max_limits->sclk * 75) / 100;

		for (count = table_info->vdd_dep_on_sclk->count - 1;
				count >= 0; count--) {
			if (stable_pstate_sclk >=
					table_info->vdd_dep_on_sclk->entries[count].clk) {
				stable_pstate_sclk =
						table_info->vdd_dep_on_sclk->entries[count].clk;
				break;
			}
		}

		/* No entry at or below the 75% target: fall back to entry 0. */
		if (count < 0)
			stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;

		stable_pstate_mclk = max_limits->mclk;

		minimum_clocks.engineClock = stable_pstate_sclk;
		minimum_clocks.memoryClock = stable_pstate_mclk;
	}

	disable_mclk_switching_for_frame_lock = phm_cap_enabled(
				    hwmgr->platform_descriptor.platformCaps,
				    PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);


	/* Mclk switching is unsafe with multiple unsynchronized displays,
	 * with frame lock, or when the vblank is too short to hide it.
	 */
	if (hwmgr->display_config->num_display == 0)
		disable_mclk_switching = false;
	else
		disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
					  !hwmgr->display_config->multi_monitor_in_sync) ||
			disable_mclk_switching_for_frame_lock ||
			smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);

	sclk = smu7_ps->performance_levels[0].engine_clock;
	mclk = smu7_ps->performance_levels[0].memory_clock;

	/* With switching disabled, run the low level at the high level's mclk. */
	if (disable_mclk_switching)
		mclk = smu7_ps->performance_levels
		[smu7_ps->performance_level_count - 1].memory_clock;

	/* Raise the low level to the display minimums, capped at the limits. */
	if (sclk < minimum_clocks.engineClock)
		sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
				max_limits->sclk : minimum_clocks.engineClock;

	if (mclk < minimum_clocks.memoryClock)
		mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
				max_limits->mclk : minimum_clocks.memoryClock;

	smu7_ps->performance_levels[0].engine_clock = sclk;
	smu7_ps->performance_levels[0].memory_clock = mclk;

	/* Keep the high level's sclk at or above the low level's. */
	smu7_ps->performance_levels[1].engine_clock =
		(smu7_ps->performance_levels[1].engine_clock >=
				smu7_ps->performance_levels[0].engine_clock) ?
						smu7_ps->performance_levels[1].engine_clock :
						smu7_ps->performance_levels[0].engine_clock;

	if (disable_mclk_switching) {
		/* Both levels share one mclk so no switch ever happens. */
		if (mclk < smu7_ps->performance_levels[1].memory_clock)
			mclk = smu7_ps->performance_levels[1].memory_clock;

		smu7_ps->performance_levels[0].memory_clock = mclk;
		smu7_ps->performance_levels[1].memory_clock = mclk;
	} else {
		if (smu7_ps->performance_levels[1].memory_clock <
				smu7_ps->performance_levels[0].memory_clock)
			smu7_ps->performance_levels[1].memory_clock =
					smu7_ps->performance_levels[0].memory_clock;
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_StablePState)) {
		for (i = 0; i < smu7_ps->performance_level_count; i++) {
			smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
			smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
			smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
			/* NOTE(review): pcie_lane is assigned from
			 * pcie_gen_performance.max, not a lane table —
			 * looks suspicious; confirm it is intentional.
			 */
			smu7_ps->performance_levels[i].pcie_lane = data->pcie_gen_performance.max;
		}
	}
	return 0;
}
3254
3255
3256 static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3257 {
3258         struct pp_power_state  *ps;
3259         struct smu7_power_state  *smu7_ps;
3260
3261         if (hwmgr == NULL)
3262                 return -EINVAL;
3263
3264         ps = hwmgr->request_ps;
3265
3266         if (ps == NULL)
3267                 return -EINVAL;
3268
3269         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3270
3271         if (low)
3272                 return smu7_ps->performance_levels[0].memory_clock;
3273         else
3274                 return smu7_ps->performance_levels
3275                                 [smu7_ps->performance_level_count-1].memory_clock;
3276 }
3277
3278 static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3279 {
3280         struct pp_power_state  *ps;
3281         struct smu7_power_state  *smu7_ps;
3282
3283         if (hwmgr == NULL)
3284                 return -EINVAL;
3285
3286         ps = hwmgr->request_ps;
3287
3288         if (ps == NULL)
3289                 return -EINVAL;
3290
3291         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3292
3293         if (low)
3294                 return smu7_ps->performance_levels[0].engine_clock;
3295         else
3296                 return smu7_ps->performance_levels
3297                                 [smu7_ps->performance_level_count-1].engine_clock;
3298 }
3299
/* Patch the boot power state (level 0) with the clocks, voltages and PCIe
 * settings that the VBIOS firmware info table reports as its boot-up values,
 * caching them in data->vbios_boot_state for later comparison against the
 * dependency tables.  Returns 0 even when no firmware table is present.
 */
static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
	ATOM_FIRMWARE_INFO_V2_2 *fw_info;
	uint16_t size;
	uint8_t frev, crev;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);

	/* First retrieve the Boot clocks and VDDC from the firmware info table.
	 * We assume here that fw_info is unchanged if this call fails.
	 */
	fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
			&size, &frev, &crev);
	if (!fw_info)
		/* During a test, there is no firmware info table. */
		return 0;

	/* Patch the state.  All table fields are little-endian in the VBIOS,
	 * hence the le32/le16 conversions; PCIe gen/lane come from the current
	 * link configuration rather than the table.
	 */
	data->vbios_boot_state.sclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultEngineClock);
	data->vbios_boot_state.mclk_bootup_value =
			le32_to_cpu(fw_info->ulDefaultMemoryClock);
	data->vbios_boot_state.mvdd_bootup_value =
			le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
	data->vbios_boot_state.vddc_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCVoltage);
	data->vbios_boot_state.vddci_bootup_value =
			le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
	data->vbios_boot_state.pcie_gen_bootup_value =
			smu7_get_current_pcie_speed(hwmgr);

	data->vbios_boot_state.pcie_lane_bootup_value =
			(uint16_t)smu7_get_current_pcie_lane_number(hwmgr);

	/* set boot power state */
	ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
	ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
	ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
	ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;

	return 0;
}
3344
3345 static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
3346 {
3347         int result;
3348         unsigned long ret = 0;
3349
3350         if (hwmgr->pp_table_version == PP_TABLE_V0) {
3351                 result = pp_tables_get_num_of_entries(hwmgr, &ret);
3352                 return result ? 0 : ret;
3353         } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3354                 result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
3355                 return result;
3356         }
3357         return 0;
3358 }
3359
/* Per-entry callback for the v1 (Tonga/Polaris-style) powerplay table walk.
 * Translates one ATOM_Tonga_State BIOS entry into @power_state and appends
 * two performance levels (low then high) to the embedded smu7 power state,
 * mapping the BIOS sclk/mclk dependency-table indices and PCIe caps.
 * Returns 0 on success, -EINVAL when the level count exceeds SMC or driver
 * limits.
 */
static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
		void *state, struct pp_power_state *power_state,
		void *pp_table, uint32_t classification_flag)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_power_state  *smu7_power_state =
			(struct smu7_power_state *)(&(power_state->hardware));
	struct smu7_performance_level *performance_level;
	ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
	ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
			(ATOM_Tonga_POWERPLAYTABLE *)pp_table;
	/* dependency tables live at byte offsets inside the powerplay blob */
	PPTable_Generic_SubTable_Header *sclk_dep_table =
			(PPTable_Generic_SubTable_Header *)
			(((unsigned long)powerplay_table) +
				le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));

	ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
			(ATOM_Tonga_MCLK_Dependency_Table *)
			(((unsigned long)powerplay_table) +
				le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));

	/* The following fields are not initialized here: id orderedList allStatesList */
	power_state->classification.ui_label =
			(le16_to_cpu(state_entry->usClassification) &
			ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
			ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
	power_state->classification.flags = classification_flag;
	/* NOTE: There is a classification2 flag in BIOS that is not being used right now */

	power_state->classification.temporary_state = false;
	power_state->classification.to_be_deleted = false;

	power_state->validation.disallowOnDC =
			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
					ATOM_Tonga_DISALLOW_ON_DC));

	power_state->pcie.lanes = 0;

	power_state->display.disableFrameModulation = false;
	power_state->display.limitRefreshrate = false;
	power_state->display.enableVariBright =
			(0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
					ATOM_Tonga_ENABLE_VARIBRIGHT));

	power_state->validation.supportedPowerLevels = 0;
	power_state->uvd_clocks.VCLK = 0;
	power_state->uvd_clocks.DCLK = 0;
	power_state->temperatures.min = 0;
	power_state->temperatures.max = 0;

	/* claim the slot first, then validate the new count below */
	performance_level = &(smu7_power_state->performance_levels
			[smu7_power_state->performance_level_count++]);

	PP_ASSERT_WITH_CODE(
			(smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
			"Performance levels exceeds SMC limit!",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(
			(smu7_power_state->performance_level_count <=
					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
			"Performance levels exceeds Driver limit!",
			return -EINVAL);

	/* Performance levels are arranged from low to high. */
	performance_level->memory_clock = mclk_dep_table->entries
			[state_entry->ucMemoryClockIndexLow].ulMclk;
	/* sclk dependency table layout differs by revision:
	 * rev 0 = Tonga format, rev 1 = Polaris format
	 */
	if (sclk_dep_table->ucRevId == 0)
		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexLow].ulSclk;
	else if (sclk_dep_table->ucRevId == 1)
		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexLow].ulSclk;
	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
			state_entry->ucPCIEGenLow);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
			state_entry->ucPCIELaneLow);

	/* second level: the "high" clocks of the same BIOS state entry */
	performance_level = &(smu7_power_state->performance_levels
			[smu7_power_state->performance_level_count++]);
	performance_level->memory_clock = mclk_dep_table->entries
			[state_entry->ucMemoryClockIndexHigh].ulMclk;

	if (sclk_dep_table->ucRevId == 0)
		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexHigh].ulSclk;
	else if (sclk_dep_table->ucRevId == 1)
		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexHigh].ulSclk;

	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
			state_entry->ucPCIEGenHigh);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
			state_entry->ucPCIELaneHigh);

	return 0;
}
3457
/* Fetch powerplay table entry @entry_index (v1 table format) into @state,
 * then post-process it: sanity-check single-entry MCLK/VDDCI tables against
 * the VBIOS boot values, mark DC compatibility, capture the ACPI state's
 * PCIe gen, and fold the state's per-level PCIe gen/lane extremes into the
 * backend's performance / power-saving ranges.  Always returns 0.
 */
static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct smu7_power_state *ps;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct smu7_power_state *)(&state->hardware);

	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
			smu7_get_pp_table_entry_callback_func_v1);

	/* This is the earliest time we have all the dependency table and the VBIOS boot state
	 * as PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state
	 * if there is only one VDDCI/MCLK level, check if it's the same as VBIOS boot state
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].vddci !=
				data->vbios_boot_state.vddci_bootup_value)
			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	/* remember the PCIe gen of the ACPI state for link-speed fallback */
	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	/* accumulate PCIe gen/lane min-max ranges only for successfully
	 * parsed entries; the ranges persist in the backend across entries
	 */
	if (!result) {
		uint32_t i;

		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;
			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;
				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	/* NOTE(review): 'result' from the table walk is not propagated;
	 * callers always see 0 even if the entry failed to parse — confirm
	 * this is intentional before relying on the return value.
	 */
	return 0;
}
3559
/* Per-entry callback for the v0 (PPLIB/CI-style) powerplay table walk.
 * Decodes one ATOM_PPLIB_CI_CLOCK_INFO record into a single performance
 * level appended to @power_state.  Returns -EINVAL when the SMC level limit
 * is exceeded, but 0 (skip) when only the driver limit is exceeded.
 */
static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *power_state,
					unsigned int index, const void *clock_info)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_power_state  *ps = cast_phw_smu7_power_state(power_state);
	const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
	struct smu7_performance_level *performance_level;
	uint32_t engine_clock, memory_clock;
	uint16_t pcie_gen_from_bios;

	/* clocks are split across a 16-bit low word and an 8-bit high byte */
	engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
	memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;

	/* track the highest MCLK seen across all entries for MC ucode setup */
	if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
		data->highest_mclk = memory_clock;

	PP_ASSERT_WITH_CODE(
			(ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
			"Performance levels exceeds SMC limit!",
			return -EINVAL);

	/* note: exceeding the driver limit is non-fatal — the level is
	 * silently skipped (return 0), unlike the SMC limit above
	 */
	PP_ASSERT_WITH_CODE(
			(ps->performance_level_count <
					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
			"Performance levels exceeds Driver limit, Skip!",
			return 0);

	performance_level = &(ps->performance_levels
			[ps->performance_level_count++]);

	/* Performance levels are arranged from low to high. */
	performance_level->memory_clock = memory_clock;
	performance_level->engine_clock = engine_clock;

	pcie_gen_from_bios = visland_clk_info->ucPCIEGen;

	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);

	return 0;
}
3602
/* Fetch powerplay table entry @entry_index (legacy v0 table format) into
 * @state.  Mirrors smu7_get_pp_table_entry_v1: validates single-entry
 * MCLK/VDDCI tables against VBIOS boot values, sets DC compatibility,
 * records the ACPI state's PCIe gen, and accumulates PCIe gen/lane ranges
 * in the backend.  Always returns 0.
 */
static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct smu7_power_state *ps;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *dep_mclk_table =
			hwmgr->dyn_state.vddci_dependency_on_mclk;

	/* v0 path zeroes the hardware state before filling it (v1 does not) */
	memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct smu7_power_state *)(&state->hardware);

	result = pp_tables_get_entry(hwmgr, entry_index, state,
			smu7_get_pp_table_entry_callback_func_v0);

	/*
	 * This is the earliest time we have all the dependency table
	 * and the VBIOS boot state as
	 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot
	 * state if there is only one VDDCI/MCLK level, check if it's
	 * the same as VBIOS boot state
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].v !=
				data->vbios_boot_state.vddci_bootup_value)
			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	/* remember the PCIe gen of the ACPI state for link-speed fallback */
	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	/* accumulate PCIe gen/lane min-max ranges only for successfully
	 * parsed entries; the ranges persist in the backend across entries
	 */
	if (!result) {
		uint32_t i;

		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}
3709
3710 static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3711                 unsigned long entry_index, struct pp_power_state *state)
3712 {
3713         if (hwmgr->pp_table_version == PP_TABLE_V0)
3714                 return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3715         else if (hwmgr->pp_table_version == PP_TABLE_V1)
3716                 return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3717
3718         return 0;
3719 }
3720
/* Read the current GPU package power into @query.
 * First tries the fast PPSMC_MSG_GetCurrPkgPwr query (unsupported on
 * Hawaii/Bonaire/Fiji/Tonga); if that yields zero or is unsupported,
 * falls back to starting a PM status log and polling the sampled value
 * for up to ~5 seconds.  Returns 0 on success, -EINVAL on a NULL @query.
 */
static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int i;
	u32 tmp = 0;

	if (!query)
		return -EINVAL;

	/*
	 * PPSMC_MSG_GetCurrPkgPwr is not supported on:
	 *  - Hawaii
	 *  - Bonaire
	 *  - Fiji
	 *  - Tonga
	 */
	if ((adev->asic_type != CHIP_HAWAII) &&
	    (adev->asic_type != CHIP_BONAIRE) &&
	    (adev->asic_type != CHIP_FIJI) &&
	    (adev->asic_type != CHIP_TONGA)) {
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp);
		*query = tmp;

		/* a zero reading is treated as "not ready" — fall through
		 * to the slow sampling path below
		 */
		if (tmp != 0)
			return 0;
	}

	/* slow path: kick off PM status logging, clear the result register,
	 * then poll up to 10 times at 500 ms intervals for a sample
	 */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
							ixSMU_PM_STATUS_95, 0);

	for (i = 0; i < 10; i++) {
		msleep(500);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL);
		tmp = cgs_read_ind_register(hwmgr->device,
						CGS_IND_REG__SMC,
						ixSMU_PM_STATUS_95);
		if (tmp != 0)
			break;
	}
	*query = tmp;

	return 0;
}
3765
/* Sensor-read entry point for the smu7 hwmgr.
 * @idx selects the AMDGPU_PP_SENSOR_* to read; the result is written to
 * @value and its byte size to @size.  Returns 0 on success, -EINVAL for an
 * unknown sensor or a buffer smaller than 4 bytes.
 */
static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			    void *value, int *size)
{
	uint32_t sclk, mclk, activity_percent;
	uint32_t offset, val_vid;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
		*((uint32_t *)value) = sclk;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
		*((uint32_t *)value) = mclk;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		/* activity lives in an SMC soft register; pick the GFX or
		 * memory counter depending on the requested sensor
		 */
		offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
								SMU_SoftRegisters,
								(idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
								AverageGraphicsActivity:
								AverageMemoryActivity);

		activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
		/* value is in 8.8 fixed point: round (+0x80) then shift,
		 * and clamp to 100%
		 */
		activity_percent += 0x80;
		activity_percent >>= 8;
		*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		/* 1 = powered, 0 = power-gated */
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
	case AMDGPU_PP_SENSOR_VDDGFX:
		/* read the VID from whichever SVI2 plane drives VDDGFX,
		 * then convert to a voltage
		 */
		if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
		    (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
		else
			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);

		*((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
		return 0;
	default:
		return -EINVAL;
	}
}
3831
/* Compare the new power state's top-level SCLK/MCLK against the current DPM
 * tables and set the need_update_smu7_dpm_table flags (OD update when the
 * requested clock exceeds the table's top level, plain update for DeepSleep
 * or display-count changes).  Always returns 0.
 */
static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	uint32_t sclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].engine_clock;
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	uint32_t mclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].memory_clock;
	/* zero-initialized and never populated below — the SR comparison
	 * effectively checks min_clock_in_sr against 0 (see TODO)
	 */
	struct PP_Clocks min_clocks = {0};
	uint32_t i;

	/* look for an exact SCLK match in the table */
	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count) {
		/* no match: i == count here, so [i-1] is the top level.
		 * NOTE(review): assumes count >= 1 — confirm tables are
		 * never empty at this point.
		 */
		if (sclk > sclk_table->dpm_levels[i-1].value) {
			/* overdrive: requested clock exceeds the table */
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			sclk_table->dpm_levels[i-1].value = sclk;
		}
	} else {
	/* TODO: Check SCLK in DAL's minimum clocks
	 * in case DeepSleep divider update is required.
	 */
		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
			(min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
				data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	/* same exact-match-or-overdrive check for MCLK */
	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count) {
		if (mclk > mclk_table->dpm_levels[i-1].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			mclk_table->dpm_levels[i-1].value = mclk;
		}
	}

	/* display count changed since last program: MCLK levels may need
	 * re-population
	 */
	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

	return 0;
}
3885
3886 static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
3887                 const struct smu7_power_state *smu7_ps)
3888 {
3889         uint32_t i;
3890         uint32_t sclk, max_sclk = 0;
3891         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3892         struct smu7_dpm_table *dpm_table = &data->dpm_table;
3893
3894         for (i = 0; i < smu7_ps->performance_level_count; i++) {
3895                 sclk = smu7_ps->performance_levels[i].engine_clock;
3896                 if (max_sclk < sclk)
3897                         max_sclk = sclk;
3898         }
3899
3900         for (i = 0; i < dpm_table->sclk_table.count; i++) {
3901                 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
3902                         return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
3903                                         dpm_table->pcie_speed_table.dpm_levels
3904                                         [dpm_table->pcie_speed_table.count - 1].value :
3905                                         dpm_table->pcie_speed_table.dpm_levels[i].value);
3906         }
3907
3908         return 0;
3909 }
3910
/* Before switching power states, request a PCIe link speed increase via
 * ACPI when the new state needs a faster link than the current one.  The
 * case ladder deliberately falls through from Gen3 to Gen2 so a failed
 * Gen3 request degrades to Gen2; failures end up recording the current
 * speed in force_pcie_gen.  A decrease is deferred (pspp_notify_required)
 * until after the state change.  Always returns 0.
 */
static int smu7_request_link_speed_change_before_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_nps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	const struct smu7_power_state *polaris10_cps =
			cast_const_phw_smu7_power_state(states->pcurrent_state);

	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
	uint16_t current_link_speed;

	/* a previously forced gen overrides the current state's speed */
	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;

	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case PP_PCIEGen3:
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
				break;
			/* Gen3 request failed: settle for Gen2 */
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			fallthrough;
		case PP_PCIEGen2:
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
				break;
			fallthrough;
#endif
		default:
			/* no ACPI or all requests failed: keep current speed */
			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		/* slower target: notify PSPP after the state change instead */
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}
3959
3960 static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3961 {
3962         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3963
3964         if (0 == data->need_update_smu7_dpm_table)
3965                 return 0;
3966
3967         if ((0 == data->sclk_dpm_key_disabled) &&
3968                 (data->need_update_smu7_dpm_table &
3969                         (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3970                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3971                                 "Trying to freeze SCLK DPM when DPM is disabled",
3972                                 );
3973                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3974                                 PPSMC_MSG_SCLKDPM_FreezeLevel,
3975                                 NULL),
3976                                 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
3977                                 return -EINVAL);
3978         }
3979
3980         if ((0 == data->mclk_dpm_key_disabled) &&
3981                 (data->need_update_smu7_dpm_table &
3982                  DPMTABLE_OD_UPDATE_MCLK)) {
3983                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
3984                                 "Trying to freeze MCLK DPM when DPM is disabled",
3985                                 );
3986                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
3987                                 PPSMC_MSG_MCLKDPM_FreezeLevel,
3988                                 NULL),
3989                                 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
3990                                 return -EINVAL);
3991         }
3992
3993         return 0;
3994 }
3995
3996 static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
3997                 struct pp_hwmgr *hwmgr, const void *input)
3998 {
3999         int result = 0;
4000         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4001         struct smu7_dpm_table *dpm_table = &data->dpm_table;
4002         uint32_t count;
4003         struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4004         struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
4005         struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
4006
4007         if (0 == data->need_update_smu7_dpm_table)
4008                 return 0;
4009
4010         if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
4011                 for (count = 0; count < dpm_table->sclk_table.count; count++) {
4012                         dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
4013                         dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
4014                 }
4015         }
4016
4017         if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
4018                 for (count = 0; count < dpm_table->mclk_table.count; count++) {
4019                         dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
4020                         dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
4021                 }
4022         }
4023
4024         if (data->need_update_smu7_dpm_table &
4025                         (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
4026                 result = smum_populate_all_graphic_levels(hwmgr);
4027                 PP_ASSERT_WITH_CODE((0 == result),
4028                                 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
4029                                 return result);
4030         }
4031
4032         if (data->need_update_smu7_dpm_table &
4033                         (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
4034                 /*populate MCLK dpm table to SMU7 */
4035                 result = smum_populate_all_memory_levels(hwmgr);
4036                 PP_ASSERT_WITH_CODE((0 == result),
4037                                 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
4038                                 return result);
4039         }
4040
4041         return result;
4042 }
4043
4044 static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
4045                           struct smu7_single_dpm_table *dpm_table,
4046                         uint32_t low_limit, uint32_t high_limit)
4047 {
4048         uint32_t i;
4049
4050         /* force the trim if mclk_switching is disabled to prevent flicker */
4051         bool force_trim = (low_limit == high_limit);
4052         for (i = 0; i < dpm_table->count; i++) {
4053         /*skip the trim if od is enabled*/
4054                 if ((!hwmgr->od_enabled || force_trim)
4055                         && (dpm_table->dpm_levels[i].value < low_limit
4056                         || dpm_table->dpm_levels[i].value > high_limit))
4057                         dpm_table->dpm_levels[i].enabled = false;
4058                 else
4059                         dpm_table->dpm_levels[i].enabled = true;
4060         }
4061
4062         return 0;
4063 }
4064
4065 static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
4066                 const struct smu7_power_state *smu7_ps)
4067 {
4068         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4069         uint32_t high_limit_count;
4070
4071         PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
4072                         "power state did not have any performance level",
4073                         return -EINVAL);
4074
4075         high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
4076
4077         smu7_trim_single_dpm_states(hwmgr,
4078                         &(data->dpm_table.sclk_table),
4079                         smu7_ps->performance_levels[0].engine_clock,
4080                         smu7_ps->performance_levels[high_limit_count].engine_clock);
4081
4082         smu7_trim_single_dpm_states(hwmgr,
4083                         &(data->dpm_table.mclk_table),
4084                         smu7_ps->performance_levels[0].memory_clock,
4085                         smu7_ps->performance_levels[high_limit_count].memory_clock);
4086
4087         return 0;
4088 }
4089
4090 static int smu7_generate_dpm_level_enable_mask(
4091                 struct pp_hwmgr *hwmgr, const void *input)
4092 {
4093         int result = 0;
4094         const struct phm_set_power_state_input *states =
4095                         (const struct phm_set_power_state_input *)input;
4096         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4097         const struct smu7_power_state *smu7_ps =
4098                         cast_const_phw_smu7_power_state(states->pnew_state);
4099
4100
4101         result = smu7_trim_dpm_states(hwmgr, smu7_ps);
4102         if (result)
4103                 return result;
4104
4105         data->dpm_level_enable_mask.sclk_dpm_enable_mask =
4106                         phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
4107         data->dpm_level_enable_mask.mclk_dpm_enable_mask =
4108                         phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
4109         data->dpm_level_enable_mask.pcie_dpm_enable_mask =
4110                         phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
4111
4112         return 0;
4113 }
4114
4115 static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4116 {
4117         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4118
4119         if (0 == data->need_update_smu7_dpm_table)
4120                 return 0;
4121
4122         if ((0 == data->sclk_dpm_key_disabled) &&
4123                 (data->need_update_smu7_dpm_table &
4124                 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4125
4126                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4127                                 "Trying to Unfreeze SCLK DPM when DPM is disabled",
4128                                 );
4129                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4130                                 PPSMC_MSG_SCLKDPM_UnfreezeLevel,
4131                                 NULL),
4132                         "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
4133                         return -EINVAL);
4134         }
4135
4136         if ((0 == data->mclk_dpm_key_disabled) &&
4137                 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
4138
4139                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4140                                 "Trying to Unfreeze MCLK DPM when DPM is disabled",
4141                                 );
4142                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4143                                 PPSMC_MSG_MCLKDPM_UnfreezeLevel,
4144                                 NULL),
4145                     "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
4146                     return -EINVAL);
4147         }
4148
4149         data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
4150
4151         return 0;
4152 }
4153
/*
 * Issue the deferred PCIe link-speed downgrade request (PSPP) after the
 * power state change has completed.
 *
 * Only acts when smu7_request_link_speed_change_before_state_change() set
 * pspp_notify_required (i.e. the new state needs a slower link).  Maps the
 * target speed to an ACPI performance request and sends it; failures are
 * only logged.  Always returns 0.
 */
static int smu7_notify_link_speed_change_after_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
	uint8_t  request;

	if (data->pspp_notify_required) {
		if (target_link_speed == PP_PCIEGen3)
			request = PCIE_PERF_REQ_GEN3;
		else if (target_link_speed == PP_PCIEGen2)
			request = PCIE_PERF_REQ_GEN2;
		else
			request = PCIE_PERF_REQ_GEN1;

		/* Skip a Gen1 request while the link is already above Gen1. */
		if (request == PCIE_PERF_REQ_GEN1 &&
				smu7_get_current_pcie_speed(hwmgr) > 0)
			return 0;

#ifdef CONFIG_ACPI
		if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
			if (PP_PCIEGen2 == target_link_speed)
				pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
			else
				pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
		}
#endif
	}

	return 0;
}
4189
4190 static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
4191 {
4192         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4193
4194         if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
4195                 if (hwmgr->chip_id == CHIP_VEGAM)
4196                         smum_send_msg_to_smc_with_parameter(hwmgr,
4197                                         (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2,
4198                                         NULL);
4199                 else
4200                         smum_send_msg_to_smc_with_parameter(hwmgr,
4201                                         (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2,
4202                                         NULL);
4203         }
4204         return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ?  0 : -EINVAL;
4205 }
4206
/*
 * Apply a new power state: the full freeze -> repopulate -> unfreeze
 * sequence plus the surrounding PCIe/AVFS/display bookkeeping.
 *
 * Each step records its error in `result` via PP_ASSERT_WITH_CODE but the
 * sequence deliberately continues, so later cleanup steps (unfreeze,
 * upload of the enable masks) still run even when an earlier step failed.
 * The statement order matters: freeze must precede the table population
 * and unfreeze must follow it.
 *
 * Return: 0 on success, or the last recorded step error.
 */
static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	/* Raise the PCIe link speed, if needed, before switching states. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	/*
	 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
	 * That effectively disables AVFS feature.
	 */
	if (hwmgr->hardcode_pp_table != NULL)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;

	tmp_result = smu7_update_avfs(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update avfs voltages!",
			result = tmp_result);

	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = smum_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = smu7_notify_smc_display(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to notify smc display settings!",
			result = tmp_result);

	tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	/* Any deferred PCIe link-speed downgrade happens last. */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}
	data->apply_optimized_settings = false;
	return result;
}
4283
4284 static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
4285 {
4286         hwmgr->thermal_controller.
4287         advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
4288
4289         return smum_send_msg_to_smc_with_parameter(hwmgr,
4290                         PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm,
4291                         NULL);
4292 }
4293
4294 static int
4295 smu7_notify_smc_display_change(struct pp_hwmgr *hwmgr, bool has_display)
4296 {
4297         PPSMC_Msg msg = has_display ? (PPSMC_Msg)PPSMC_HasDisplay : (PPSMC_Msg)PPSMC_NoDisplay;
4298
4299         return (smum_send_msg_to_smc(hwmgr, msg, NULL) == 0) ?  0 : -1;
4300 }
4301
4302 static int
4303 smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
4304 {
4305         if (hwmgr->display_config->num_display > 1 &&
4306                         !hwmgr->display_config->multi_monitor_in_sync)
4307                 smu7_notify_smc_display_change(hwmgr, false);
4308
4309         return 0;
4310 }
4311
4312 /**
4313 * Programs the display gap
4314 *
4315 * @param    hwmgr  the address of the powerplay hardware manager.
4316 * @return   always OK
4317 */
4318 static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
4319 {
4320         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4321         uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
4322         uint32_t display_gap2;
4323         uint32_t pre_vbi_time_in_us;
4324         uint32_t frame_time_in_us;
4325         uint32_t ref_clock, refresh_rate;
4326
4327         display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
4328         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
4329
4330         ref_clock =  amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
4331         refresh_rate = hwmgr->display_config->vrefresh;
4332
4333         if (0 == refresh_rate)
4334                 refresh_rate = 60;
4335
4336         frame_time_in_us = 1000000 / refresh_rate;
4337
4338         pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;
4339
4340         data->frame_time_x2 = frame_time_in_us * 2 / 100;
4341
4342         if (data->frame_time_x2 < 280) {
4343                 pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
4344                 data->frame_time_x2 = 280;
4345         }
4346
4347         display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
4348
4349         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
4350
4351         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4352                         data->soft_regs_start + smum_get_offsetof(hwmgr,
4353                                                         SMU_SoftRegisters,
4354                                                         PreVBlankGap), 0x64);
4355
4356         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4357                         data->soft_regs_start + smum_get_offsetof(hwmgr,
4358                                                         SMU_SoftRegisters,
4359                                                         VBlankTimeout),
4360                                         (frame_time_in_us - pre_vbi_time_in_us));
4361
4362         return 0;
4363 }
4364
/* Display configuration changed: reprogram the display gap registers. */
static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	return smu7_program_display_gap(hwmgr);
}
4369
4370 /**
4371 *  Set maximum target operating fan output RPM
4372 *
4373 * @param    hwmgr:  the address of the powerplay hardware manager.
4374 * @param    usMaxFanRpm:  max operating fan RPM value.
4375 * @return   The response that came from the SMC.
4376 */
4377 static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
4378 {
4379         hwmgr->thermal_controller.
4380         advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
4381
4382         return smum_send_msg_to_smc_with_parameter(hwmgr,
4383                         PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm,
4384                         NULL);
4385 }
4386
/* Thermal/CTF interrupt sources all route through the common PHM handler. */
static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
	.process = phm_irq_process,
};
4390
4391 static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
4392 {
4393         struct amdgpu_irq_src *source =
4394                 kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
4395
4396         if (!source)
4397                 return -ENOMEM;
4398
4399         source->funcs = &smu7_irq_funcs;
4400
4401         amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4402                         AMDGPU_IRQ_CLIENTID_LEGACY,
4403                         VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
4404                         source);
4405         amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4406                         AMDGPU_IRQ_CLIENTID_LEGACY,
4407                         VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
4408                         source);
4409
4410         /* Register CTF(GPIO_19) interrupt */
4411         amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4412                         AMDGPU_IRQ_CLIENTID_LEGACY,
4413                         VISLANDS30_IV_SRCID_GPIO_19,
4414                         source);
4415
4416         return 0;
4417 }
4418
4419 static bool
4420 smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4421 {
4422         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4423         bool is_update_required = false;
4424
4425         if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4426                 is_update_required = true;
4427
4428         if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh)
4429                 is_update_required = true;
4430
4431         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4432                 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
4433                         (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
4434                         hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4435                         is_update_required = true;
4436         }
4437         return is_update_required;
4438 }
4439
4440 static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
4441                                                            const struct smu7_performance_level *pl2)
4442 {
4443         return ((pl1->memory_clock == pl2->memory_clock) &&
4444                   (pl1->engine_clock == pl2->engine_clock) &&
4445                   (pl1->pcie_gen == pl2->pcie_gen) &&
4446                   (pl1->pcie_lane == pl2->pcie_lane));
4447 }
4448
/*
 * Compare two hardware power states for equality.
 *
 * States are equal only when every performance level matches, the UVD/VCE
 * clocks and sclk_threshold agree, and no overdrive table update is
 * pending (a pending OD update forces inequality so the state is
 * reapplied).
 *
 * Return: 0 with *equal set, or -EINVAL on a NULL argument.
 */
static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
		const struct pp_hw_power_state *pstate1,
		const struct pp_hw_power_state *pstate2, bool *equal)
{
	const struct smu7_power_state *psa;
	const struct smu7_power_state *psb;
	int i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
		return -EINVAL;

	psa = cast_const_phw_smu7_power_state(pstate1);
	psb = cast_const_phw_smu7_power_state(pstate2);
	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
	if (psa->performance_level_count != psb->performance_level_count) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < psa->performance_level_count; i++) {
		if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
			/* If we have found even one performance level pair that is different the states are different. */
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
	/* For OD call, set value based on flag */
	*equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
							DPMTABLE_OD_UPDATE_MCLK |
							DPMTABLE_OD_UPDATE_VDDC));

	return 0;
}
4488
4489 static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
4490 {
4491         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4492
4493         uint32_t tmp;
4494
4495         /* Read MC indirect register offset 0x9F bits [3:0] to see
4496          * if VBIOS has already loaded a full version of MC ucode
4497          * or not.
4498          */
4499
4500         smu7_get_mc_microcode_version(hwmgr);
4501
4502         data->need_long_memory_training = false;
4503
4504         cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
4505                                                         ixMC_IO_DEBUG_UP_13);
4506         tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
4507
4508         if (tmp & (1 << 23)) {
4509                 data->mem_latency_high = MEM_LATENCY_HIGH;
4510                 data->mem_latency_low = MEM_LATENCY_LOW;
4511                 if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4512                     (hwmgr->chip_id == CHIP_POLARIS11) ||
4513                     (hwmgr->chip_id == CHIP_POLARIS12))
4514                         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL);
4515         } else {
4516                 data->mem_latency_high = 330;
4517                 data->mem_latency_low = 330;
4518                 if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4519                     (hwmgr->chip_id == CHIP_POLARIS11) ||
4520                     (hwmgr->chip_id == CHIP_POLARIS12))
4521                         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL);
4522         }
4523
4524         return 0;
4525 }
4526
/*
 * Snapshot the SPLL/MPLL/DLL clock registers into data->clock_registers.
 *
 * The SPLL registers live in the SMC indirect register space; the MPLL,
 * DLL and MCLK power-management registers are direct MMIO reads.  Always
 * returns 0.
 */
static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->clock_registers.vCG_SPLL_FUNC_CNTL         =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
	data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
	data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
	data->clock_registers.vDLL_CNTL                  =
		cgs_read_register(hwmgr->device, mmDLL_CNTL);
	data->clock_registers.vMCLK_PWRMGT_CNTL          =
		cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
	data->clock_registers.vMPLL_AD_FUNC_CNTL         =
		cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
	data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
		cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL            =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
	data->clock_registers.vMPLL_FUNC_CNTL_1          =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
	data->clock_registers.vMPLL_FUNC_CNTL_2          =
		cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
	data->clock_registers.vMPLL_SS1                  =
		cgs_read_register(hwmgr->device, mmMPLL_SS1);
	data->clock_registers.vMPLL_SS2                  =
		cgs_read_register(hwmgr->device, mmMPLL_SS2);
	return 0;

}
4564
4565 /**
4566  * Find out if memory is GDDR5.
4567  *
4568  * @param    hwmgr  the address of the powerplay hardware manager.
4569  * @return   always 0
4570  */
4571 static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
4572 {
4573         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4574         struct amdgpu_device *adev = hwmgr->adev;
4575
4576         data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);
4577
4578         return 0;
4579 }
4580
4581 /**
4582  * Enables Dynamic Power Management by SMC
4583  *
4584  * @param    hwmgr  the address of the powerplay hardware manager.
4585  * @return   always 0
4586  */
4587 static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
4588 {
4589         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4590                         GENERAL_PWRMGT, STATIC_PM_EN, 1);
4591
4592         return 0;
4593 }
4594
4595 /**
4596  * Initialize PowerGating States for different engines
4597  *
4598  * @param    hwmgr  the address of the powerplay hardware manager.
4599  * @return   always 0
4600  */
4601 static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
4602 {
4603         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4604
4605         data->uvd_power_gated = false;
4606         data->vce_power_gated = false;
4607
4608         return 0;
4609 }
4610
/* Reset the cached low-SCLK interrupt threshold.  Always returns 0. */
static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;
	return 0;
}
4618
/*
 * One-time ASIC setup: check MC firmware, snapshot clock registers, detect
 * memory type, enable ACPI PM, and initialize power-gating/threshold state.
 *
 * Each step records its error in `result` via PP_ASSERT_WITH_CODE but the
 * sequence continues, so the last failing step's code is returned.
 *
 * Return: 0 on success, or the last recorded step error.
 */
static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	smu7_check_mc_firmware(hwmgr);

	tmp_result = smu7_read_clock_registers(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to read clock registers!", result = tmp_result);

	tmp_result = smu7_get_memory_type(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get memory type!", result = tmp_result);

	tmp_result = smu7_enable_acpi_power_management(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to enable ACPI power management!", result = tmp_result);

	tmp_result = smu7_init_power_gate_state(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init power gate state!", result = tmp_result);

	tmp_result = smu7_get_mc_microcode_version(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to get MC microcode version!", result = tmp_result);

	tmp_result = smu7_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to init sclk threshold!", result = tmp_result);

	return result;
}
4651
4652 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4653                 enum pp_clock_type type, uint32_t mask)
4654 {
4655         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4656
4657         if (mask == 0)
4658                 return -EINVAL;
4659
4660         switch (type) {
4661         case PP_SCLK:
4662                 if (!data->sclk_dpm_key_disabled)
4663                         smum_send_msg_to_smc_with_parameter(hwmgr,
4664                                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
4665                                         data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask,
4666                                         NULL);
4667                 break;
4668         case PP_MCLK:
4669                 if (!data->mclk_dpm_key_disabled)
4670                         smum_send_msg_to_smc_with_parameter(hwmgr,
4671                                         PPSMC_MSG_MCLKDPM_SetEnabledMask,
4672                                         data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask,
4673                                         NULL);
4674                 break;
4675         case PP_PCIE:
4676         {
4677                 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4678
4679                 if (!data->pcie_dpm_key_disabled) {
4680                         if (fls(tmp) != ffs(tmp))
4681                                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel,
4682                                                 NULL);
4683                         else
4684                                 smum_send_msg_to_smc_with_parameter(hwmgr,
4685                                         PPSMC_MSG_PCIeDPM_ForceLevel,
4686                                         fls(tmp) - 1,
4687                                         NULL);
4688                 }
4689                 break;
4690         }
4691         default:
4692                 break;
4693         }
4694
4695         return 0;
4696 }
4697
/**
 * Print the DPM levels (or OverDrive tables) of one clock domain into buf.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @param    type   which table to print (PP_SCLK/PP_MCLK/PP_PCIE/OD_*).
 * @param    buf    sysfs output buffer.
 * @return   number of bytes written (0 for unknown types).
 */
static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
	struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		/* Ask the SMC for the current engine clock ... */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);

		/* ... and locate the first DPM level at or above it. */
		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		/* Table values are in 10 kHz units (/100 -> MHz); '*' marks
		 * the active level.
		 */
		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		/* Same scheme as PP_SCLK, for the memory clock. */
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		/* PCIe levels are matched exactly, not by threshold. */
		pcie_speed = smu7_get_current_pcie_speed(hwmgr);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		/* Level values 0/1/2 encode gen1 x8 / gen2 x16 / gen3 x16. */
		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
					(i == now) ? "*" : "");
		break;
	case OD_SCLK:
		/* OD tables are only shown when OverDrive is enabled; note
		 * the plain '=' which discards anything printed so far.
		 */
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_SCLK");
			for (i = 0; i < odn_sclk_table->num_of_pl; i++)
				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
					i, odn_sclk_table->entries[i].clock/100,
					odn_sclk_table->entries[i].vddc);
		}
		break;
	case OD_MCLK:
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_MCLK");
			for (i = 0; i < odn_mclk_table->num_of_pl; i++)
				size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
					i, odn_mclk_table->entries[i].clock/100,
					odn_mclk_table->entries[i].vddc);
		}
		break;
	case OD_RANGE:
		/* Valid OD ranges: golden level 0 up to the platform
		 * overdrive limits, plus the allowed VDDC window.
		 */
		if (hwmgr->od_enabled) {
			size = sprintf(buf, "%s:\n", "OD_RANGE");
			size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
				data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
			size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
				data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
				hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
			size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
				data->odn_dpm_table.min_vddc,
				data->odn_dpm_table.max_vddc);
		}
		break;
	default:
		break;
	}
	return size;
}
4795
4796 static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4797 {
4798         switch (mode) {
4799         case AMD_FAN_CTRL_NONE:
4800                 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4801                 break;
4802         case AMD_FAN_CTRL_MANUAL:
4803                 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4804                         PHM_PlatformCaps_MicrocodeFanControl))
4805                         smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
4806                 break;
4807         case AMD_FAN_CTRL_AUTO:
4808                 if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
4809                         smu7_fan_ctrl_start_smc_fan_control(hwmgr);
4810                 break;
4811         default:
4812                 break;
4813         }
4814 }
4815
4816 static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4817 {
4818         return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
4819 }
4820
4821 static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
4822 {
4823         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4824         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4825         struct smu7_single_dpm_table *golden_sclk_table =
4826                         &(data->golden_dpm_table.sclk_table);
4827         int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
4828         int golden_value = golden_sclk_table->dpm_levels
4829                         [golden_sclk_table->count - 1].value;
4830
4831         value -= golden_value;
4832         value = DIV_ROUND_UP(value * 100, golden_value);
4833
4834         return value;
4835 }
4836
4837 static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4838 {
4839         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4840         struct smu7_single_dpm_table *golden_sclk_table =
4841                         &(data->golden_dpm_table.sclk_table);
4842         struct pp_power_state  *ps;
4843         struct smu7_power_state  *smu7_ps;
4844
4845         if (value > 20)
4846                 value = 20;
4847
4848         ps = hwmgr->request_ps;
4849
4850         if (ps == NULL)
4851                 return -EINVAL;
4852
4853         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4854
4855         smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
4856                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
4857                         value / 100 +
4858                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4859
4860         return 0;
4861 }
4862
4863 static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
4864 {
4865         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4866         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4867         struct smu7_single_dpm_table *golden_mclk_table =
4868                         &(data->golden_dpm_table.mclk_table);
4869         int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
4870         int golden_value = golden_mclk_table->dpm_levels
4871                         [golden_mclk_table->count - 1].value;
4872
4873         value -= golden_value;
4874         value = DIV_ROUND_UP(value * 100, golden_value);
4875
4876         return value;
4877 }
4878
4879 static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4880 {
4881         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4882         struct smu7_single_dpm_table *golden_mclk_table =
4883                         &(data->golden_dpm_table.mclk_table);
4884         struct pp_power_state  *ps;
4885         struct smu7_power_state  *smu7_ps;
4886
4887         if (value > 20)
4888                 value = 20;
4889
4890         ps = hwmgr->request_ps;
4891
4892         if (ps == NULL)
4893                 return -EINVAL;
4894
4895         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4896
4897         smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
4898                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
4899                         value / 100 +
4900                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
4901
4902         return 0;
4903 }
4904
4905
4906 static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4907 {
4908         struct phm_ppt_v1_information *table_info =
4909                         (struct phm_ppt_v1_information *)hwmgr->pptable;
4910         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
4911         struct phm_clock_voltage_dependency_table *sclk_table;
4912         int i;
4913
4914         if (hwmgr->pp_table_version == PP_TABLE_V1) {
4915                 if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
4916                         return -EINVAL;
4917                 dep_sclk_table = table_info->vdd_dep_on_sclk;
4918                 for (i = 0; i < dep_sclk_table->count; i++)
4919                         clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
4920                 clocks->count = dep_sclk_table->count;
4921         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4922                 sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
4923                 for (i = 0; i < sclk_table->count; i++)
4924                         clocks->clock[i] = sclk_table->entries[i].clk * 10;
4925                 clocks->count = sclk_table->count;
4926         }
4927
4928         return 0;
4929 }
4930
4931 static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
4932 {
4933         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4934
4935         if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
4936                 return data->mem_latency_high;
4937         else if (clk >= MEM_FREQ_HIGH_LATENCY)
4938                 return data->mem_latency_low;
4939         else
4940                 return MEM_LATENCY_ERR;
4941 }
4942
4943 static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
4944 {
4945         struct phm_ppt_v1_information *table_info =
4946                         (struct phm_ppt_v1_information *)hwmgr->pptable;
4947         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
4948         int i;
4949         struct phm_clock_voltage_dependency_table *mclk_table;
4950
4951         if (hwmgr->pp_table_version == PP_TABLE_V1) {
4952                 if (table_info == NULL)
4953                         return -EINVAL;
4954                 dep_mclk_table = table_info->vdd_dep_on_mclk;
4955                 for (i = 0; i < dep_mclk_table->count; i++) {
4956                         clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
4957                         clocks->latency[i] = smu7_get_mem_latency(hwmgr,
4958                                                 dep_mclk_table->entries[i].clk);
4959                 }
4960                 clocks->count = dep_mclk_table->count;
4961         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
4962                 mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
4963                 for (i = 0; i < mclk_table->count; i++)
4964                         clocks->clock[i] = mclk_table->entries[i].clk * 10;
4965                 clocks->count = mclk_table->count;
4966         }
4967         return 0;
4968 }
4969
4970 static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
4971                                                 struct amd_pp_clocks *clocks)
4972 {
4973         switch (type) {
4974         case amd_pp_sys_clock:
4975                 smu7_get_sclks(hwmgr, clocks);
4976                 break;
4977         case amd_pp_mem_clock:
4978                 smu7_get_mclks(hwmgr, clocks);
4979                 break;
4980         default:
4981                 return -EINVAL;
4982         }
4983
4984         return 0;
4985 }
4986
/**
 * Tell the SMC where the DRAM logging buffer lives.
 *
 * Writes the buffer's MC address, CPU-virtual address and size into the
 * SMC soft-register block (each half of the 64-bit addresses separately).
 *
 * @param    hwmgr             the address of the powerplay hardware manager.
 * @param    virtual_addr_low  low 32 bits of the CPU-virtual address.
 * @param    virtual_addr_hi   high 32 bits of the CPU-virtual address.
 * @param    mc_addr_low       low 32 bits of the MC (GPU) address.
 * @param    mc_addr_hi        high 32 bits of the MC (GPU) address.
 * @param    size              buffer size in bytes.
 * @return   always 0
 */
static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* Each write targets soft_regs_start plus the per-SMU offset of the
	 * named SMU_SoftRegisters field.
	 */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_ADDR_H),
					mc_addr_hi);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_ADDR_L),
					mc_addr_low);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
					virtual_addr_hi);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
					virtual_addr_low);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					data->soft_regs_start +
					smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
					size);
	return 0;
}
5027
5028 static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
5029                                         struct amd_pp_simple_clock_info *clocks)
5030 {
5031         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5032         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5033         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5034
5035         if (clocks == NULL)
5036                 return -EINVAL;
5037
5038         clocks->memory_max_clock = mclk_table->count > 1 ?
5039                                 mclk_table->dpm_levels[mclk_table->count-1].value :
5040                                 mclk_table->dpm_levels[0].value;
5041         clocks->engine_max_clock = sclk_table->count > 1 ?
5042                                 sclk_table->dpm_levels[sclk_table->count-1].value :
5043                                 sclk_table->dpm_levels[0].value;
5044         return 0;
5045 }
5046
5047 static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
5048                 struct PP_TemperatureRange *thermal_data)
5049 {
5050         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5051         struct phm_ppt_v1_information *table_info =
5052                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5053
5054         memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
5055
5056         if (hwmgr->pp_table_version == PP_TABLE_V1)
5057                 thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
5058                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
5059         else if (hwmgr->pp_table_version == PP_TABLE_V0)
5060                 thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
5061                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
5062
5063         return 0;
5064 }
5065
5066 static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
5067                                         enum PP_OD_DPM_TABLE_COMMAND type,
5068                                         uint32_t clk,
5069                                         uint32_t voltage)
5070 {
5071         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5072
5073         if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
5074                 pr_info("OD voltage is out of range [%d - %d] mV\n",
5075                                                 data->odn_dpm_table.min_vddc,
5076                                                 data->odn_dpm_table.max_vddc);
5077                 return false;
5078         }
5079
5080         if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
5081                 if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
5082                         hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
5083                         pr_info("OD engine clock is out of range [%d - %d] MHz\n",
5084                                 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
5085                                 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
5086                         return false;
5087                 }
5088         } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
5089                 if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
5090                         hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
5091                         pr_info("OD memory clock is out of range [%d - %d] MHz\n",
5092                                 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
5093                                 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
5094                         return false;
5095                 }
5096         } else {
5097                 return false;
5098         }
5099
5100         return true;
5101 }
5102
/**
 * Edit, reset or commit the OverDrive (ODN) DPM tables from user input.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @param    type   the action: edit SCLK or MCLK entries, restore the
 *                  default tables, or commit pending edits.
 * @param    input  triplets of (level index, clock in MHz, voltage in mV);
 *                  unused for restore/commit.
 * @param    size   number of longs in @input.
 * @return   0 on success (a malformed triplet only truncates processing),
 *           -EINVAL on bad arguments or out-of-range clock/voltage.
 */
static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
					enum PP_OD_DPM_TABLE_COMMAND type,
					long *input, uint32_t size)
{
	uint32_t i;
	struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
	struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	uint32_t input_clk;
	uint32_t input_vol;
	uint32_t input_level;

	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
				return -EINVAL);

	if (!hwmgr->od_enabled) {
		pr_info("OverDrive feature not enabled\n");
		return -EINVAL;
	}

	/* Pick the backend ODN tables matching the edit command, or handle
	 * the two table-wide actions (restore / commit) immediately.
	 */
	if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
				"Failed to get ODN SCLK and Voltage tables",
				return -EINVAL);
	} else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
		podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
		podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;

		PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
			"Failed to get ODN MCLK and Voltage tables",
			return -EINVAL);
	} else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
		smu7_odn_initial_default_setting(hwmgr);
		return 0;
	} else if (PP_OD_COMMIT_DPM_TABLE == type) {
		smu7_check_dpm_table_updated(hwmgr);
		return 0;
	} else {
		return -EINVAL;
	}

	/* Apply each (level, MHz, mV) triplet; clocks are stored internally
	 * in 10 kHz units, hence the *100 conversion from MHz.
	 */
	for (i = 0; i < size; i += 3) {
		if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
			pr_info("invalid clock voltage input \n");
			return 0;
		}
		input_level = input[i];
		input_clk = input[i+1] * 100;
		input_vol = input[i+2];

		if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
			/* Mirror the value into both the display table and
			 * the voltage-dependency table.
			 */
			podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
			podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
			podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
			podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
			podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
		} else {
			return -EINVAL;
		}
	}

	return 0;
}
5169
/**
 * Print the power-profile mode table into a sysfs buffer.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @param    buf    output buffer; one row per profile, '*' marking the
 *                  currently active mode.
 * @return   number of bytes written, or -EINVAL if @buf is NULL.
 */
static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t i, size = 0;
	uint32_t len;

	/* Row labels; indices correspond to the entries of smu7_profiling. */
	static const char *profile_name[7] = {"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};

	static const char *title[8] = {"NUM",
			"MODE_NAME",
			"SCLK_UP_HYST",
			"SCLK_DOWN_HYST",
			"SCLK_ACTIVE_LEVEL",
			"MCLK_UP_HYST",
			"MCLK_DOWN_HYST",
			"MCLK_ACTIVE_LEVEL"};

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
			title[0], title[1], title[2], title[3],
			title[4], title[5], title[6], title[7]);

	len = ARRAY_SIZE(smu7_profiling);

	for (i = 0; i < len; i++) {
		/* The active mode is printed from the live settings rather
		 * than the static table (CUSTOM may have been edited).
		 */
		if (i == hwmgr->power_profile_mode) {
			size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
			i, profile_name[i], "*",
			data->current_profile_setting.sclk_up_hyst,
			data->current_profile_setting.sclk_down_hyst,
			data->current_profile_setting.sclk_activity,
			data->current_profile_setting.mclk_up_hyst,
			data->current_profile_setting.mclk_down_hyst,
			data->current_profile_setting.mclk_activity);
			continue;
		}
		/* '-' columns mean the profile does not touch that clock domain. */
		if (smu7_profiling[i].bupdate_sclk)
			size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
			i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
			smu7_profiling[i].sclk_down_hyst,
			smu7_profiling[i].sclk_activity);
		else
			size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
			i, profile_name[i], "-", "-", "-");

		if (smu7_profiling[i].bupdate_mclk)
			size += sprintf(buf + size, "%16d %16d %16d\n",
			smu7_profiling[i].mclk_up_hyst,
			smu7_profiling[i].mclk_down_hyst,
			smu7_profiling[i].mclk_activity);
		else
			size += sprintf(buf + size, "%16s %16s %16s\n",
			"-", "-", "-");
	}

	return size;
}
5235
5236 static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
5237                                         enum PP_SMC_POWER_PROFILE requst)
5238 {
5239         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5240         uint32_t tmp, level;
5241
5242         if (requst == PP_SMC_POWER_PROFILE_COMPUTE) {
5243                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
5244                         level = 0;
5245                         tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
5246                         while (tmp >>= 1)
5247                                 level++;
5248                         if (level > 0)
5249                                 smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
5250                 }
5251         } else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
5252                 smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
5253         }
5254 }
5255
5256 static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
5257 {
5258         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5259         struct profile_mode_setting tmp;
5260         enum PP_SMC_POWER_PROFILE mode;
5261
5262         if (input == NULL)
5263                 return -EINVAL;
5264
5265         mode = input[size];
5266         switch (mode) {
5267         case PP_SMC_POWER_PROFILE_CUSTOM:
5268                 if (size < 8 && size != 0)
5269                         return -EINVAL;
5270                 /* If only CUSTOM is passed in, use the saved values. Check
5271                  * that we actually have a CUSTOM profile by ensuring that
5272                  * the "use sclk" or the "use mclk" bits are set
5273                  */
5274                 tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
5275                 if (size == 0) {
5276                         if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
5277                                 return -EINVAL;
5278                 } else {
5279                         tmp.bupdate_sclk = input[0];
5280                         tmp.sclk_up_hyst = input[1];
5281                         tmp.sclk_down_hyst = input[2];
5282                         tmp.sclk_activity = input[3];
5283                         tmp.bupdate_mclk = input[4];
5284                         tmp.mclk_up_hyst = input[5];
5285                         tmp.mclk_down_hyst = input[6];
5286                         tmp.mclk_activity = input[7];
5287                         smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
5288                 }
5289                 if (!smum_update_dpm_settings(hwmgr, &tmp)) {
5290                         memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
5291                         hwmgr->power_profile_mode = mode;
5292                 }
5293                 break;
5294         case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
5295         case PP_SMC_POWER_PROFILE_POWERSAVING:
5296         case PP_SMC_POWER_PROFILE_VIDEO:
5297         case PP_SMC_POWER_PROFILE_VR:
5298         case PP_SMC_POWER_PROFILE_COMPUTE:
5299                 if (mode == hwmgr->power_profile_mode)
5300                         return 0;
5301
5302                 memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
5303                 if (!smum_update_dpm_settings(hwmgr, &tmp)) {
5304                         if (tmp.bupdate_sclk) {
5305                                 data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
5306                                 data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
5307                                 data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
5308                                 data->current_profile_setting.sclk_activity = tmp.sclk_activity;
5309                         }
5310                         if (tmp.bupdate_mclk) {
5311                                 data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
5312                                 data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
5313                                 data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
5314                                 data->current_profile_setting.mclk_activity = tmp.mclk_activity;
5315                         }
5316                         smu7_patch_compute_profile_mode(hwmgr, mode);
5317                         hwmgr->power_profile_mode = mode;
5318                 }
5319                 break;
5320         default:
5321                 return -EINVAL;
5322         }
5323
5324         return 0;
5325 }
5326
5327 static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
5328                                 PHM_PerformanceLevelDesignation designation, uint32_t index,
5329                                 PHM_PerformanceLevel *level)
5330 {
5331         const struct smu7_power_state *ps;
5332         uint32_t i;
5333
5334         if (level == NULL || hwmgr == NULL || state == NULL)
5335                 return -EINVAL;
5336
5337         ps = cast_const_phw_smu7_power_state(state);
5338
5339         i = index > ps->performance_level_count - 1 ?
5340                         ps->performance_level_count - 1 : index;
5341
5342         level->coreClock = ps->performance_levels[i].engine_clock;
5343         level->memory_clock = ps->performance_levels[i].memory_clock;
5344
5345         return 0;
5346 }
5347
5348 static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
5349 {
5350         int result;
5351
5352         result = smu7_disable_dpm_tasks(hwmgr);
5353         PP_ASSERT_WITH_CODE((0 == result),
5354                         "[disable_dpm_tasks] Failed to disable DPM!",
5355                         );
5356
5357         return result;
5358 }
5359
5360 static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
5361         .backend_init = &smu7_hwmgr_backend_init,
5362         .backend_fini = &smu7_hwmgr_backend_fini,
5363         .asic_setup = &smu7_setup_asic_task,
5364         .dynamic_state_management_enable = &smu7_enable_dpm_tasks,
5365         .apply_state_adjust_rules = smu7_apply_state_adjust_rules,
5366         .force_dpm_level = &smu7_force_dpm_level,
5367         .power_state_set = smu7_set_power_state_tasks,
5368         .get_power_state_size = smu7_get_power_state_size,
5369         .get_mclk = smu7_dpm_get_mclk,
5370         .get_sclk = smu7_dpm_get_sclk,
5371         .patch_boot_state = smu7_dpm_patch_boot_state,
5372         .get_pp_table_entry = smu7_get_pp_table_entry,
5373         .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
5374         .powerdown_uvd = smu7_powerdown_uvd,
5375         .powergate_uvd = smu7_powergate_uvd,
5376         .powergate_vce = smu7_powergate_vce,
5377         .disable_clock_power_gating = smu7_disable_clock_power_gating,
5378         .update_clock_gatings = smu7_update_clock_gatings,
5379         .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
5380         .display_config_changed = smu7_display_configuration_changed_task,
5381         .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
5382         .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
5383         .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
5384         .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
5385         .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
5386         .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
5387         .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
5388         .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
5389         .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
5390         .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
5391         .register_irq_handlers = smu7_register_irq_handlers,
5392         .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
5393         .check_states_equal = smu7_check_states_equal,
5394         .set_fan_control_mode = smu7_set_fan_control_mode,
5395         .get_fan_control_mode = smu7_get_fan_control_mode,
5396         .force_clock_level = smu7_force_clock_level,
5397         .print_clock_levels = smu7_print_clock_levels,
5398         .powergate_gfx = smu7_powergate_gfx,
5399         .get_sclk_od = smu7_get_sclk_od,
5400         .set_sclk_od = smu7_set_sclk_od,
5401         .get_mclk_od = smu7_get_mclk_od,
5402         .set_mclk_od = smu7_set_mclk_od,
5403         .get_clock_by_type = smu7_get_clock_by_type,
5404         .read_sensor = smu7_read_sensor,
5405         .dynamic_state_management_disable = smu7_disable_dpm_tasks,
5406         .avfs_control = smu7_avfs_control,
5407         .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
5408         .start_thermal_controller = smu7_start_thermal_controller,
5409         .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
5410         .get_max_high_clocks = smu7_get_max_high_clocks,
5411         .get_thermal_temperature_range = smu7_get_thermal_temperature_range,
5412         .odn_edit_dpm_table = smu7_odn_edit_dpm_table,
5413         .set_power_limit = smu7_set_power_limit,
5414         .get_power_profile_mode = smu7_get_power_profile_mode,
5415         .set_power_profile_mode = smu7_set_power_profile_mode,
5416         .get_performance_level = smu7_get_performance_level,
5417         .get_asic_baco_capability = smu7_baco_get_capability,
5418         .get_asic_baco_state = smu7_baco_get_state,
5419         .set_asic_baco_state = smu7_baco_set_state,
5420         .power_off_asic = smu7_power_off_asic,
5421 };
5422
5423 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
5424                 uint32_t clock_insr)
5425 {
5426         uint8_t i;
5427         uint32_t temp;
5428         uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
5429
5430         PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
5431         for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
5432                 temp = clock >> i;
5433
5434                 if (temp >= min || i == 0)
5435                         break;
5436         }
5437         return i;
5438 }
5439
5440 int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
5441 {
5442         hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
5443         if (hwmgr->pp_table_version == PP_TABLE_V0)
5444                 hwmgr->pptable_func = &pptable_funcs;
5445         else if (hwmgr->pp_table_version == PP_TABLE_V1)
5446                 hwmgr->pptable_func = &pptable_v1_0_funcs;
5447
5448         return 0;
5449 }