drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include <drm/amdgpu_drm.h>
#include "ppatomctrl.h"
#include "atombios.h"
#include "pptable_v1_0.h"
#include "pppcielanes.h"
#include "amd_pcie_helpers.h"
#include "hardwaremanager.h"
#include "process_pptables_v1_0.h"
#include "cgs_common.h"

#include "smu7_common.h"

#include "hwmgr.h"
#include "smu7_hwmgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"
#include "smu7_baco.h"
#include "smu7_smumgr.h"
#include "polaris10_smumgr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define MC_CG_SEQ_DRAMCONF_S0       0x05
#define MC_CG_SEQ_DRAMCONF_S1       0x06
#define MC_CG_SEQ_YCLK_SUSPEND      0x04
#define MC_CG_SEQ_YCLK_RESUME       0x0a

#define SMC_CG_IND_START            0xc0030000
#define SMC_CG_IND_END              0xc0040000

#define MEM_FREQ_LOW_LATENCY        25000
#define MEM_FREQ_HIGH_LATENCY       80000

#define MEM_LATENCY_HIGH            45
#define MEM_LATENCY_LOW             35
#define MEM_LATENCY_ERR             0xFFFF

#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5

#define PCIE_BUS_CLK                10000
#define TCLK                        (PCIE_BUS_CLK / 10)
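/*
 * Each row corresponds to one PP_SMC_POWER_PROFILE_* workload mode; fields
 * follow struct profile_mode_setting:
 * { bupdate_sclk, sclk_up_hyst, sclk_down_hyst, sclk_activity,
 *   bupdate_mclk, mclk_up_hyst, mclk_down_hyst, mclk_activity }
 */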
static struct profile_mode_setting smu7_profiling[7] =
                                        {{0, 0, 0, 0, 0, 0, 0, 0},
                                         {1, 0, 100, 30, 1, 0, 100, 10},
                                         {1, 10, 0, 30, 0, 0, 0, 0},
                                         {0, 0, 0, 0, 1, 10, 16, 31},
                                         {1, 0, 11, 50, 1, 0, 100, 10},
                                         {1, 0, 5, 30, 0, 0, 0, 0},
                                         {0, 0, 0, 0, 0, 0, 0, 0},
                                        };

#define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)

#define ixPWR_SVI2_PLANE1_LOAD                     0xC0200280
#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK                    0x00000020L
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK                 0x00000040L
#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT                  0x00000005
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT               0x00000006

#define STRAP_EVV_REVISION_MSB          2211
#define STRAP_EVV_REVISION_LSB          2208

/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
        DPM_EVENT_SRC_ANALOG = 0,
        DPM_EVENT_SRC_EXTERNAL = 1,
        DPM_EVENT_SRC_DIGITAL = 2,
        DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
        DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};

#define ixDIDT_SQ_EDC_CTRL                         0x0013
#define ixDIDT_SQ_EDC_THRESHOLD                    0x0014
#define ixDIDT_SQ_EDC_STALL_PATTERN_1_2            0x0015
#define ixDIDT_SQ_EDC_STALL_PATTERN_3_4            0x0016
#define ixDIDT_SQ_EDC_STALL_PATTERN_5_6            0x0017
#define ixDIDT_SQ_EDC_STALL_PATTERN_7              0x0018

#define ixDIDT_TD_EDC_CTRL                         0x0053
#define ixDIDT_TD_EDC_THRESHOLD                    0x0054
#define ixDIDT_TD_EDC_STALL_PATTERN_1_2            0x0055
#define ixDIDT_TD_EDC_STALL_PATTERN_3_4            0x0056
#define ixDIDT_TD_EDC_STALL_PATTERN_5_6            0x0057
#define ixDIDT_TD_EDC_STALL_PATTERN_7              0x0058

#define ixDIDT_TCP_EDC_CTRL                        0x0073
#define ixDIDT_TCP_EDC_THRESHOLD                   0x0074
#define ixDIDT_TCP_EDC_STALL_PATTERN_1_2           0x0075
#define ixDIDT_TCP_EDC_STALL_PATTERN_3_4           0x0076
#define ixDIDT_TCP_EDC_STALL_PATTERN_5_6           0x0077
#define ixDIDT_TCP_EDC_STALL_PATTERN_7             0x0078

#define ixDIDT_DB_EDC_CTRL                         0x0033
#define ixDIDT_DB_EDC_THRESHOLD                    0x0034
#define ixDIDT_DB_EDC_STALL_PATTERN_1_2            0x0035
#define ixDIDT_DB_EDC_STALL_PATTERN_3_4            0x0036
#define ixDIDT_DB_EDC_STALL_PATTERN_5_6            0x0037
#define ixDIDT_DB_EDC_STALL_PATTERN_7              0x0038

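/*
 * Ordered list of the DIDT EDC registers programmed for each block
 * (SQ/TD/TCP/DB); presumably walked until the 0xFFFFFFFF terminator.
 */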
uint32_t DIDTEDCConfig_P12[] = {
        ixDIDT_SQ_EDC_STALL_PATTERN_1_2,
        ixDIDT_SQ_EDC_STALL_PATTERN_3_4,
        ixDIDT_SQ_EDC_STALL_PATTERN_5_6,
        ixDIDT_SQ_EDC_STALL_PATTERN_7,
        ixDIDT_SQ_EDC_THRESHOLD,
        ixDIDT_SQ_EDC_CTRL,
        ixDIDT_TD_EDC_STALL_PATTERN_1_2,
        ixDIDT_TD_EDC_STALL_PATTERN_3_4,
        ixDIDT_TD_EDC_STALL_PATTERN_5_6,
        ixDIDT_TD_EDC_STALL_PATTERN_7,
        ixDIDT_TD_EDC_THRESHOLD,
        ixDIDT_TD_EDC_CTRL,
        ixDIDT_TCP_EDC_STALL_PATTERN_1_2,
        ixDIDT_TCP_EDC_STALL_PATTERN_3_4,
        ixDIDT_TCP_EDC_STALL_PATTERN_5_6,
        ixDIDT_TCP_EDC_STALL_PATTERN_7,
        ixDIDT_TCP_EDC_THRESHOLD,
        ixDIDT_TCP_EDC_CTRL,
        ixDIDT_DB_EDC_STALL_PATTERN_1_2,
        ixDIDT_DB_EDC_STALL_PATTERN_3_4,
        ixDIDT_DB_EDC_STALL_PATTERN_5_6,
        ixDIDT_DB_EDC_STALL_PATTERN_7,
        ixDIDT_DB_EDC_THRESHOLD,
        ixDIDT_DB_EDC_CTRL,
        0xFFFFFFFF /* end-of-list terminator */
};

static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask);
static int smu7_notify_has_display(struct pp_hwmgr *hwmgr);

static struct smu7_power_state *cast_phw_smu7_power_state(
                                  struct pp_hw_power_state *hw_ps)
{
        PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
                                "Invalid Powerstate Type!",
                                 return NULL);

        return (struct smu7_power_state *)hw_ps;
}

static const struct smu7_power_state *cast_const_phw_smu7_power_state(
                                 const struct pp_hw_power_state *hw_ps)
{
        PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
                                "Invalid Powerstate Type!",
                                 return NULL);

        return (const struct smu7_power_state *)hw_ps;
}

/**
 * Find the MC microcode version and store it in the HwMgr struct
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
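        /* Index 0x9F in the MC SEQ IO debug RAM holds the MC ucode version
         * (assumption; the offset is only used here and is not named).
         */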
        cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

        hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

        return 0;
}

static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
        uint32_t speedCntl = 0;

        /* mmPCIE_PORT_INDEX was renamed to mmPCIE_INDEX */
        speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
                        ixPCIE_LC_SPEED_CNTL);
        return((uint16_t)PHM_GET_FIELD(speedCntl,
                        PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}

static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
        uint32_t link_width;

        /* mmPCIE_PORT_INDEX was renamed to mmPCIE_INDEX */
        link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
                        PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);

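        /* LC_LINK_WIDTH_RD is a 3-bit encoded width (0..7);
         * decode_pcie_lane_width() maps the encoding to a lane count.
         */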
        PP_ASSERT_WITH_CODE((7 >= link_width),
                        "Invalid PCIe lane width!", return 0);

        return decode_pcie_lane_width(link_width);
}

/**
 * Enable the SMC voltage controller
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
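        /* On Polaris10 through VEGAM, clear the SVI2 plane-1 PSI0/PSI1
         * power-save bits before handing voltage control to the SMC.
         */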
        if (hwmgr->chip_id >= CHIP_POLARIS10 &&
            hwmgr->chip_id <= CHIP_VEGAM) {
                PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
                                CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
                PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
                                CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
        }

        if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);

        return 0;
}

/**
 * Checks if we want to support voltage control
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 */
static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
{
        const struct smu7_hwmgr *data =
                        (const struct smu7_hwmgr *)(hwmgr->backend);

        return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
}

/**
 * Enable voltage control
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
        /* enable voltage control */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

        return 0;
}

static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
                struct phm_clock_voltage_dependency_table *voltage_dependency_table
                )
{
        uint32_t i;

        PP_ASSERT_WITH_CODE((NULL != voltage_table),
                        "Voltage Table empty.", return -EINVAL;);
        PP_ASSERT_WITH_CODE((NULL != voltage_dependency_table),
                        "Voltage Dependency Table empty.", return -EINVAL;);

        voltage_table->mask_low = 0;
        voltage_table->phase_delay = 0;
        voltage_table->count = voltage_dependency_table->count;

        for (i = 0; i < voltage_dependency_table->count; i++) {
                voltage_table->entries[i].value =
                        voltage_dependency_table->entries[i].v;
                voltage_table->entries[i].smio_low = 0;
        }

        return 0;
}


/**
 * Create Voltage Tables.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)hwmgr->pptable;
        int result = 0;
        uint32_t tmp;

        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
                result = atomctrl_get_voltage_table_v3(hwmgr,
                                VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
                                &(data->mvdd_voltage_table));
                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve MVDD table.",
                                return result);
        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
                if (hwmgr->pp_table_version == PP_TABLE_V1)
                        result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
                                        table_info->vdd_dep_on_mclk);
                else if (hwmgr->pp_table_version == PP_TABLE_V0)
                        result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
                                        hwmgr->dyn_state.mvdd_dependency_on_mclk);

                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve SVI2 MVDD table from dependency table.",
                                return result;);
        }

        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
                result = atomctrl_get_voltage_table_v3(hwmgr,
                                VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
                                &(data->vddci_voltage_table));
                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve VDDCI table.",
                                return result);
        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
                if (hwmgr->pp_table_version == PP_TABLE_V1)
                        result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
                                        table_info->vdd_dep_on_mclk);
                else if (hwmgr->pp_table_version == PP_TABLE_V0)
                        result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
                                        hwmgr->dyn_state.vddci_dependency_on_mclk);
                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve SVI2 VDDCI table from dependency table.",
                                return result);
        }

        if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
                /* VDDGFX has only SVI2 voltage control */
                result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
                                        table_info->vddgfx_lookup_table);
                PP_ASSERT_WITH_CODE((0 == result),
                        "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
        }


        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
                result = atomctrl_get_voltage_table_v3(hwmgr,
                                        VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
                                        &data->vddc_voltage_table);
                PP_ASSERT_WITH_CODE((0 == result),
                        "Failed to retrieve VDDC table.", return result;);
        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {

                if (hwmgr->pp_table_version == PP_TABLE_V0)
                        result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
                                        hwmgr->dyn_state.vddc_dependency_on_mclk);
                else if (hwmgr->pp_table_version == PP_TABLE_V1)
                        result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
                                table_info->vddc_lookup_table);

                PP_ASSERT_WITH_CODE((0 == result),
                        "Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
        }

        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
        PP_ASSERT_WITH_CODE(
                        (data->vddc_voltage_table.count <= tmp),
                "Too many voltage values for VDDC. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                                &(data->vddc_voltage_table)));

        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
        PP_ASSERT_WITH_CODE(
                        (data->vddgfx_voltage_table.count <= tmp),
                "Too many voltage values for VDDGFX. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                                &(data->vddgfx_voltage_table)));

        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
        PP_ASSERT_WITH_CODE(
                        (data->vddci_voltage_table.count <= tmp),
                "Too many voltage values for VDDCI. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                        &(data->vddci_voltage_table)));

        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
        PP_ASSERT_WITH_CODE(
                        (data->mvdd_voltage_table.count <= tmp),
                "Too many voltage values for MVDD. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                                &(data->mvdd_voltage_table)));

        return 0;
}

/**
 * Programs static screen detection parameters
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 */
static int smu7_program_static_screen_threshold_parameters(
                                                        struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        /* Set static screen threshold unit */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
                        data->static_screen_threshold_unit);
        /* Set static screen threshold */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
                        data->static_screen_threshold);

        return 0;
}

/**
 * Setup display gap for glitch free memory clock switching.
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always  0
 */
static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
{
        uint32_t display_gap =
                        cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                                        ixCG_DISPLAY_GAP_CNTL);

        display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
                        DISP_GAP, DISPLAY_GAP_IGNORE);

        display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
                        DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);

        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_DISPLAY_GAP_CNTL, display_gap);

        return 0;
}

/**
 * Programs activity state transition voting clients
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always  0
 */
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        int i;

        /* Clear reset for voting clients before enabling DPM */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

        for (i = 0; i < 8; i++)
                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                                        ixCG_FREQ_TRAN_VOTING_0 + i * 4,
                                        data->voting_rights_clients[i]);
        return 0;
}

static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
        int i;

        /* Reset voting clients before disabling DPM */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);

        for (i = 0; i < 8; i++)
                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                                ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);

        return 0;
}

/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
                uint32_t arb_src, uint32_t arb_dest)
{
        uint32_t mc_arb_dram_timing;
        uint32_t mc_arb_dram_timing2;
        uint32_t burst_time;
        uint32_t mc_cg_config;

        switch (arb_src) {
        case MC_CG_ARB_FREQ_F0:
                mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
                mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
                burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
                break;
        case MC_CG_ARB_FREQ_F1:
                mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
                mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
                burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
                break;
        default:
                return -EINVAL;
        }

        switch (arb_dest) {
        case MC_CG_ARB_FREQ_F0:
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
                PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
                break;
        case MC_CG_ARB_FREQ_F1:
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
                PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
                break;
        default:
                return -EINVAL;
        }

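        /* Request the freshly programmed set via MC_ARB_CG.CG_ARB_REQ; the
         * MC_CG_CONFIG low-nibble write appears to arm the MC CG state
         * machine first (assumption: those bits are not named in this file).
         */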
        mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
        mc_cg_config |= 0x0000000F;
        cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
        PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

        return 0;
}

static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
        return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
}

/**
 * Initial switch from ARB F0->F1
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   always 0
 *
 * This function is to be called from the SetPowerState table.
 */
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
        return smu7_copy_and_switch_arb_sets(hwmgr,
                        MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
{
        uint32_t tmp;

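        /* Bits [15:8] of SMC_SCRATCH9 report which ARB set is currently active. */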
        tmp = (cgs_read_ind_register(hwmgr->device,
                        CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
                        0x0000ff00) >> 8;

        if (tmp == MC_CG_ARB_FREQ_F0)
                return 0;

        return smu7_copy_and_switch_arb_sets(hwmgr,
                        tmp, MC_CG_ARB_FREQ_F0);
}

static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        struct phm_ppt_v1_pcie_table *pcie_table = NULL;

        uint32_t i, max_entry;
        uint32_t tmp;

        PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
                        data->use_pcie_power_saving_levels), "No pcie performance levels!",
                        return -EINVAL);

        if (table_info != NULL)
                pcie_table = table_info->pcie_table;

        if (data->use_pcie_performance_levels &&
                        !data->use_pcie_power_saving_levels) {
                data->pcie_gen_power_saving = data->pcie_gen_performance;
                data->pcie_lane_power_saving = data->pcie_lane_performance;
        } else if (!data->use_pcie_performance_levels &&
                        data->use_pcie_power_saving_levels) {
                data->pcie_gen_performance = data->pcie_gen_power_saving;
                data->pcie_lane_performance = data->pcie_lane_power_saving;
        }
        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
        phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
                                        tmp,
                                        MAX_REGULAR_DPM_NUMBER);

        if (pcie_table != NULL) {
                /* max_entry is used to make sure we reserve one PCIE level
                 * for the boot level (fix for A+A PSPP issue).
                 * If the PCIE table from the PPTable has a ULV entry plus
                 * 8 entries, ignore the last entry.
                 */
                max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
                for (i = 1; i < max_entry; i++) {
                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
                                        get_pcie_gen_support(data->pcie_gen_cap,
                                                        pcie_table->entries[i].gen_speed),
                                        get_pcie_lane_support(data->pcie_lane_cap,
                                                        pcie_table->entries[i].lane_width));
                }
                data->dpm_table.pcie_speed_table.count = max_entry - 1;
                smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
        } else {
                /* Hardcode Pcie Table */
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Min_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Min_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));

                data->dpm_table.pcie_speed_table.count = 6;
        }
        /* Populate last level for boot PCIE level, but do not increment count. */
        if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
                for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                data->vbios_boot_state.pcie_lane_bootup_value);
        } else {
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
                        data->dpm_table.pcie_speed_table.count,
                        get_pcie_gen_support(data->pcie_gen_cap,
                                        PP_Min_PCIEGen),
                        get_pcie_lane_support(data->pcie_lane_cap,
                                        PP_Max_PCIELane));
        }
        return 0;
}

static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));

        phm_reset_single_dpm_table(
                        &data->dpm_table.sclk_table,
                                smum_get_mac_definition(hwmgr,
                                        SMU_MAX_LEVELS_GRAPHICS),
                                        MAX_REGULAR_DPM_NUMBER);
        phm_reset_single_dpm_table(
                        &data->dpm_table.mclk_table,
                        smum_get_mac_definition(hwmgr,
                                SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);

        phm_reset_single_dpm_table(
                        &data->dpm_table.vddc_table,
                                smum_get_mac_definition(hwmgr,
                                        SMU_MAX_LEVELS_VDDC),
                                        MAX_REGULAR_DPM_NUMBER);
        phm_reset_single_dpm_table(
                        &data->dpm_table.vddci_table,
                        smum_get_mac_definition(hwmgr,
                                SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);

        phm_reset_single_dpm_table(
                        &data->dpm_table.mvdd_table,
                                smum_get_mac_definition(hwmgr,
                                        SMU_MAX_LEVELS_MVDD),
                                        MAX_REGULAR_DPM_NUMBER);
        return 0;
}

/*
 * This function is to initialize all DPM state tables
 * for SMU7 based on the dependency table.
 * Dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD request, etc.
 */
static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
                hwmgr->dyn_state.vddc_dependency_on_sclk;
        struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
                hwmgr->dyn_state.vddc_dependency_on_mclk;
        struct phm_cac_leakage_table *std_voltage_table =
                hwmgr->dyn_state.cac_leakage_table;
        uint32_t i;

        PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
                "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
        PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
                "SCLK dependency table must have at least one entry. This table is mandatory", return -EINVAL);

        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
                "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
                "MCLK dependency table must have at least one entry. This table is mandatory", return -EINVAL);


        /* Initialize Sclk DPM table based on allow Sclk values */
        data->dpm_table.sclk_table.count = 0;

        for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
                if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
                                allowed_vdd_sclk_table->entries[i].clk) {
                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
                                allowed_vdd_sclk_table->entries[i].clk;
                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
                        data->dpm_table.sclk_table.count++;
                }
        }

        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
                "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
        /* Initialize Mclk DPM table based on allow Mclk values */
        data->dpm_table.mclk_table.count = 0;
        for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
                if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
                        allowed_vdd_mclk_table->entries[i].clk) {
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
                                allowed_vdd_mclk_table->entries[i].clk;
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
                        data->dpm_table.mclk_table.count++;
                }
        }

        /* Initialize Vddc DPM table based on allow Vddc values.  And populate corresponding std values. */
        for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
                data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
                data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
                /* param1 is for corresponding std voltage */
                data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
        }

        data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
        allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

        if (NULL != allowed_vdd_mclk_table) {
                /* Initialize Vddci DPM table based on allow Mclk values */
                for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
                        data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
                        data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
                }
                data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
        }

        allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;

        if (NULL != allowed_vdd_mclk_table) {
                /*
                 * Initialize MVDD DPM table based on allow Mclk
                 * values
                 */
                for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
                        data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
                        data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
                }
                data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
        }

        return 0;
}

static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        uint32_t i;

        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
        struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;

        if (table_info == NULL)
                return -EINVAL;

        dep_sclk_table = table_info->vdd_dep_on_sclk;
        dep_mclk_table = table_info->vdd_dep_on_mclk;

        PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
                        "SCLK dependency table is missing.",
                        return -EINVAL);
        PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
                        "SCLK dependency table count is 0.",
                        return -EINVAL);

        PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
                        "MCLK dependency table is missing.",
                        return -EINVAL);
        PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
                        "MCLK dependency table count is 0.",
                        return -EINVAL);

        /* Initialize Sclk DPM table based on allow Sclk values */
        data->dpm_table.sclk_table.count = 0;
        for (i = 0; i < dep_sclk_table->count; i++) {
                if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
                                                dep_sclk_table->entries[i].clk) {

                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
                                        dep_sclk_table->entries[i].clk;

                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
                                        (i == 0) ? true : false;
                        data->dpm_table.sclk_table.count++;
                }
        }
        if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
                hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
        /* Initialize Mclk DPM table based on allow Mclk values */
        data->dpm_table.mclk_table.count = 0;
        for (i = 0; i < dep_mclk_table->count; i++) {
                if (i == 0 || data->dpm_table.mclk_table.dpm_levels
                                [data->dpm_table.mclk_table.count - 1].value !=
                                                dep_mclk_table->entries[i].clk) {
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
                                                        dep_mclk_table->entries[i].clk;
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
                                                        (i == 0) ? true : false;
                        data->dpm_table.mclk_table.count++;
                }
        }

        if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
                hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
        return 0;
}

static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        uint32_t i;

        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
        struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
        struct phm_odn_performance_level *entries;

        if (table_info == NULL)
                return -EINVAL;

        dep_sclk_table = table_info->vdd_dep_on_sclk;
        dep_mclk_table = table_info->vdd_dep_on_mclk;

        odn_table->odn_core_clock_dpm_levels.num_of_pl =
                                                data->golden_dpm_table.sclk_table.count;
        entries = odn_table->odn_core_clock_dpm_levels.entries;
        for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) {
                entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
                entries[i].enabled = true;
                entries[i].vddc = dep_sclk_table->entries[i].vddc;
        }

        smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
                (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));

        odn_table->odn_memory_clock_dpm_levels.num_of_pl =
                                                data->golden_dpm_table.mclk_table.count;
        entries = odn_table->odn_memory_clock_dpm_levels.entries;
        for (i = 0; i < data->golden_dpm_table.mclk_table.count; i++) {
                entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
                entries[i].enabled = true;
                entries[i].vddc = dep_mclk_table->entries[i].vddc;
        }

        smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
                (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));

        return 0;
}

static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        uint32_t min_vddc = 0;
        uint32_t max_vddc = 0;

        if (!table_info)
                return;

        dep_sclk_table = table_info->vdd_dep_on_sclk;

        atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);

        if (min_vddc == 0 || min_vddc > 2000
                || min_vddc > dep_sclk_table->entries[0].vddc)
                min_vddc = dep_sclk_table->entries[0].vddc;

        if (max_vddc == 0 || max_vddc > 2000
                || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
                max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;

        data->odn_dpm_table.min_vddc = min_vddc;
        data->odn_dpm_table.max_vddc = max_vddc;
}

static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        uint32_t i;

        struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
        struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;

        if (table_info == NULL)
                return;

        for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
                if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
                                        data->dpm_table.sclk_table.dpm_levels[i].value) {
                        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
                        break;
                }
        }

        for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
                if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
                                        data->dpm_table.mclk_table.dpm_levels[i].value) {
                        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
                        break;
                }
        }

        dep_table = table_info->vdd_dep_on_mclk;
        odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);

        for (i = 0; i < dep_table->count; i++) {
                if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
                        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
                        return;
                }
        }

        dep_table = table_info->vdd_dep_on_sclk;
        odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
        for (i = 0; i < dep_table->count; i++) {
                if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
                        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
                        return;
                }
        }
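        /* A VDDC-only override cannot be applied in isolation; fold it into
         * full SCLK and MCLK table updates instead.
         */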
        if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
                data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
                data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
        }
}

static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        smu7_reset_dpm_tables(hwmgr);

        if (hwmgr->pp_table_version == PP_TABLE_V1)
                smu7_setup_dpm_tables_v1(hwmgr);
        else if (hwmgr->pp_table_version == PP_TABLE_V0)
                smu7_setup_dpm_tables_v0(hwmgr);

        smu7_setup_default_pcie_table(hwmgr);

        /* save a copy of the default DPM table */
        memcpy(&(data->golden_dpm_table), &(data->dpm_table),
                        sizeof(struct smu7_dpm_table));

        /* initialize ODN table */
        if (hwmgr->od_enabled) {
                if (data->odn_dpm_table.max_vddc) {
                        smu7_check_dpm_table_updated(hwmgr);
                } else {
                        smu7_setup_voltage_range_from_vbios(hwmgr);
                        smu7_odn_initial_default_setting(hwmgr);
                }
        }
        return 0;
}

static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
{
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_RegulatorHot))
                return smum_send_msg_to_smc(hwmgr,
                                PPSMC_MSG_EnableVRHotGPIOInterrupt,
                                NULL);

        return 0;
}

static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
                        SCLK_PWRMGT_OFF, 0);
        return 0;
}

static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        if (data->ulv_supported)
                return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);

        return 0;
}

static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        if (data->ulv_supported)
                return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);

        return 0;
}

static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_SclkDeepSleep)) {
                if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to enable Master Deep Sleep switch failed!",
                                        return -EINVAL);
        } else {
                if (smum_send_msg_to_smc(hwmgr,
                                PPSMC_MSG_MASTER_DeepSleep_OFF,
                                NULL)) {
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to disable Master Deep Sleep switch failed!",
                                        return -EINVAL);
                }
        }

        return 0;
}

static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_SclkDeepSleep)) {
                if (smum_send_msg_to_smc(hwmgr,
                                PPSMC_MSG_MASTER_DeepSleep_OFF,
                                NULL)) {
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to disable Master Deep Sleep switch failed!",
                                        return -EINVAL);
                }
        }

        return 0;
}

static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        uint32_t soft_register_value = 0;
        uint32_t handshake_disables_offset = data->soft_regs_start
                                + smum_get_offsetof(hwmgr,
                                        SMU_SoftRegisters, HandshakeDisables);

        soft_register_value = cgs_read_ind_register(hwmgr->device,
                                CGS_IND_REG__SMC, handshake_disables_offset);
        soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        handshake_disables_offset, soft_register_value);
        return 0;
}

static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        uint32_t soft_register_value = 0;
        uint32_t handshake_disables_offset = data->soft_regs_start
                                + smum_get_offsetof(hwmgr,
                                        SMU_SoftRegisters, HandshakeDisables);

        soft_register_value = cgs_read_ind_register(hwmgr->device,
                                CGS_IND_REG__SMC, handshake_disables_offset);
        soft_register_value |= smum_get_mac_definition(hwmgr,
                                        SMU_UVD_MCLK_HANDSHAKE_DISABLE);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        handshake_disables_offset, soft_register_value);
        return 0;
}

static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        /* enable SCLK dpm */
        if (!data->sclk_dpm_key_disabled) {
                if (hwmgr->chip_id >= CHIP_POLARIS10 &&
                    hwmgr->chip_id <= CHIP_VEGAM)
                        smu7_disable_sclk_vce_handshake(hwmgr);

                PP_ASSERT_WITH_CODE(
                (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
                "Failed to enable SCLK DPM during DPM Start Function!",
                return -EINVAL);
        }

        /* enable MCLK dpm */
        if (0 == data->mclk_dpm_key_disabled) {
                if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
                        smu7_disable_handshake_uvd(hwmgr);

                PP_ASSERT_WITH_CODE(
                                (0 == smum_send_msg_to_smc(hwmgr,
                                                PPSMC_MSG_MCLKDPM_Enable,
                                                NULL)),
                                "Failed to enable MCLK DPM during DPM Start Function!",
                                return -EINVAL);

                if ((hwmgr->chip_family == AMDGPU_FAMILY_CI) ||
                    (hwmgr->chip_id == CHIP_POLARIS10) ||
                    (hwmgr->chip_id == CHIP_POLARIS11) ||
                    (hwmgr->chip_id == CHIP_POLARIS12) ||
                    (hwmgr->chip_id == CHIP_TONGA))
                        PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);


                if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
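                        /* CI parts lack the ixLCAC_* defines used in the VI
                         * branch below; these raw SMC addresses are presumed
                         * to be the CI equivalents of the LCAC MC0/MC1/CPL
                         * control registers (assumption).
                         */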
1185                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
1186                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
1187                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
1188                         udelay(10);
1189                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
1190                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
1191                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
1192                 } else {
1193                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
1194                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
1195                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
1196                         udelay(10);
1197                         if (hwmgr->chip_id == CHIP_VEGAM) {
1198                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
1199                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
1200                         } else {
1201                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
1202                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
1203                         }
1204                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
1205                 }
1206         }
1207
1208         return 0;
1209 }
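/*
 * Note on the LCAC programming above: the VI-family path uses the
 * ixLCAC_MC0_CNTL/ixLCAC_MC1_CNTL/ixLCAC_CPL_CNTL indirect register
 * defines, while the CI path writes raw SMC indirect addresses
 * (0xc0400d30 and friends), which appear to be the CI equivalents of the
 * same LCAC control registers.  Both paths follow the same sequence:
 * write the initial values (0x5 / 0x100005), wait ~10us, then write the
 * final values; VEGAM differs only in the final MC values (0x400009).
 */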
1210
1211 static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
1212 {
1213         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1214
1215         /* enable general power management */
1216
1217         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1218                         GLOBAL_PWRMGT_EN, 1);
1219
1220         /* enable sclk deep sleep */
1221
1222         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1223                         DYNAMIC_PM_EN, 1);
1224
1225         /* prepare for PCIE DPM */
1226
1227         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1228                         data->soft_regs_start +
1229                         smum_get_offsetof(hwmgr, SMU_SoftRegisters,
1230                                                 VoltageChangeTimeout), 0x1000);
1231         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
1232                         SWRST_COMMAND_1, RESETLC, 0x0);
1233
1234         if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
1235                 cgs_write_register(hwmgr->device, 0x1488,
1236                         (cgs_read_register(hwmgr->device, 0x1488) & ~0x1));
1237
1238         if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
1239                 pr_err("Failed to enable SCLK DPM and MCLK DPM!\n");
1240                 return -EINVAL;
1241         }
1242
1243         /* enable PCIE dpm */
1244         if (0 == data->pcie_dpm_key_disabled) {
1245                 PP_ASSERT_WITH_CODE(
1246                                 (0 == smum_send_msg_to_smc(hwmgr,
1247                                                 PPSMC_MSG_PCIeDPM_Enable,
1248                                                 NULL)),
1249                                 "Failed to enable PCIe DPM during DPM Start Function!",
1250                                 return -EINVAL);
1251         }
1252
1253         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1254                                 PHM_PlatformCaps_Falcon_QuickTransition)) {
1255                 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
1256                                 PPSMC_MSG_EnableACDCGPIOInterrupt,
1257                                 NULL)),
1258                                 "Failed to enable AC DC GPIO Interrupt!",
1259                                 );
1260         }
1261
1262         return 0;
1263 }
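/*
 * The PP_ASSERT_WITH_CODE() calls used throughout this file are not
 * assertions in the BUG_ON() sense.  The macro (see pp_debug.h) expands
 * roughly to:
 *
 *     if (!(cond)) {
 *             pr_warn("msg");
 *             code;
 *     }
 *
 * so the third argument decides what happens on failure: return an error
 * (as in smu7_start_dpm() above), record it and continue
 * (result = tmp_result), or stay empty to merely log.
 */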
1264
1265 static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1266 {
1267         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1268
1269         /* disable SCLK dpm */
1270         if (!data->sclk_dpm_key_disabled) {
1271                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1272                                 "Trying to disable SCLK DPM when DPM is disabled",
1273                                 return 0);
1274                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
1275         }
1276
1277         /* disable MCLK dpm */
1278         if (!data->mclk_dpm_key_disabled) {
1279                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1280                                 "Trying to disable MCLK DPM when DPM is disabled",
1281                                 return 0);
1282                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
1283         }
1284
1285         return 0;
1286 }
1287
1288 static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
1289 {
1290         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1291
1292         /* disable general power management */
1293         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1294                         GLOBAL_PWRMGT_EN, 0);
1295         /* disable sclk deep sleep */
1296         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1297                         DYNAMIC_PM_EN, 0);
1298
1299         /* disable PCIE dpm */
1300         if (!data->pcie_dpm_key_disabled) {
1301                 PP_ASSERT_WITH_CODE(
1302                                 (smum_send_msg_to_smc(hwmgr,
1303                                                 PPSMC_MSG_PCIeDPM_Disable,
1304                                                 NULL) == 0),
1305                                 "Failed to disable PCIe DPM during DPM Stop Function!",
1306                                 return -EINVAL);
1307         }
1308
1309         smu7_disable_sclk_mclk_dpm(hwmgr);
1310
1311         PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1312                         "Trying to disable voltage DPM when DPM is disabled",
1313                         return 0);
1314
1315         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);
1316
1317         return 0;
1318 }
1319
1320 static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
1321 {
1322         bool protection;
1323         enum DPM_EVENT_SRC src;
1324
1325         switch (sources) {
1326         default:
1327                 pr_err("Unknown throttling event sources.\n");
1328                 fallthrough;
1329         case 0:
1330                 protection = false;
1331                 /* src is unused */
1332                 break;
1333         case (1 << PHM_AutoThrottleSource_Thermal):
1334                 protection = true;
1335                 src = DPM_EVENT_SRC_DIGITAL;
1336                 break;
1337         case (1 << PHM_AutoThrottleSource_External):
1338                 protection = true;
1339                 src = DPM_EVENT_SRC_EXTERNAL;
1340                 break;
1341         case (1 << PHM_AutoThrottleSource_External) |
1342                         (1 << PHM_AutoThrottleSource_Thermal):
1343                 protection = true;
1344                 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
1345                 break;
1346         }
1347         /* Order matters - don't enable thermal protection for the wrong source. */
1348         if (protection) {
1349                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
1350                                 DPM_EVENT_SRC, src);
1351                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1352                                 THERMAL_PROTECTION_DIS,
1353                                 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1354                                                 PHM_PlatformCaps_ThermalController));
1355         } else
1356                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1357                                 THERMAL_PROTECTION_DIS, 1);
1358 }
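/*
 * 'sources' is a bitmask of PHM_AutoThrottleSource bits, which is why the
 * switch matches the (1 << source) combinations explicitly.  For example,
 * enabling only thermal throttling reaches this function as:
 *
 *     smu7_set_dpm_event_sources(hwmgr,
 *                     1 << PHM_AutoThrottleSource_Thermal);
 *
 * which selects DPM_EVENT_SRC_DIGITAL and leaves thermal protection
 * enabled whenever PHM_PlatformCaps_ThermalController is set.
 */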
1359
1360 static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1361                 PHM_AutoThrottleSource source)
1362 {
1363         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1364
1365         if (!(data->active_auto_throttle_sources & (1 << source))) {
1366                 data->active_auto_throttle_sources |= 1 << source;
1367                 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1368         }
1369         return 0;
1370 }
1371
1372 static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1373 {
1374         return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1375 }
1376
1377 static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1378                 PHM_AutoThrottleSource source)
1379 {
1380         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1381
1382         if (data->active_auto_throttle_sources & (1 << source)) {
1383                 data->active_auto_throttle_sources &= ~(1 << source);
1384                 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1385         }
1386         return 0;
1387 }
1388
1389 static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1390 {
1391         return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1392 }
1393
1394 static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
1395 {
1396         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1397         data->pcie_performance_request = true;
1398
1399         return 0;
1400 }
1401
1402 static int smu7_program_edc_didt_registers(struct pp_hwmgr *hwmgr,
1403                                            uint32_t *cac_config_regs,
1404                                            AtomCtrl_EDCLeakgeTable *edc_leakage_table)
1405 {
1406         uint32_t data, i = 0;
1407
1408         while (cac_config_regs[i] != 0xFFFFFFFF) {
1409                 data = edc_leakage_table->DIDT_REG[i];
1410                 cgs_write_ind_register(hwmgr->device,
1411                                        CGS_IND_REG__DIDT,
1412                                        cac_config_regs[i],
1413                                        data);
1414                 i++;
1415         }
1416
1417         return 0;
1418 }
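/*
 * cac_config_regs is a 0xFFFFFFFF-terminated list of DIDT register
 * offsets; entry i of the list is paired with DIDT_REG[i] from the
 * VBIOS-provided EDC leakage table.  A hypothetical two-register config
 * would be laid out as:
 *
 *     static uint32_t regs[] = { REG_A, REG_B, 0xFFFFFFFF };
 *
 * where REG_A/REG_B stand in for real DIDT offsets such as those in the
 * DIDTEDCConfig_P12 table used below.
 */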
1419
1420 static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
1421 {
1422         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1423         int ret = 0;
1424
1425         if (!data->disable_edc_leakage_controller &&
1426             data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
1427             data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
1428                 ret = smu7_program_edc_didt_registers(hwmgr,
1429                                                       DIDTEDCConfig_P12,
1430                                                       &data->edc_leakage_table);
1431                 if (ret)
1432                         return ret;
1433
1434                 ret = smum_send_msg_to_smc(hwmgr,
1435                                            (PPSMC_Msg)PPSMC_MSG_EnableEDCController,
1436                                            NULL);
1437         } else {
1438                 ret = smum_send_msg_to_smc(hwmgr,
1439                                            (PPSMC_Msg)PPSMC_MSG_DisableEDCController,
1440                                            NULL);
1441         }
1442
1443         return ret;
1444 }
1445
1446 static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1447 {
1448         int tmp_result = 0;
1449         int result = 0;
1450
1451         if (smu7_voltage_control(hwmgr)) {
1452                 tmp_result = smu7_enable_voltage_control(hwmgr);
1453                 PP_ASSERT_WITH_CODE(tmp_result == 0,
1454                                 "Failed to enable voltage control!",
1455                                 result = tmp_result);
1456
1457                 tmp_result = smu7_construct_voltage_tables(hwmgr);
1458                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1459                                 "Failed to construct voltage tables!",
1460                                 result = tmp_result);
1461         }
1462         smum_initialize_mc_reg_table(hwmgr);
1463
1464         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1465                         PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1466                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1467                                 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1468
1469         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1470                         PHM_PlatformCaps_ThermalController))
1471                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1472                                 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1473
1474         tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1475         PP_ASSERT_WITH_CODE((0 == tmp_result),
1476                         "Failed to program static screen threshold parameters!",
1477                         result = tmp_result);
1478
1479         tmp_result = smu7_enable_display_gap(hwmgr);
1480         PP_ASSERT_WITH_CODE((0 == tmp_result),
1481                         "Failed to enable display gap!", result = tmp_result);
1482
1483         tmp_result = smu7_program_voting_clients(hwmgr);
1484         PP_ASSERT_WITH_CODE((0 == tmp_result),
1485                         "Failed to program voting clients!", result = tmp_result);
1486
1487         tmp_result = smum_process_firmware_header(hwmgr);
1488         PP_ASSERT_WITH_CODE((0 == tmp_result),
1489                         "Failed to process firmware header!", result = tmp_result);
1490
1491         if (hwmgr->chip_id != CHIP_VEGAM) {
1492                 tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1493                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1494                                 "Failed to initialize switch from ArbF0 to F1!",
1495                                 result = tmp_result);
1496         }
1497
1498         result = smu7_setup_default_dpm_tables(hwmgr);
1499         PP_ASSERT_WITH_CODE(0 == result,
1500                         "Failed to setup default DPM tables!", return result);
1501
1502         tmp_result = smum_init_smc_table(hwmgr);
1503         PP_ASSERT_WITH_CODE((0 == tmp_result),
1504                         "Failed to initialize SMC table!", result = tmp_result);
1505
1506         tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1507         PP_ASSERT_WITH_CODE((0 == tmp_result),
1508                         "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1509
1510         if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1511             hwmgr->chip_id <= CHIP_VEGAM) {
1512                 tmp_result = smu7_notify_has_display(hwmgr);
1513                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1514                                 "Failed to notify SMC of display presence!", result = tmp_result);
1515         } else {
1516                 smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
1517         }
1518
1519         if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1520             hwmgr->chip_id <= CHIP_VEGAM) {
1521                 tmp_result = smu7_populate_edc_leakage_registers(hwmgr);
1522                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1523                                 "Failed to populate EDC leakage registers!", result = tmp_result);
1524         }
1525
1526         tmp_result = smu7_enable_sclk_control(hwmgr);
1527         PP_ASSERT_WITH_CODE((0 == tmp_result),
1528                         "Failed to enable SCLK control!", result = tmp_result);
1529
1530         tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1531         PP_ASSERT_WITH_CODE((0 == tmp_result),
1532                         "Failed to enable voltage control!", result = tmp_result);
1533
1534         tmp_result = smu7_enable_ulv(hwmgr);
1535         PP_ASSERT_WITH_CODE((0 == tmp_result),
1536                         "Failed to enable ULV!", result = tmp_result);
1537
1538         tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1539         PP_ASSERT_WITH_CODE((0 == tmp_result),
1540                         "Failed to enable deep sleep master switch!", result = tmp_result);
1541
1542         tmp_result = smu7_enable_didt_config(hwmgr);
1543         PP_ASSERT_WITH_CODE((tmp_result == 0),
1544                         "Failed to enable DIDT config!", result = tmp_result);
1545
1546         tmp_result = smu7_start_dpm(hwmgr);
1547         PP_ASSERT_WITH_CODE((0 == tmp_result),
1548                         "Failed to start DPM!", result = tmp_result);
1549
1550         tmp_result = smu7_enable_smc_cac(hwmgr);
1551         PP_ASSERT_WITH_CODE((0 == tmp_result),
1552                         "Failed to enable SMC CAC!", result = tmp_result);
1553
1554         tmp_result = smu7_enable_power_containment(hwmgr);
1555         PP_ASSERT_WITH_CODE((0 == tmp_result),
1556                         "Failed to enable power containment!", result = tmp_result);
1557
1558         tmp_result = smu7_power_control_set_level(hwmgr);
1559         PP_ASSERT_WITH_CODE((0 == tmp_result),
1560                         "Failed to set power control level!", result = tmp_result);
1561
1562         tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1563         PP_ASSERT_WITH_CODE((0 == tmp_result),
1564                         "Failed to enable thermal auto throttle!", result = tmp_result);
1565
1566         tmp_result = smu7_pcie_performance_request(hwmgr);
1567         PP_ASSERT_WITH_CODE((0 == tmp_result),
1568                         "PCIe performance request failed!", result = tmp_result);
1569
1570         return 0;
1571 }
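/*
 * Error handling pattern in smu7_enable_dpm_tasks(): each step records a
 * failure in 'result' but the bring-up keeps going, e.g.:
 *
 *     tmp_result = smu7_enable_ulv(hwmgr);
 *     PP_ASSERT_WITH_CODE((0 == tmp_result),
 *                     "Failed to enable ULV!", result = tmp_result);
 *
 * Only smu7_setup_default_dpm_tables() aborts the sequence outright,
 * since every later step depends on those tables.  Note that the function
 * returns 0 even when 'result' ends up holding an error.
 */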
1572
1573 static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
1574 {
1575         if (!hwmgr->avfs_supported)
1576                 return 0;
1577
1578         if (enable) {
1579                 if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1580                                 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1581                         PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1582                                         hwmgr, PPSMC_MSG_EnableAvfs, NULL),
1583                                         "Failed to enable AVFS!",
1584                                         return -EINVAL);
1585                 }
1586         } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1587                         CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1588                 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1589                                 hwmgr, PPSMC_MSG_DisableAvfs, NULL),
1590                                 "Failed to disable AVFS!",
1591                                 return -EINVAL);
1592         }
1593
1594         return 0;
1595 }
1596
1597 static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
1598 {
1599         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1600
1601         if (!hwmgr->avfs_supported)
1602                 return 0;
1603
1604         if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1605                 smu7_avfs_control(hwmgr, false);
1606         } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
1607                 smu7_avfs_control(hwmgr, false);
1608                 smu7_avfs_control(hwmgr, true);
1609         } else {
1610                 smu7_avfs_control(hwmgr, true);
1611         }
1612
1613         return 0;
1614 }
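/*
 * AVFS versus overdrive: a manual VDDC override (DPMTABLE_OD_UPDATE_VDDC)
 * would fight the AVFS-managed voltage, so AVFS is switched off; an SCLK
 * change (DPMTABLE_OD_UPDATE_SCLK) only needs the voltage curve
 * re-evaluated, so AVFS is bounced off and back on; otherwise AVFS is
 * simply kept enabled.
 */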
1615
1616 static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1617 {
1618         int tmp_result, result = 0;
1619
1620         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1621                         PHM_PlatformCaps_ThermalController))
1622                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1623                                 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
1624
1625         tmp_result = smu7_disable_power_containment(hwmgr);
1626         PP_ASSERT_WITH_CODE((tmp_result == 0),
1627                         "Failed to disable power containment!", result = tmp_result);
1628
1629         tmp_result = smu7_disable_smc_cac(hwmgr);
1630         PP_ASSERT_WITH_CODE((tmp_result == 0),
1631                         "Failed to disable SMC CAC!", result = tmp_result);
1632
1633         tmp_result = smu7_disable_didt_config(hwmgr);
1634         PP_ASSERT_WITH_CODE((tmp_result == 0),
1635                         "Failed to disable DIDT!", result = tmp_result);
1636
1637         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1638                         CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
1639         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1640                         GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
1641
1642         tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
1643         PP_ASSERT_WITH_CODE((tmp_result == 0),
1644                         "Failed to disable thermal auto throttle!", result = tmp_result);
1645
1646         tmp_result = smu7_avfs_control(hwmgr, false);
1647         PP_ASSERT_WITH_CODE((tmp_result == 0),
1648                         "Failed to disable AVFS!", result = tmp_result);
1649
1650         tmp_result = smu7_stop_dpm(hwmgr);
1651         PP_ASSERT_WITH_CODE((tmp_result == 0),
1652                         "Failed to stop DPM!", result = tmp_result);
1653
1654         tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
1655         PP_ASSERT_WITH_CODE((tmp_result == 0),
1656                         "Failed to disable deep sleep master switch!", result = tmp_result);
1657
1658         tmp_result = smu7_disable_ulv(hwmgr);
1659         PP_ASSERT_WITH_CODE((tmp_result == 0),
1660                         "Failed to disable ULV!", result = tmp_result);
1661
1662         tmp_result = smu7_clear_voting_clients(hwmgr);
1663         PP_ASSERT_WITH_CODE((tmp_result == 0),
1664                         "Failed to clear voting clients!", result = tmp_result);
1665
1666         tmp_result = smu7_reset_to_default(hwmgr);
1667         PP_ASSERT_WITH_CODE((tmp_result == 0),
1668                         "Failed to reset to default!", result = tmp_result);
1669
1670         tmp_result = smum_stop_smc(hwmgr);
1671         PP_ASSERT_WITH_CODE((tmp_result == 0),
1672                         "Failed to stop SMC!", result = tmp_result);
1673
1674         tmp_result = smu7_force_switch_to_arbf0(hwmgr);
1675         PP_ASSERT_WITH_CODE((tmp_result == 0),
1676                         "Failed to force switch to ArbF0!", result = tmp_result);
1677
1678         return result;
1679 }
1680
1681 static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
1682 {
1683         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1684         struct phm_ppt_v1_information *table_info =
1685                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
1686         struct amdgpu_device *adev = hwmgr->adev;
1687         uint8_t tmp1, tmp2;
1688         uint16_t tmp3 = 0;
1689
1690         data->dll_default_on = false;
1691         data->mclk_dpm0_activity_target = 0xa;
1692         data->vddc_vddgfx_delta = 300;
1693         data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
1694         data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
1695         data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
1696         data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
1697         data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
1698         data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
1699         data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
1700         data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
1701         data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
1702         data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
1703
1704         data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
1705         data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
1706         data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
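        /*
         * feature_mask is seeded from the amdgpu.ppfeaturemask module
         * parameter, so individual DPM domains can be masked off from the
         * kernel command line; booting with the PP_MCLK_DPM_MASK bit
         * cleared, for instance, lands here as
         * mclk_dpm_key_disabled == true.
         */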
1707         /* need to set voltage control types before EVV patching */
1708         data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
1709         data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
1710         data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
1711         data->enable_tdc_limit_feature = true;
1712         data->enable_pkg_pwr_tracking_feature = true;
1713         data->force_pcie_gen = PP_PCIEGenInvalid;
1714         data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
1715         data->current_profile_setting.bupdate_sclk = 1;
1716         data->current_profile_setting.sclk_up_hyst = 0;
1717         data->current_profile_setting.sclk_down_hyst = 100;
1718         data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
1719         data->current_profile_setting.bupdate_mclk = 1;
1720         if (hwmgr->chip_id >= CHIP_POLARIS10) {
1721                 if (adev->gmc.vram_width == 256) {
1722                         data->current_profile_setting.mclk_up_hyst = 10;
1723                         data->current_profile_setting.mclk_down_hyst = 60;
1724                         data->current_profile_setting.mclk_activity = 25;
1725                 } else if (adev->gmc.vram_width == 128) {
1726                         data->current_profile_setting.mclk_up_hyst = 5;
1727                         data->current_profile_setting.mclk_down_hyst = 16;
1728                         data->current_profile_setting.mclk_activity = 20;
1729                 } else if (adev->gmc.vram_width == 64) {
1730                         data->current_profile_setting.mclk_up_hyst = 3;
1731                         data->current_profile_setting.mclk_down_hyst = 16;
1732                         data->current_profile_setting.mclk_activity = 20;
1733                 }
1734         } else {
1735                 data->current_profile_setting.mclk_up_hyst = 0;
1736                 data->current_profile_setting.mclk_down_hyst = 100;
1737                 data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
1738         }
1739         hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
1740         hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1741         hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1742
1743         if (hwmgr->chip_id == CHIP_HAWAII) {
1744                 data->thermal_temp_setting.temperature_low = 94500;
1745                 data->thermal_temp_setting.temperature_high = 95000;
1746                 data->thermal_temp_setting.temperature_shutdown = 104000;
1747         } else {
1748                 data->thermal_temp_setting.temperature_low = 99500;
1749                 data->thermal_temp_setting.temperature_high = 100000;
1750                 data->thermal_temp_setting.temperature_shutdown = 104000;
1751         }
1752
1753         data->fast_watermark_threshold = 100;
1754         if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1755                         VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
1756                 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1757         else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1758                         VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
1759                 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1760
1761         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1762                         PHM_PlatformCaps_ControlVDDGFX)) {
1763                 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1764                         VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
1765                         data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1766                 }
1767         }
1768
1769         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1770                         PHM_PlatformCaps_EnableMVDDControl)) {
1771                 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1772                                 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
1773                         data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1774                 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1775                                 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
1776                         data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1777         }
1778
1779         if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
1780                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1781                         PHM_PlatformCaps_ControlVDDGFX);
1782
1783         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1784                         PHM_PlatformCaps_ControlVDDCI)) {
1785                 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1786                                 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
1787                         data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1788                 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1789                                 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
1790                         data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1791         }
1792
1793         if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
1794                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1795                                 PHM_PlatformCaps_EnableMVDDControl);
1796
1797         if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
1798                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1799                                 PHM_PlatformCaps_ControlVDDCI);
1800
1801         data->vddc_phase_shed_control = 1;
1802         if ((hwmgr->chip_id == CHIP_POLARIS12) ||
1803             ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) ||
1804             ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
1805             ASICID_IS_P30(adev->pdev->device, adev->pdev->revision) ||
1806             ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
1807                 if (data->voltage_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1808                         atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
1809                                                         &tmp3);
1810                         tmp3 = (tmp3 >> 5) & 0x3;
1811                         data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
1812                 }
1813         } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1814                 data->vddc_phase_shed_control = 1;
1815         }
1816
1817         if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
1818                 && (table_info->cac_dtp_table->usClockStretchAmount != 0))
1819                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1820                                         PHM_PlatformCaps_ClockStretcher);
1821
1822         data->pcie_gen_performance.max = PP_PCIEGen1;
1823         data->pcie_gen_performance.min = PP_PCIEGen3;
1824         data->pcie_gen_power_saving.max = PP_PCIEGen1;
1825         data->pcie_gen_power_saving.min = PP_PCIEGen3;
1826         data->pcie_lane_performance.max = 0;
1827         data->pcie_lane_performance.min = 16;
1828         data->pcie_lane_power_saving.max = 0;
1829         data->pcie_lane_power_saving.min = 16;
1830
1831
1832         if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
1833                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1834                               PHM_PlatformCaps_UVDPowerGating);
1835         if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
1836                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1837                               PHM_PlatformCaps_VCEPowerGating);
1838
1839         data->disable_edc_leakage_controller = true;
1840         if (((adev->asic_type == CHIP_POLARIS10) && hwmgr->is_kicker) ||
1841             ((adev->asic_type == CHIP_POLARIS11) && hwmgr->is_kicker) ||
1842             (adev->asic_type == CHIP_POLARIS12) ||
1843             (adev->asic_type == CHIP_VEGAM))
1844                 data->disable_edc_leakage_controller = false;
1845
1846         if (!atomctrl_is_asic_internal_ss_supported(hwmgr)) {
1847                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1848                         PHM_PlatformCaps_MemorySpreadSpectrumSupport);
1849                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1850                         PHM_PlatformCaps_EngineSpreadSpectrumSupport);
1851         }
1852
1853         if ((adev->pdev->device == 0x699F) &&
1854             (adev->pdev->revision == 0xCF)) {
1855                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1856                                 PHM_PlatformCaps_PowerContainment);
1857                 data->enable_tdc_limit_feature = false;
1858                 data->enable_pkg_pwr_tracking_feature = false;
1859                 data->disable_edc_leakage_controller = true;
1860                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1861                                         PHM_PlatformCaps_ClockStretcher);
1862         }
1863 }
1864
1865 static int smu7_calculate_ro_range(struct pp_hwmgr *hwmgr)
1866 {
1867         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1868         struct amdgpu_device *adev = hwmgr->adev;
1869         uint32_t asicrev1, evv_revision, max = 0, min = 0;
1870
1871         atomctrl_read_efuse(hwmgr, STRAP_EVV_REVISION_LSB, STRAP_EVV_REVISION_MSB,
1872                         &evv_revision);
1873
1874         atomctrl_read_efuse(hwmgr, 568, 579, &asicrev1);
1875
1876         if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) ||
1877             ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
1878                 min = 1200;
1879                 max = 2500;
1880         } else if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
1881                    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
1882                 min = 900;
1883                 max = 2100;
1884         } else if (hwmgr->chip_id == CHIP_POLARIS10) {
1885                 if (adev->pdev->subsystem_vendor == 0x106B) {
1886                         min = 1000;
1887                         max = 2300;
1888                 } else {
1889                         if (evv_revision == 0) {
1890                                 min = 1000;
1891                                 max = 2300;
1892                         } else if (evv_revision == 1) {
1893                                 if (asicrev1 == 326) {
1894                                         min = 1200;
1895                                         max = 2500;
1896                                         /* TODO: PATCH RO in VBIOS */
1897                                 } else {
1898                                         min = 1200;
1899                                         max = 2000;
1900                                 }
1901                         } else if (evv_revision == 2) {
1902                                 min = 1200;
1903                                 max = 2500;
1904                         }
1905                 }
1906         } else if ((hwmgr->chip_id == CHIP_POLARIS11) ||
1907                    (hwmgr->chip_id == CHIP_POLARIS12)) {
1908                 min = 1100;
1909                 max = 2100;
1910         }
1911
1912         data->ro_range_minimum = min;
1913         data->ro_range_maximum = max;
1914
1915         /* TODO: PATCH RO in VBIOS here */
1916
1917         return 0;
1918 }
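/*
 * The [min, max] pair is the expected ring-oscillator (RO) fuse range for
 * the SKU, selected from the device/revision ID and the EVV fuse revision
 * read via atomctrl_read_efuse().  A part matching none of the branches
 * keeps the zero defaults from the declaration above.
 */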
1919
1920 /**
1921  * smu7_get_evv_voltages - Get leakage VDDC based on leakage ID.
1922  * @hwmgr: the address of the powerplay hardware manager.
1923  *
1924  * Return: always 0
1925  */
1926 static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1927 {
1928         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1929         uint16_t vv_id;
1930         uint16_t vddc = 0;
1931         uint16_t vddgfx = 0;
1932         uint16_t i, j;
1933         uint32_t sclk = 0;
1934         struct phm_ppt_v1_information *table_info =
1935                         (struct phm_ppt_v1_information *)hwmgr->pptable;
1936         struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
1937
1938         if (hwmgr->chip_id == CHIP_POLARIS10 ||
1939             hwmgr->chip_id == CHIP_POLARIS11 ||
1940             hwmgr->chip_id == CHIP_POLARIS12)
1941                 smu7_calculate_ro_range(hwmgr);
1942
1943         for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
1944                 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1945
1946                 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1947                         if ((hwmgr->pp_table_version == PP_TABLE_V1)
1948                             && !phm_get_sclk_for_voltage_evv(hwmgr,
1949                                                 table_info->vddgfx_lookup_table, vv_id, &sclk)) {
1950                                 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1951                                                         PHM_PlatformCaps_ClockStretcher)) {
1952                                         sclk_table = table_info->vdd_dep_on_sclk;
1953
1954                                         for (j = 1; j < sclk_table->count; j++) {
1955                                                 if (sclk_table->entries[j].clk == sclk &&
1956                                                                 sclk_table->entries[j].cks_enable == 0) {
1957                                                         sclk += 5000;
1958                                                         break;
1959                                                 }
1960                                         }
1961                                 }
1962                                 if (0 == atomctrl_get_voltage_evv_on_sclk
1963                                     (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
1964                                      vv_id, &vddgfx)) {
1965                                         /* make sure vddgfx is less than 2V or else it could damage the ASIC */
1966                                         PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
1967
1968                                         /* the voltage should not be zero nor equal to leakage ID */
1969                                         if (vddgfx != 0 && vddgfx != vv_id) {
1970                                                 data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
1971                                                 data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
1972                                                 data->vddcgfx_leakage.count++;
1973                                         }
1974                                 } else {
1975                                         pr_info("Error retrieving EVV voltage value!\n");
1976                                 }
1977                         }
1978                 } else {
1979                         if ((hwmgr->pp_table_version == PP_TABLE_V0)
1980                                 || !phm_get_sclk_for_voltage_evv(hwmgr,
1981                                         table_info->vddc_lookup_table, vv_id, &sclk)) {
1982                                 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1983                                                 PHM_PlatformCaps_ClockStretcher)) {
1984                                         if (table_info == NULL)
1985                                                 return -EINVAL;
1986                                         sclk_table = table_info->vdd_dep_on_sclk;
1987
1988                                         for (j = 1; j < sclk_table->count; j++) {
1989                                                 if (sclk_table->entries[j].clk == sclk &&
1990                                                                 sclk_table->entries[j].cks_enable == 0) {
1991                                                         sclk += 5000;
1992                                                         break;
1993                                                 }
1994                                         }
1995                                 }
1996
1997                                 if (phm_get_voltage_evv_on_sclk(hwmgr,
1998                                                         VOLTAGE_TYPE_VDDC,
1999                                                         sclk, vv_id, &vddc) == 0) {
2000                                         if (vddc >= 2000 || vddc == 0)
2001                                                 return -EINVAL;
2002                                 } else {
2003                                         pr_debug("failed to retrieve EVV voltage!\n");
2004                                         continue;
2005                                 }
2006
2007                                 /* the voltage should not be zero nor equal to leakage ID */
2008                                 if (vddc != 0 && vddc != vv_id) {
2009                                         data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
2010                                         data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
2011                                         data->vddc_leakage.count++;
2012                                 }
2013                         }
2014                 }
2015         }
2016
2017         return 0;
2018 }
2019
2020 /**
2021  * smu7_patch_ppt_v1_with_vdd_leakage - Change virtual leakage voltage
2022  * to actual value.
2023  * @hwmgr: the address of the powerplay hardware manager.
2024  * @voltage: pointer to the voltage value to patch
2025  * @leakage_table: pointer to the leakage table
2026  */
2027 static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2028                 uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
2029 {
2030         uint32_t index;
2031
2032         /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2033         for (index = 0; index < leakage_table->count; index++) {
2034                 /* if this voltage matches a leakage voltage ID */
2035                 /* patch with actual leakage voltage */
2036                 if (leakage_table->leakage_id[index] == *voltage) {
2037                         *voltage = leakage_table->actual_voltage[index];
2038                         break;
2039                 }
2040         }
2041
2042         if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2043                 pr_err("Voltage value looks like a leakage ID but it is not patched\n");
2044 }
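/*
 * Virtual leakage IDs occupy 0xff01-0xff08 (ATOM_VIRTUAL_VOLTAGE_ID0 and
 * up).  A table entry that stores, say, 0xff03 as its VDDC really means
 * "the EVV voltage measured for leakage ID 3", and the patch above swaps
 * in the actual millivolt value collected by smu7_get_evv_voltages().
 */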
2045
2046 /**
2047  * smu7_patch_lookup_table_with_leakage - Patch voltage lookup table by EVV leakages.
2048  * @hwmgr: the address of the powerplay hardware manager.
2049  * @lookup_table: pointer to the voltage lookup table
2050  * @leakage_table: pointer to the leakage table
2051  *
2052  * Return: always 0
2053  */
2054 static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
2055                 phm_ppt_v1_voltage_lookup_table *lookup_table,
2056                 struct smu7_leakage_voltage *leakage_table)
2057 {
2058         uint32_t i;
2059
2060         for (i = 0; i < lookup_table->count; i++)
2061                 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2062                                 &lookup_table->entries[i].us_vdd, leakage_table);
2063
2064         return 0;
2065 }
2066
2067 static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
2068                 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
2069                 uint16_t *vddc)
2070 {
2071         struct phm_ppt_v1_information *table_info =
2072                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2073         smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
2074         hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
2075                         table_info->max_clock_voltage_on_dc.vddc;
2076         return 0;
2077 }
2078
2079 static int smu7_patch_voltage_dependency_tables_with_lookup_table(
2080                 struct pp_hwmgr *hwmgr)
2081 {
2082         uint8_t entry_id;
2083         uint8_t voltage_id;
2084         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2085         struct phm_ppt_v1_information *table_info =
2086                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2087
2088         struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
2089                         table_info->vdd_dep_on_sclk;
2090         struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
2091                         table_info->vdd_dep_on_mclk;
2092         struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2093                         table_info->mm_dep_table;
2094
2095         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2096                 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2097                         voltage_id = sclk_table->entries[entry_id].vddInd;
2098                         sclk_table->entries[entry_id].vddgfx =
2099                                 table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
2100                 }
2101         } else {
2102                 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2103                         voltage_id = sclk_table->entries[entry_id].vddInd;
2104                         sclk_table->entries[entry_id].vddc =
2105                                 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2106                 }
2107         }
2108
2109         for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
2110                 voltage_id = mclk_table->entries[entry_id].vddInd;
2111                 mclk_table->entries[entry_id].vddc =
2112                         table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2113         }
2114
2115         for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
2116                 voltage_id = mm_table->entries[entry_id].vddcInd;
2117                 mm_table->entries[entry_id].vddc =
2118                         table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2119         }
2120
2121         return 0;
2122
2123 }
2124
2125 static int phm_add_voltage(struct pp_hwmgr *hwmgr,
2126                         phm_ppt_v1_voltage_lookup_table *look_up_table,
2127                         phm_ppt_v1_voltage_lookup_record *record)
2128 {
2129         uint32_t i;
2130
2131         PP_ASSERT_WITH_CODE((NULL != look_up_table),
2132                 "Lookup Table empty.", return -EINVAL);
2133         PP_ASSERT_WITH_CODE((0 != look_up_table->count),
2134                 "Lookup Table empty.", return -EINVAL);
2135
2136         i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
2137         PP_ASSERT_WITH_CODE((i > look_up_table->count), /* leave room for an append */
2138                 "Lookup Table is full.", return -EINVAL);
2139
2140         /* This is to avoid entering duplicate calculated records. */
2141         for (i = 0; i < look_up_table->count; i++) {
2142                 if (look_up_table->entries[i].us_vdd == record->us_vdd) {
2143                         if (look_up_table->entries[i].us_calculated == 1)
2144                                 return 0;
2145                         break;
2146                 }
2147         }
2148
2149         look_up_table->entries[i].us_calculated = 1;
2150         look_up_table->entries[i].us_vdd = record->us_vdd;
2151         look_up_table->entries[i].us_cac_low = record->us_cac_low;
2152         look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
2153         look_up_table->entries[i].us_cac_high = record->us_cac_high;
2154         /* Only increment the count when appending, not when replacing a duplicate entry. */
2155         if (i == look_up_table->count)
2156                 look_up_table->count++;
2157
2158         return 0;
2159 }
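/*
 * phm_add_voltage() is an insert-or-update helper: an entry with the same
 * us_vdd is rewritten in place (or left alone if it was already marked
 * calculated), otherwise the record is appended.  A typical caller fills
 * all four voltage fields with the derived value:
 *
 *     v_record.us_vdd = v_record.us_cac_low = v_record.us_cac_mid =
 *             v_record.us_cac_high = vddc;
 *     phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
 */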
2160
2161
2162 static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
2163 {
2164         uint8_t entry_id;
2165         struct phm_ppt_v1_voltage_lookup_record v_record;
2166         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2167         struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2168
2169         phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
2170         phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
2171
2172         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2173                 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2174                         if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
2175                                 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
2176                                         sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
2177                         else
2178                                 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
2179                                         sclk_table->entries[entry_id].vdd_offset;
2180
2181                         sclk_table->entries[entry_id].vddc =
2182                                 v_record.us_cac_low = v_record.us_cac_mid =
2183                                 v_record.us_cac_high = v_record.us_vdd;
2184
2185                         phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
2186                 }
2187
2188                 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
2189                         if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
2190                                 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
2191                                         mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
2192                         else
2193                                 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
2194                                         mclk_table->entries[entry_id].vdd_offset;
2195
2196                         mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
2197                                 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
2198                         phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
2199                 }
2200         }
2201         return 0;
2202 }
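/*
 * vdd_offset is a 16-bit field where bit 15 flags a negative offset
 * stored as 0xFFFF + offset: an encoded 0xFFFE means -1 mV, since
 * vddgfx + 0xFFFE - 0xFFFF == vddgfx - 1.  Offsets with bit 15 clear are
 * added directly.
 */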
2203
2204 static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
2205 {
2206         uint8_t entry_id;
2207         struct phm_ppt_v1_voltage_lookup_record v_record;
2208         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2209         struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2210         phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
2211
2212         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2213                 for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
2214                         if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
2215                                 v_record.us_vdd = mm_table->entries[entry_id].vddc +
2216                                         mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
2217                         else
2218                                 v_record.us_vdd = mm_table->entries[entry_id].vddc +
2219                                         mm_table->entries[entry_id].vddgfx_offset;
2220
2221                         /* Add the calculated VDDGFX to the VDDGFX lookup table */
2222                         mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
2223                                 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
2224                         phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
2225                 }
2226         }
2227         return 0;
2228 }
2229
2230 static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
2231                 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
2232 {
2233         uint32_t table_size, i, j;
2234         table_size = lookup_table->count;
2235
2236         PP_ASSERT_WITH_CODE(0 != lookup_table->count,
2237                 "Lookup table is empty", return -EINVAL);
2238
2239         /* Sorting voltages */
2240         for (i = 0; i < table_size - 1; i++) {
2241                 for (j = i + 1; j > 0; j--) {
2242                         if (lookup_table->entries[j].us_vdd <
2243                                         lookup_table->entries[j - 1].us_vdd) {
2244                                 swap(lookup_table->entries[j - 1],
2245                                      lookup_table->entries[j]);
2246                         }
2247                 }
2248         }
2249
2250         return 0;
2251 }
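/*
 * The nested loop above is an insertion sort on us_vdd in ascending
 * order: each outer pass sinks entry i+1 toward the front until the
 * prefix is ordered.  Lookup tables here hold at most a handful of DPM
 * levels, so the O(n^2) cost is irrelevant next to keeping the code
 * dependency-free.
 */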
2252
2253 static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
2254 {
2255         int result = 0;
2256         int tmp_result;
2257         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2258         struct phm_ppt_v1_information *table_info =
2259                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2260
2261         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2262                 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2263                         table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
2264                 if (tmp_result != 0)
2265                         result = tmp_result;
2266
2267                 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2268                         &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
2269         } else {
2270
2271                 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2272                                 table_info->vddc_lookup_table, &(data->vddc_leakage));
2273                 if (tmp_result)
2274                         result = tmp_result;
2275
2276                 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
2277                                 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
2278                 if (tmp_result)
2279                         result = tmp_result;
2280         }
2281
2282         tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
2283         if (tmp_result)
2284                 result = tmp_result;
2285
2286         tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
2287         if (tmp_result)
2288                 result = tmp_result;
2289
2290         tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
2291         if (tmp_result)
2292                 result = tmp_result;
2293
2294         tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
2295         if (tmp_result)
2296                 result = tmp_result;
2297
2298         tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
2299         if (tmp_result)
2300                 result = tmp_result;
2301
2302         return result;
2303 }
2304
2305 static int smu7_find_highest_vddc(struct pp_hwmgr *hwmgr)
2306 {
2307         struct phm_ppt_v1_information *table_info =
2308                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2309         struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2310                                                 table_info->vdd_dep_on_sclk;
2311         struct phm_ppt_v1_voltage_lookup_table *lookup_table =
2312                                                 table_info->vddc_lookup_table;
2313         uint16_t highest_voltage;
2314         uint32_t i;
2315
2316         highest_voltage = allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2317
2318         for (i = 0; i < lookup_table->count; i++) {
2319                 if (lookup_table->entries[i].us_vdd < ATOM_VIRTUAL_VOLTAGE_ID0 &&
2320                     lookup_table->entries[i].us_vdd > highest_voltage)
2321                         highest_voltage = lookup_table->entries[i].us_vdd;
2322         }
2323
2324         return highest_voltage;
2325 }
2326
2327 static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
2328 {
2329         struct phm_ppt_v1_information *table_info =
2330                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2331
2332         struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2333                                                 table_info->vdd_dep_on_sclk;
2334         struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
2335                                                 table_info->vdd_dep_on_mclk;
2336
2337         PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
2338                 "VDD dependency on SCLK table is missing.",
2339                 return -EINVAL);
2340         PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
2341                 "VDD dependency on SCLK table has to have at least 1 entry.",
2342                 return -EINVAL);
2343 
2344         PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
2345                 "VDD dependency on MCLK table is missing.",
2346                 return -EINVAL);
2347         PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
2348                 "VDD dependency on MCLK table has to have at least 1 entry.",
2349                 return -EINVAL);
2350
2351         table_info->max_clock_voltage_on_ac.sclk =
2352                 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
2353         table_info->max_clock_voltage_on_ac.mclk =
2354                 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
2355         if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM)
2356                 table_info->max_clock_voltage_on_ac.vddc =
2357                         smu7_find_highest_vddc(hwmgr);
2358         else
2359                 table_info->max_clock_voltage_on_ac.vddc =
2360                         allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2361         table_info->max_clock_voltage_on_ac.vddci =
2362                 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
2363
2364         hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
2365         hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
2366         hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
2367         hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
2368
2369         return 0;
2370 }
2371
2372 static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
2373 {
2374         struct phm_ppt_v1_information *table_info =
2375                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
2376         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
2377         struct phm_ppt_v1_voltage_lookup_table *lookup_table;
2378         uint32_t i;
2379         uint32_t hw_revision, sub_vendor_id, sub_sys_id;
2380         struct amdgpu_device *adev = hwmgr->adev;
2381
2382         if (table_info == NULL)
2383                 return 0;
2384 
2385         dep_mclk_table = table_info->vdd_dep_on_mclk;
2386         lookup_table = table_info->vddc_lookup_table;
2387
2388         hw_revision = adev->pdev->revision;
2389         sub_sys_id = adev->pdev->subsystem_device;
2390         sub_vendor_id = adev->pdev->subsystem_vendor;
2391
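             /* Quirk for specific Polaris10 boards (device 0x67DF, revision
              * C7; subsystem vendors 0x1002 = AMD, 0x1043 = ASUS, 0x1682 =
              * likely XFX): force a CKS stretch amount of 3 and make sure the
              * top MCLK DPM level references a voltage of at least 1000
              * (apparently mV).
              */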
2392         if (adev->pdev->device == 0x67DF && hw_revision == 0xC7 &&
2393             ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
2394              (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
2395              (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
2396
2397                 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
2398                                               CGS_IND_REG__SMC,
2399                                               PWR_CKS_CNTL,
2400                                               CKS_STRETCH_AMOUNT,
2401                                               0x3);
2402
2403                 if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
2404                         return 0;
2405
2406                 for (i = 0; i < lookup_table->count; i++) {
2407                         if (lookup_table->entries[i].us_vdd < ATOM_VIRTUAL_VOLTAGE_ID0 && lookup_table->entries[i].us_vdd >= 1000) {
2408                                 dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
2409                                 return 0;
2410                         }
2411                 }
2412         }
2413         return 0;
2414 }
2415
2416 static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
2417 {
2418         struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2419         uint32_t temp_reg;
2420         struct phm_ppt_v1_information *table_info =
2421                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2422
2423
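             /* If the VBIOS routes a GPIO pin to VDDC PCC, set the
              * CNB_PWRMGT_CNTL field that matches the pin's bit position
              * (GNB slow mode, GNB slow, force NB PS1 or DPM enable).
              */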
2424         if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
2425                 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
2426                 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
2427                 case 0:
2428                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
2429                         break;
2430                 case 1:
2431                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
2432                         break;
2433                 case 2:
2434                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
2435                         break;
2436                 case 3:
2437                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
2438                         break;
2439                 case 4:
2440                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
2441                         break;
2442                 default:
2443                         break;
2444                 }
2445                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
2446         }
2447
2448         if (table_info == NULL)
2449                 return 0;
2450
2451         if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
2452                 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
2453                 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
2454                         (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2455
2456                 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
2457                         (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2458
2459                 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
2460
2461                 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
2462
2463                 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
2464                         (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2465
2466                 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
2467
2468                 table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
2469                                                                 (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
2470
2471                 table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2472                 table_info->cac_dtp_table->usOperatingTempStep = 1;
2473                 table_info->cac_dtp_table->usOperatingTempHyst = 1;
2474
2475                 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
2476                                hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2477
2478                 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
2479                                hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
2480
2481                 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
2482                                table_info->cac_dtp_table->usOperatingTempMinLimit;
2483
2484                 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
2485                                table_info->cac_dtp_table->usOperatingTempMaxLimit;
2486
2487                 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
2488                                table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2489
2490                 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
2491                                table_info->cac_dtp_table->usOperatingTempStep;
2492
2493                 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
2494                                table_info->cac_dtp_table->usTargetOperatingTemp;
2495                 if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
2496                         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2497                                         PHM_PlatformCaps_ODFuzzyFanControlSupport);
2498         }
2499
2500         return 0;
2501 }
2502
2503 /**
2504  * smu7_patch_ppt_v0_with_vdd_leakage - Change virtual leakage voltage to actual value.
2505  *
2506  * @hwmgr:         the address of the powerplay hardware manager
2507  * @voltage:       pointer to the voltage to be patched
2508  * @leakage_table: pointer to the leakage table
2509  */
2510 static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2511                 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2512 {
2513         uint32_t index;
2514
2515         /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2516         for (index = 0; index < leakage_table->count; index++) {
2517                 /* if this voltage matches a leakage voltage ID */
2518                 /* patch with actual leakage voltage */
2519                 if (leakage_table->leakage_id[index] == *voltage) {
2520                         *voltage = leakage_table->actual_voltage[index];
2521                         break;
2522                 }
2523         }
2524
2525         if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2526                 pr_err("Voltage value looks like a leakage ID but it's not patched\n");
2527 }
2528
2529
2530 static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2531                               struct phm_clock_voltage_dependency_table *tab)
2532 {
2533         uint16_t i;
2534         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2535
2536         if (tab)
2537                 for (i = 0; i < tab->count; i++)
2538                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2539                                                 &data->vddc_leakage);
2540
2541         return 0;
2542 }
2543
2544 static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2545                                struct phm_clock_voltage_dependency_table *tab)
2546 {
2547         uint16_t i;
2548         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2549
2550         if (tab)
2551                 for (i = 0; i < tab->count; i++)
2552                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2553                                                         &data->vddci_leakage);
2554
2555         return 0;
2556 }
2557
2558 static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2559                                   struct phm_vce_clock_voltage_dependency_table *tab)
2560 {
2561         uint16_t i;
2562         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2563
2564         if (tab)
2565                 for (i = 0; i < tab->count; i++)
2566                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2567                                                         &data->vddc_leakage);
2568
2569         return 0;
2570 }
2571
2572
2573 static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2574                                   struct phm_uvd_clock_voltage_dependency_table *tab)
2575 {
2576         uint16_t i;
2577         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2578
2579         if (tab)
2580                 for (i = 0; i < tab->count; i++)
2581                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2582                                                         &data->vddc_leakage);
2583
2584         return 0;
2585 }
2586
2587 static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2588                                          struct phm_phase_shedding_limits_table *tab)
2589 {
2590         uint16_t i;
2591         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2592
2593         if (tab)
2594                 for (i = 0; i < tab->count; i++)
2595                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2596                                                         &data->vddc_leakage);
2597
2598         return 0;
2599 }
2600
2601 static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2602                                    struct phm_samu_clock_voltage_dependency_table *tab)
2603 {
2604         uint16_t i;
2605         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2606
2607         if (tab)
2608                 for (i = 0; i < tab->count; i++)
2609                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2610                                                         &data->vddc_leakage);
2611
2612         return 0;
2613 }
2614
2615 static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2616                                   struct phm_acp_clock_voltage_dependency_table *tab)
2617 {
2618         uint16_t i;
2619         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2620
2621         if (tab)
2622                 for (i = 0; i < tab->count; i++)
2623                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2624                                         &data->vddc_leakage);
2625
2626         return 0;
2627 }
2628
2629 static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2630                                   struct phm_clock_and_voltage_limits *tab)
2631 {
2632         uint32_t vddc, vddci;
2633         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2634
2635         if (tab) {
2636                 vddc = tab->vddc;
2637                 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2638                                                    &data->vddc_leakage);
2639                 tab->vddc = vddc;
2640                 vddci = tab->vddci;
2641                 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2642                                                    &data->vddci_leakage);
2643                 tab->vddci = vddci;
2644         }
2645
2646         return 0;
2647 }
2648
2649 static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2650 {
2651         uint32_t i;
2652         uint32_t vddc;
2653         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2654
2655         if (tab) {
2656                 for (i = 0; i < tab->count; i++) {
2657                         vddc = (uint32_t)(tab->entries[i].Vddc);
2658                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2659                         tab->entries[i].Vddc = (uint16_t)vddc;
2660                 }
2661         }
2662
2663         return 0;
2664 }
2665
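     /* Resolve virtual leakage voltage IDs in every v0 (legacy pptable)
      * clock/voltage table to the real voltages measured for this part.
      */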
2666 static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2667 {
2668         int tmp;
2669
2670         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2671         if (tmp)
2672                 return -EINVAL;
2673
2674         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2675         if (tmp)
2676                 return -EINVAL;
2677
2678         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2679         if (tmp)
2680                 return -EINVAL;
2681
2682         tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2683         if (tmp)
2684                 return -EINVAL;
2685
2686         tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2687         if (tmp)
2688                 return -EINVAL;
2689
2690         tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2691         if (tmp)
2692                 return -EINVAL;
2693
2694         tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2695         if (tmp)
2696                 return -EINVAL;
2697
2698         tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2699         if (tmp)
2700                 return -EINVAL;
2701
2702         tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2703         if (tmp)
2704                 return -EINVAL;
2705
2706         tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2707         if (tmp)
2708                 return -EINVAL;
2709
2710         tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2711         if (tmp)
2712                 return -EINVAL;
2713
2714         tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2715         if (tmp)
2716                 return -EINVAL;
2717
2718         return 0;
2719 }
2720
2721
2722 static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2723 {
2724         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2725
2726         struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2727         struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2728         struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2729
2730         PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2731                 "VDDC dependency on SCLK table is missing. This table is mandatory",
2732                 return -EINVAL);
2733         PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2734                 "VDDC dependency on SCLK table has to have at least 1 entry. This table is mandatory",
2735                 return -EINVAL);
2736
2737         PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2738                 "VDDC dependency on MCLK table is missing. This table is mandatory",
2739                 return -EINVAL);
2740         PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2741                 "VDDC dependency on MCLK table has to have at least 1 entry. This table is mandatory",
2742                 return -EINVAL);
2743
2744         data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2745         data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2746
2747         hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2748                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2749         hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2750                 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2751         hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2752                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2753
2754         if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2755                 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2756                 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2757         }
2758
2759         if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
2760                 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2761
2762         return 0;
2763 }
2764
2765 static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
2766 {
2767         kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2768         hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
2769         kfree(hwmgr->backend);
2770         hwmgr->backend = NULL;
2771
2772         return 0;
2773 }
2774
2775 static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
2776 {
2777         uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
2778         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2779         int i;
2780
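             /* Read the board's leakage ID from the efuse, then resolve each
              * virtual voltage ID (ATOM_VIRTUAL_VOLTAGE_ID0 + i) into the
              * real VDDC/VDDCI values fused for that leakage bin.
              */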
2781         if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
2782                 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
2783                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2784                         if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
2785                                                                 virtual_voltage_id,
2786                                                                 efuse_voltage_id) == 0) {
2787                                 if (vddc != 0 && vddc != virtual_voltage_id) {
2788                                         data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
2789                                         data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
2790                                         data->vddc_leakage.count++;
2791                                 }
2792                                 if (vddci != 0 && vddci != virtual_voltage_id) {
2793                                         data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
2794                                         data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
2795                                         data->vddci_leakage.count++;
2796                                 }
2797                         }
2798                 }
2799         }
2800         return 0;
2801 }
2802
2803 #define LEAKAGE_ID_MSB                  463
2804 #define LEAKAGE_ID_LSB                  454
2805
2806 static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
2807 {
2808         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2809         uint32_t efuse;
2810         uint16_t offset;
2811         int ret = 0;
2812
2813         if (data->disable_edc_leakage_controller)
2814                 return 0;
2815
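             /* The VBIOS can carry both a high- and a low-leakage EDC DIDT
              * table; the leakage ID fused into bits 454..463 picks which
              * one applies: parts below usHiLoLeakageThreshold use the
              * low-leakage offsets.
              */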
2816         ret = atomctrl_get_edc_hilo_leakage_offset_table(hwmgr,
2817                                                          &data->edc_hilo_leakage_offset_from_vbios);
2818         if (ret)
2819                 return ret;
2820
2821         if (data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
2822             data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
2823                 atomctrl_read_efuse(hwmgr, LEAKAGE_ID_LSB, LEAKAGE_ID_MSB, &efuse);
2824                 if (efuse < data->edc_hilo_leakage_offset_from_vbios.usHiLoLeakageThreshold)
2825                         offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset;
2826                 else
2827                         offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset;
2828
2829                 ret = atomctrl_get_edc_leakage_table(hwmgr,
2830                                                      &data->edc_leakage_table,
2831                                                      offset);
2832                 if (ret)
2833                         return ret;
2834         }
2835
2836         return ret;
2837 }
2838
2839 static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2840 {
2841         struct smu7_hwmgr *data;
2842         int result = 0;
2843
2844         data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2845         if (data == NULL)
2846                 return -ENOMEM;
2847
2848         hwmgr->backend = data;
2849         smu7_patch_voltage_workaround(hwmgr);
2850         smu7_init_dpm_defaults(hwmgr);
2851
2852         /* Get leakage voltage based on leakage ID. */
2853         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2854                         PHM_PlatformCaps_EVV)) {
2855                 result = smu7_get_evv_voltages(hwmgr);
2856                 if (result) {
2857                         pr_info("Get EVV voltage failed. Abort driver loading!\n");
                             /* Free the just-allocated backend so this early
                              * exit does not leak it (mirrors the error path
                              * further below).
                              */
                             smu7_hwmgr_backend_fini(hwmgr);
2858                         return -EINVAL;
2859                 }
2860         } else {
2861                 smu7_get_elb_voltages(hwmgr);
2862         }
2863
2864         if (hwmgr->pp_table_version == PP_TABLE_V1) {
2865                 smu7_complete_dependency_tables(hwmgr);
2866                 smu7_set_private_data_based_on_pptable_v1(hwmgr);
2867         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2868                 smu7_patch_dependency_tables_with_leakage(hwmgr);
2869                 smu7_set_private_data_based_on_pptable_v0(hwmgr);
2870         }
2871
2872         /* Initialize Dynamic State Adjustment Rule Settings */
2873         result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2874
2875         if (0 == result) {
2876                 struct amdgpu_device *adev = hwmgr->adev;
2877
2878                 data->is_tlu_enabled = false;
2879
2880                 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2881                                                         SMU7_MAX_HARDWARE_POWERLEVELS;
2882                 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2883                 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2884
2885                 data->pcie_gen_cap = adev->pm.pcie_gen_mask;
2886                 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2887                         data->pcie_spc_cap = 20;
2888                 else
2889                         data->pcie_spc_cap = 16;
2890                 data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
2891
2892                 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2893                 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz; 5 MHz is used here. */
2894                 hwmgr->platform_descriptor.clockStep.engineClock = 500;
2895                 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2896                 smu7_thermal_parameter_init(hwmgr);
2897         } else {
2898                 /* Ignore return value in here, we are cleaning up a mess. */
2899                 smu7_hwmgr_backend_fini(hwmgr);
2900         }
2901
2902         result = smu7_update_edc_leakage_table(hwmgr);
2903         if (result)
2904                 return result;
2905
2906         return 0;
2907 }
2908
2909 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2910 {
2911         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2912         uint32_t level, tmp;
2913
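             /* For each DPM type, "while (tmp >>= 1) level++" computes the
              * index of the highest set bit in the enable mask (the highest
              * enabled level); PCIe is forced to it, while SCLK/MCLK are
              * pinned by shrinking the enable mask to that single level.
              */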
2914         if (!data->pcie_dpm_key_disabled) {
2915                 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2916                         level = 0;
2917                         tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2918                         while (tmp >>= 1)
2919                                 level++;
2920
2921                         if (level)
2922                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2923                                                 PPSMC_MSG_PCIeDPM_ForceLevel, level,
2924                                                 NULL);
2925                 }
2926         }
2927
2928         if (!data->sclk_dpm_key_disabled) {
2929                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2930                         level = 0;
2931                         tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2932                         while (tmp >>= 1)
2933                                 level++;
2934
2935                         if (level)
2936                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2937                                                 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2938                                                 (1 << level),
2939                                                 NULL);
2940                 }
2941         }
2942
2943         if (!data->mclk_dpm_key_disabled) {
2944                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2945                         level = 0;
2946                         tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
2947                         while (tmp >>= 1)
2948                                 level++;
2949
2950                         if (level)
2951                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2952                                                 PPSMC_MSG_MCLKDPM_SetEnabledMask,
2953                                                 (1 << level),
2954                                                 NULL);
2955                 }
2956         }
2957
2958         return 0;
2959 }
2960
2961 static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
2962 {
2963         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2964
2965         if (hwmgr->pp_table_version == PP_TABLE_V1)
2966                 phm_apply_dal_min_voltage_request(hwmgr);
2967         /* TODO: do the same for v0 (Iceland and CI) */
2968
2969         if (!data->sclk_dpm_key_disabled) {
2970                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
2971                         smum_send_msg_to_smc_with_parameter(hwmgr,
2972                                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
2973                                         data->dpm_level_enable_mask.sclk_dpm_enable_mask,
2974                                         NULL);
2975         }
2976
2977         if (!data->mclk_dpm_key_disabled) {
2978                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
2979                         smum_send_msg_to_smc_with_parameter(hwmgr,
2980                                         PPSMC_MSG_MCLKDPM_SetEnabledMask,
2981                                         data->dpm_level_enable_mask.mclk_dpm_enable_mask,
2982                                         NULL);
2983         }
2984
2985         return 0;
2986 }
2987
2988 static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2989 {
2990         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2991
2992         if (!smum_is_dpm_running(hwmgr))
2993                 return -EINVAL;
2994
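             /* Only PCIe needs an explicit un-force message; SCLK and MCLK
              * are released by re-uploading their full enable masks below.
              */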
2995         if (!data->pcie_dpm_key_disabled) {
2996                 smum_send_msg_to_smc(hwmgr,
2997                                 PPSMC_MSG_PCIeDPM_UnForceLevel,
2998                                 NULL);
2999         }
3000
3001         return smu7_upload_dpm_level_enable_mask(hwmgr);
3002 }
3003
3004 static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3005 {
3006         struct smu7_hwmgr *data =
3007                         (struct smu7_hwmgr *)(hwmgr->backend);
3008         uint32_t level;
3009
3010         if (!data->sclk_dpm_key_disabled) {
3011                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3012                         level = phm_get_lowest_enabled_level(hwmgr,
3013                                                               data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3014                         smum_send_msg_to_smc_with_parameter(hwmgr,
3015                                                             PPSMC_MSG_SCLKDPM_SetEnabledMask,
3016                                                             (1 << level),
3017                                                             NULL);
3018                 }
3019         }
3020
3021         if (!data->mclk_dpm_key_disabled) {
3022                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3023                         level = phm_get_lowest_enabled_level(hwmgr,
3024                                                               data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3025                         smum_send_msg_to_smc_with_parameter(hwmgr,
3026                                                             PPSMC_MSG_MCLKDPM_SetEnabledMask,
3027                                                             (1 << level),
3028                                                             NULL);
3029                 }
3030         }
3031
3032         if (!data->pcie_dpm_key_disabled) {
3033                 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3034                         level = phm_get_lowest_enabled_level(hwmgr,
3035                                                               data->dpm_level_enable_mask.pcie_dpm_enable_mask);
3036                         smum_send_msg_to_smc_with_parameter(hwmgr,
3037                                                             PPSMC_MSG_PCIeDPM_ForceLevel,
3038                                                             (level),
3039                                                             NULL);
3040                 }
3041         }
3042
3043         return 0;
3044 }
3045
3046 static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
3047                                 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
3048 {
3049         uint32_t percentage;
3050         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3051         struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
3052         int32_t tmp_mclk;
3053         int32_t tmp_sclk;
3054         int32_t count;
3055
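             /* Profiling clocks come from the golden (stock) DPM table: take
              * the stock top-level SCLK/MCLK ratio (or 70% when only one
              * MCLK level exists), pick the second-highest MCLK where
              * possible, and scale it by that ratio to select a matching
              * SCLK dependency level below.
              */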
3056         if (golden_dpm_table->mclk_table.count < 1)
3057                 return -EINVAL;
3058
3059         percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
3060                         golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
3061
3062         if (golden_dpm_table->mclk_table.count == 1) {
3063                 percentage = 70;
3064                 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
3065                 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
3066         } else {
3067                 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
3068                 *mclk_mask = golden_dpm_table->mclk_table.count - 2;
3069         }
3070
3071         tmp_sclk = tmp_mclk * percentage / 100;
3072
3073         if (hwmgr->pp_table_version == PP_TABLE_V0) {
3074                 for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
3075                         count >= 0; count--) {
3076                         if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
3077                                 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
3078                                 *sclk_mask = count;
3079                                 break;
3080                         }
3081                 }
3082                 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3083                         *sclk_mask = 0;
3084                         tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
3085                 }
3086
3087                 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3088                         *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
3089         } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3090                 struct phm_ppt_v1_information *table_info =
3091                                 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3092
3093                 for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
3094                         if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
3095                                 tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
3096                                 *sclk_mask = count;
3097                                 break;
3098                         }
3099                 }
3100                 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3101                         *sclk_mask = 0;
3102                         tmp_sclk =  table_info->vdd_dep_on_sclk->entries[0].clk;
3103                 }
3104
3105                 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3106                         *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
3107         }
3108
3109         if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
3110                 *mclk_mask = 0;
3111         else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3112                 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
3113
3114         *pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
3115         hwmgr->pstate_sclk = tmp_sclk;
3116         hwmgr->pstate_mclk = tmp_mclk;
3117
3118         return 0;
3119 }
3120
3121 static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
3122                                 enum amd_dpm_forced_level level)
3123 {
3124         int ret = 0;
3125         uint32_t sclk_mask = 0;
3126         uint32_t mclk_mask = 0;
3127         uint32_t pcie_mask = 0;
3128
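             /* hwmgr->pstate_sclk == 0 means the profiling (pstate) clocks
              * have not been derived yet; compute them once here so the
              * PROFILE_* cases below and later pstate queries see valid
              * values.
              */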
3129         if (hwmgr->pstate_sclk == 0)
3130                 smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
3131
3132         switch (level) {
3133         case AMD_DPM_FORCED_LEVEL_HIGH:
3134                 ret = smu7_force_dpm_highest(hwmgr);
3135                 break;
3136         case AMD_DPM_FORCED_LEVEL_LOW:
3137                 ret = smu7_force_dpm_lowest(hwmgr);
3138                 break;
3139         case AMD_DPM_FORCED_LEVEL_AUTO:
3140                 ret = smu7_unforce_dpm_levels(hwmgr);
3141                 break;
3142         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
3143         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
3144         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
3145         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
3146                 ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
3147                 if (ret)
3148                         return ret;
3149                 smu7_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
3150                 smu7_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
3151                 smu7_force_clock_level(hwmgr, PP_PCIE, 1 << pcie_mask);
3152                 break;
3153         case AMD_DPM_FORCED_LEVEL_MANUAL:
3154         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
3155         default:
3156                 break;
3157         }
3158
3159         if (!ret) {
3160                 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3161                         smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
3162                 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3163                         smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
3164         }
3165         return ret;
3166 }
3167
3168 static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
3169 {
3170         return sizeof(struct smu7_power_state);
3171 }
3172
3173 static bool smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
3174                                  uint32_t vblank_time_us)
3175 {
3176         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3177         uint32_t switch_limit_us;
3178
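             /* Minimum vblank length needed to hide a memory reclock; the
              * limits are per-ASIC and, except on VEGAM, noticeably longer
              * for GDDR5 parts, presumably to cover link retraining.
              */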
3179         switch (hwmgr->chip_id) {
3180         case CHIP_POLARIS10:
3181         case CHIP_POLARIS11:
3182         case CHIP_POLARIS12:
3183                 if (hwmgr->is_kicker || (hwmgr->chip_id == CHIP_POLARIS12))
3184                         switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
3185                 else
3186                         switch_limit_us = data->is_memory_gddr5 ? 200 : 150;
3187                 break;
3188         case CHIP_VEGAM:
3189                 switch_limit_us = 30;
3190                 break;
3191         default:
3192                 switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
3193                 break;
3194         }
3195
3196         return vblank_time_us < switch_limit_us;
3200 }
3201
3202 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3203                                 struct pp_power_state *request_ps,
3204                         const struct pp_power_state *current_ps)
3205 {
3206         struct amdgpu_device *adev = hwmgr->adev;
3207         struct smu7_power_state *smu7_ps =
3208                                 cast_phw_smu7_power_state(&request_ps->hardware);
3209         uint32_t sclk;
3210         uint32_t mclk;
3211         struct PP_Clocks minimum_clocks = {0};
3212         bool disable_mclk_switching;
3213         bool disable_mclk_switching_for_frame_lock;
3214         bool disable_mclk_switching_for_display;
3215         const struct phm_clock_and_voltage_limits *max_limits;
3216         uint32_t i;
3217         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3218         struct phm_ppt_v1_information *table_info =
3219                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
3220         int32_t count;
3221         int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3222         uint32_t latency;
3223         bool latency_allowed = false;
3224
3225         data->battery_state = (PP_StateUILabel_Battery ==
3226                         request_ps->classification.ui_label);
3227         data->mclk_ignore_signal = false;
3228
3229         PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
3230                                  "VI should always have 2 performance levels",
3231                                 );
3232
3233         max_limits = adev->pm.ac_power ?
3234                         &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3235                         &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3236
3237         /* Cap clock DPM tables at DC MAX if it is in DC. */
3238         if (!adev->pm.ac_power) {
3239                 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3240                         if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
3241                                 smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
3242                         if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
3243                                 smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
3244                 }
3245         }
3246
3247         minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
3248         minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
3249
3250         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3251                         PHM_PlatformCaps_StablePState)) {
3252                 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3253                 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
3254
3255                 for (count = table_info->vdd_dep_on_sclk->count - 1;
3256                                 count >= 0; count--) {
3257                         if (stable_pstate_sclk >=
3258                                         table_info->vdd_dep_on_sclk->entries[count].clk) {
3259                                 stable_pstate_sclk =
3260                                                 table_info->vdd_dep_on_sclk->entries[count].clk;
3261                                 break;
3262                         }
3263                 }
3264
3265                 if (count < 0)
3266                         stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3267
3268                 stable_pstate_mclk = max_limits->mclk;
3269
3270                 minimum_clocks.engineClock = stable_pstate_sclk;
3271                 minimum_clocks.memoryClock = stable_pstate_mclk;
3272         }
3273
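             /* MCLK switching must be disabled while frame lock is in use,
              * or when several unsynchronized displays are active, since a
              * memory reclock that cannot be hidden in a common vblank would
              * produce visible artifacts.
              */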
3274         disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3275                                     hwmgr->platform_descriptor.platformCaps,
3276                                     PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3277
3278         disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
3279                                                 !hwmgr->display_config->multi_monitor_in_sync) ||
3280                                                 smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time);
3281
3282         disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
3283                                          disable_mclk_switching_for_display;
3284
3285         if (hwmgr->display_config->num_display == 0) {
3286                 if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM)
3287                         data->mclk_ignore_signal = true;
3288                 else
3289                         disable_mclk_switching = false;
3290         }
3291
3292         sclk = smu7_ps->performance_levels[0].engine_clock;
3293         mclk = smu7_ps->performance_levels[0].memory_clock;
3294
3295         if (disable_mclk_switching &&
3296             (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
3297             hwmgr->chip_id <= CHIP_VEGAM)))
3298                 mclk = smu7_ps->performance_levels
3299                 [smu7_ps->performance_level_count - 1].memory_clock;
3300
3301         if (sclk < minimum_clocks.engineClock)
3302                 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3303                                 max_limits->sclk : minimum_clocks.engineClock;
3304
3305         if (mclk < minimum_clocks.memoryClock)
3306                 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3307                                 max_limits->mclk : minimum_clocks.memoryClock;
3308
3309         smu7_ps->performance_levels[0].engine_clock = sclk;
3310         smu7_ps->performance_levels[0].memory_clock = mclk;
3311
3312         smu7_ps->performance_levels[1].engine_clock =
3313                 (smu7_ps->performance_levels[1].engine_clock >=
3314                                 smu7_ps->performance_levels[0].engine_clock) ?
3315                                                 smu7_ps->performance_levels[1].engine_clock :
3316                                                 smu7_ps->performance_levels[0].engine_clock;
3317
3318         if (disable_mclk_switching) {
3319                 if (mclk < smu7_ps->performance_levels[1].memory_clock)
3320                         mclk = smu7_ps->performance_levels[1].memory_clock;
3321
3322                 if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM) {
3323                         if (disable_mclk_switching_for_display) {
3324                                 /* Find the lowest MCLK frequency that is within
3325                                  * the tolerable latency defined in DAL
3326                                  */
3327                                 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
3328                                 for (i = 0; i < data->mclk_latency_table.count; i++) {
3329                                         if (data->mclk_latency_table.entries[i].latency <= latency) {
3330                                                 latency_allowed = true;
3331
3332                                                 if ((data->mclk_latency_table.entries[i].frequency >=
3333                                                                 smu7_ps->performance_levels[0].memory_clock) &&
3334                                                     (data->mclk_latency_table.entries[i].frequency <=
3335                                                                 smu7_ps->performance_levels[1].memory_clock)) {
3336                                                         mclk = data->mclk_latency_table.entries[i].frequency;
3337                                                         break;
3338                                                 }
3339                                         }
3340                                 }
3341                                 if ((i >= data->mclk_latency_table.count - 1) && !latency_allowed) {
3342                                         data->mclk_ignore_signal = true;
3343                                 } else {
3344                                         data->mclk_ignore_signal = false;
3345                                 }
3346                         }
3347
3348                         if (disable_mclk_switching_for_frame_lock)
3349                                 mclk = smu7_ps->performance_levels[1].memory_clock;
3350                 }
3351
3352                 smu7_ps->performance_levels[0].memory_clock = mclk;
3353
3354                 if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
3355                       hwmgr->chip_id <= CHIP_VEGAM))
3356                         smu7_ps->performance_levels[1].memory_clock = mclk;
3357         } else {
3358                 if (smu7_ps->performance_levels[1].memory_clock <
3359                                 smu7_ps->performance_levels[0].memory_clock)
3360                         smu7_ps->performance_levels[1].memory_clock =
3361                                         smu7_ps->performance_levels[0].memory_clock;
3362         }
3363
3364         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3365                         PHM_PlatformCaps_StablePState)) {
3366                 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3367                         smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
3368                         smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
3369                         smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
3370                         smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
3371                 }
3372         }
3373         return 0;
3374 }
3375
3376
3377 static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3378 {
3379         struct pp_power_state  *ps;
3380         struct smu7_power_state  *smu7_ps;
3381
3382         if (hwmgr == NULL)
3383                 return -EINVAL;
3384
3385         ps = hwmgr->request_ps;
3386
3387         if (ps == NULL)
3388                 return -EINVAL;
3389
3390         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3391
3392         if (low)
3393                 return smu7_ps->performance_levels[0].memory_clock;
3394         else
3395                 return smu7_ps->performance_levels
3396                                 [smu7_ps->performance_level_count-1].memory_clock;
3397 }
3398
3399 static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3400 {
3401         struct pp_power_state  *ps;
3402         struct smu7_power_state  *smu7_ps;
3403
3404         if (hwmgr == NULL)
3405                 return -EINVAL;
3406
3407         ps = hwmgr->request_ps;
3408
3409         if (ps == NULL)
3410                 return -EINVAL;
3411
3412         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3413
3414         if (low)
3415                 return smu7_ps->performance_levels[0].engine_clock;
3416         else
3417                 return smu7_ps->performance_levels
3418                                 [smu7_ps->performance_level_count-1].engine_clock;
3419 }
3420
3421 static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
3422                                         struct pp_hw_power_state *hw_ps)
3423 {
3424         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3425         struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
3426         ATOM_FIRMWARE_INFO_V2_2 *fw_info;
3427         uint16_t size;
3428         uint8_t frev, crev;
3429         int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
3430
3431         /* First retrieve the Boot clocks and VDDC from the firmware info table.
3432          * We assume here that fw_info is unchanged if this call fails.
3433          */
3434         fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
3435                         &size, &frev, &crev);
3436         if (!fw_info)
3437                 /* During a test, there is no firmware info table. */
3438                 return 0;
3439
3440         /* Patch the state. */
3441         data->vbios_boot_state.sclk_bootup_value =
3442                         le32_to_cpu(fw_info->ulDefaultEngineClock);
3443         data->vbios_boot_state.mclk_bootup_value =
3444                         le32_to_cpu(fw_info->ulDefaultMemoryClock);
3445         data->vbios_boot_state.mvdd_bootup_value =
3446                         le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
3447         data->vbios_boot_state.vddc_bootup_value =
3448                         le16_to_cpu(fw_info->usBootUpVDDCVoltage);
3449         data->vbios_boot_state.vddci_bootup_value =
3450                         le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
3451         data->vbios_boot_state.pcie_gen_bootup_value =
3452                         smu7_get_current_pcie_speed(hwmgr);
3453
3454         data->vbios_boot_state.pcie_lane_bootup_value =
3455                         (uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
3456
3457         /* set boot power state */
3458         ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
3459         ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
3460         ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
3461         ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
3462
3463         return 0;
3464 }
3465
3466 static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
3467 {
3468         int result;
3469         unsigned long ret = 0;
3470
3471         if (hwmgr->pp_table_version == PP_TABLE_V0) {
3472                 result = pp_tables_get_num_of_entries(hwmgr, &ret);
3473                 return result ? 0 : ret;
3474         } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3475                 result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
3476                 return result;
3477         }
3478         return 0;
3479 }
3480
3481 static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
3482                 void *state, struct pp_power_state *power_state,
3483                 void *pp_table, uint32_t classification_flag)
3484 {
3485         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3486         struct smu7_power_state  *smu7_power_state =
3487                         (struct smu7_power_state *)(&(power_state->hardware));
3488         struct smu7_performance_level *performance_level;
3489         ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3490         ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3491                         (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
3492         PPTable_Generic_SubTable_Header *sclk_dep_table =
3493                         (PPTable_Generic_SubTable_Header *)
3494                         (((unsigned long)powerplay_table) +
3495                                 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3496
3497         ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3498                         (ATOM_Tonga_MCLK_Dependency_Table *)
3499                         (((unsigned long)powerplay_table) +
3500                                 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3501
3502         /* The following fields are not initialized here: id, orderedList, allStatesList. */
3503         power_state->classification.ui_label =
3504                         (le16_to_cpu(state_entry->usClassification) &
3505                         ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3506                         ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3507         power_state->classification.flags = classification_flag;
3508         /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
3509
3510         power_state->classification.temporary_state = false;
3511         power_state->classification.to_be_deleted = false;
3512
3513         power_state->validation.disallowOnDC =
3514                         (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3515                                         ATOM_Tonga_DISALLOW_ON_DC));
3516
3517         power_state->pcie.lanes = 0;
3518
3519         power_state->display.disableFrameModulation = false;
3520         power_state->display.limitRefreshrate = false;
3521         power_state->display.enableVariBright =
3522                         (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3523                                         ATOM_Tonga_ENABLE_VARIBRIGHT));
3524
3525         power_state->validation.supportedPowerLevels = 0;
3526         power_state->uvd_clocks.VCLK = 0;
3527         power_state->uvd_clocks.DCLK = 0;
3528         power_state->temperatures.min = 0;
3529         power_state->temperatures.max = 0;
3530
3531         performance_level = &(smu7_power_state->performance_levels
3532                         [smu7_power_state->performance_level_count++]);
3533
	PP_ASSERT_WITH_CODE(
			(smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
			"Performance levels exceed SMC limit!",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(
			(smu7_power_state->performance_level_count <=
					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
			"Performance levels exceed driver limit!",
			return -EINVAL);

	/* Performance levels are arranged from low to high. */
	performance_level->memory_clock = mclk_dep_table->entries
			[state_entry->ucMemoryClockIndexLow].ulMclk;
	if (sclk_dep_table->ucRevId == 0)
		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexLow].ulSclk;
	else if (sclk_dep_table->ucRevId == 1)
		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexLow].ulSclk;
	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
			state_entry->ucPCIEGenLow);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
			state_entry->ucPCIELaneLow);

	performance_level = &(smu7_power_state->performance_levels
			[smu7_power_state->performance_level_count++]);
	performance_level->memory_clock = mclk_dep_table->entries
			[state_entry->ucMemoryClockIndexHigh].ulMclk;

	if (sclk_dep_table->ucRevId == 0)
		performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexHigh].ulSclk;
	else if (sclk_dep_table->ucRevId == 1)
		performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
			[state_entry->ucEngineClockIndexHigh].ulSclk;

	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
			state_entry->ucPCIEGenHigh);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
			state_entry->ucPCIELaneHigh);

	return 0;
}

static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct smu7_power_state *ps;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct smu7_power_state *)(&state->hardware);

	result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
			smu7_get_pp_table_entry_callback_func_v1);

	/*
	 * This is the earliest point at which we have both the dependency
	 * table and the VBIOS boot state, since PP_Tables_GetPowerPlayTableEntry
	 * retrieves the VBIOS boot state. If there is only one VDDCI/MCLK
	 * level, check that it matches the VBIOS boot level.
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].vddci !=
				data->vbios_boot_state.vddci_bootup_value)
			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	if (!result) {
		uint32_t i;

		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;
			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;
				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}

static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *power_state,
					unsigned int index, const void *clock_info)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_power_state  *ps = cast_phw_smu7_power_state(power_state);
	const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
	struct smu7_performance_level *performance_level;
	uint32_t engine_clock, memory_clock;
	uint16_t pcie_gen_from_bios;

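	/*
	 * The VBIOS stores each clock split into a 16-bit low word and an
	 * 8-bit high byte; stitch the full value back together here.
	 */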
	engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
	memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;

	if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
		data->highest_mclk = memory_clock;

	PP_ASSERT_WITH_CODE(
			(ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
			"Performance levels exceed SMC limit!",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(
			(ps->performance_level_count <
					hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
			"Performance levels exceed driver limit, skip!",
			return 0);

	performance_level = &(ps->performance_levels
			[ps->performance_level_count++]);

	/* Performance levels are arranged from low to high. */
	performance_level->memory_clock = memory_clock;
	performance_level->engine_clock = engine_clock;

	pcie_gen_from_bios = visland_clk_info->ucPCIEGen;

	performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
	performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);

	return 0;
}

static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct smu7_power_state *ps;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct phm_clock_voltage_dependency_table *dep_mclk_table =
			hwmgr->dyn_state.vddci_dependency_on_mclk;

	memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));

	state->hardware.magic = PHM_VIslands_Magic;

	ps = (struct smu7_power_state *)(&state->hardware);

	result = pp_tables_get_entry(hwmgr, entry_index, state,
			smu7_get_pp_table_entry_callback_func_v0);

	/*
	 * This is the earliest point at which we have both the dependency
	 * table and the VBIOS boot state, since
	 * PP_Tables_GetPowerPlayTableEntry retrieves the VBIOS boot state.
	 * If there is only one VDDCI/MCLK level, check that it matches the
	 * VBIOS boot level.
	 */
	if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
		if (dep_mclk_table->entries[0].clk !=
				data->vbios_boot_state.mclk_bootup_value)
			pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot MCLK level");
		if (dep_mclk_table->entries[0].v !=
				data->vbios_boot_state.vddci_bootup_value)
			pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
					"does not match VBIOS boot VDDCI level");
	}

	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	if (state->classification.flags & PP_StateClassificationFlag_ACPI)
		data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	if (!result) {
		uint32_t i;

		switch (state->classification.ui_label) {
		case PP_StateUILabel_Performance:
			data->use_pcie_performance_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_performance.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_performance.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_performance.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_performance.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_performance.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_performance.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		case PP_StateUILabel_Battery:
			data->use_pcie_power_saving_levels = true;

			for (i = 0; i < ps->performance_level_count; i++) {
				if (data->pcie_gen_power_saving.max <
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.max =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_gen_power_saving.min >
						ps->performance_levels[i].pcie_gen)
					data->pcie_gen_power_saving.min =
							ps->performance_levels[i].pcie_gen;

				if (data->pcie_lane_power_saving.max <
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.max =
							ps->performance_levels[i].pcie_lane;

				if (data->pcie_lane_power_saving.min >
						ps->performance_levels[i].pcie_lane)
					data->pcie_lane_power_saving.min =
							ps->performance_levels[i].pcie_lane;
			}
			break;
		default:
			break;
		}
	}
	return 0;
}

static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	if (hwmgr->pp_table_version == PP_TABLE_V0)
		return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
	else if (hwmgr->pp_table_version == PP_TABLE_V1)
		return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);

	return 0;
}

static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
{
	struct amdgpu_device *adev = hwmgr->adev;
	int i;
	u32 tmp = 0;

	if (!query)
		return -EINVAL;

	/*
	 * PPSMC_MSG_GetCurrPkgPwr is not supported on:
	 *  - Hawaii
	 *  - Bonaire
	 *  - Fiji
	 *  - Tonga
	 */
	if ((adev->asic_type != CHIP_HAWAII) &&
	    (adev->asic_type != CHIP_BONAIRE) &&
	    (adev->asic_type != CHIP_FIJI) &&
	    (adev->asic_type != CHIP_TONGA)) {
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp);
		*query = tmp;

		if (tmp != 0)
			return 0;
	}

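	/*
	 * Fallback for ASICs (or cases) where GetCurrPkgPwr returned nothing:
	 * start the SMC PM status log and poll SMU_PM_STATUS_95 for up to
	 * ~5 seconds until a non-zero power sample appears.
	 */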
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
							ixSMU_PM_STATUS_95, 0);

	for (i = 0; i < 10; i++) {
		msleep(500);
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL);
		tmp = cgs_read_ind_register(hwmgr->device,
						CGS_IND_REG__SMC,
						ixSMU_PM_STATUS_95);
		if (tmp != 0)
			break;
	}
	*query = tmp;

	return 0;
}

static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			    void *value, int *size)
{
	uint32_t sclk, mclk, activity_percent;
	uint32_t offset, val_vid;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
		*((uint32_t *)value) = sclk;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
		*((uint32_t *)value) = mclk;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
								SMU_SoftRegisters,
								(idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
								AverageGraphicsActivity:
								AverageMemoryActivity);

		activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
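		/* The SMC reports activity as an 8.8 fixed-point value;
		 * round it to the nearest integer percentage. */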
		activity_percent += 0x80;
		activity_percent >>= 8;
		*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
	case AMDGPU_PP_SENSOR_VDDGFX:
		if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
		    (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
		else
			val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
					CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);

		*((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
		return 0;
	default:
		return -EINVAL;
	}
}

static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
	uint32_t sclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].engine_clock;
	struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
	uint32_t mclk = smu7_ps->performance_levels
			[smu7_ps->performance_level_count - 1].memory_clock;
	struct PP_Clocks min_clocks = {0};
	uint32_t i;

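	/* Look the requested SCLK up in the DPM table; if it is absent, the
	 * state carries an overdriven clock, so patch the highest level and
	 * flag an OD SCLK update. */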
	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count) {
		if (sclk > sclk_table->dpm_levels[i-1].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
			sclk_table->dpm_levels[i-1].value = sclk;
		}
	} else {
		/* TODO: Check SCLK in DAL's minimum clocks
		 * in case DeepSleep divider update is required.
		 */
		if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
			(min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
				data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
			data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count) {
		if (mclk > mclk_table->dpm_levels[i-1].value) {
			data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
			mclk_table->dpm_levels[i-1].value = mclk;
		}
	}

	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
		data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;

	return 0;
}

static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
		const struct smu7_power_state *smu7_ps)
{
	uint32_t i;
	uint32_t sclk, max_sclk = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;

	for (i = 0; i < smu7_ps->performance_level_count; i++) {
		sclk = smu7_ps->performance_levels[i].engine_clock;
		if (max_sclk < sclk)
			max_sclk = sclk;
	}

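	/* Each SCLK DPM level maps 1:1 onto a PCIe speed level; clamp to the
	 * last PCIe entry when the SCLK table is longer. */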
	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
			return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
					dpm_table->pcie_speed_table.dpm_levels
					[dpm_table->pcie_speed_table.count - 1].value :
					dpm_table->pcie_speed_table.dpm_levels[i].value);
	}

	return 0;
}

static int smu7_request_link_speed_change_before_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_nps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	const struct smu7_power_state *polaris10_cps =
			cast_const_phw_smu7_power_state(states->pcurrent_state);

	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
	uint16_t current_link_speed;

	if (data->force_pcie_gen == PP_PCIEGenInvalid)
		current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
	else
		current_link_speed = data->force_pcie_gen;

	data->force_pcie_gen = PP_PCIEGenInvalid;
	data->pspp_notify_required = false;

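	/* Raising the link speed has to be negotiated with the platform via
	 * ACPI (PSPP) before the state change; lowering it only requires a
	 * notification afterwards. */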
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case PP_PCIEGen3:
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
				break;
			data->force_pcie_gen = PP_PCIEGen2;
			if (current_link_speed == PP_PCIEGen2)
				break;
			fallthrough;
		case PP_PCIEGen2:
			if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
				break;
			fallthrough;
#endif
		default:
			data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			data->pspp_notify_required = true;
	}

	return 0;
}

static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if ((0 == data->sclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to freeze SCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_SCLKDPM_FreezeLevel,
				NULL),
				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	if ((0 == data->mclk_dpm_key_disabled) &&
		!data->mclk_ignore_signal &&
		(data->need_update_smu7_dpm_table &
		 DPMTABLE_OD_UPDATE_MCLK)) {
		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to freeze MCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MCLKDPM_FreezeLevel,
				NULL),
				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
				return -EINVAL);
	}

	return 0;
}

static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
		struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	struct smu7_dpm_table *dpm_table = &data->dpm_table;
	uint32_t count;
	struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
	struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
		for (count = 0; count < dpm_table->sclk_table.count; count++) {
			dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
			dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
		}
	}

	if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		for (count = 0; count < dpm_table->mclk_table.count; count++) {
			dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
			dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
		}
	}

	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
		result = smum_populate_all_graphic_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	if (data->need_update_smu7_dpm_table &
			(DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
		/* populate MCLK DPM table to SMU7 */
		result = smum_populate_all_memory_levels(hwmgr);
		PP_ASSERT_WITH_CODE((0 == result),
				"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
				return result);
	}

	return result;
}

static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
			struct smu7_single_dpm_table *dpm_table,
			uint32_t low_limit, uint32_t high_limit)
{
	uint32_t i;

	/* force the trim if mclk_switching is disabled to prevent flicker */
	bool force_trim = (low_limit == high_limit);

	for (i = 0; i < dpm_table->count; i++) {
		/* skip the trim if OD is enabled */
		if ((!hwmgr->od_enabled || force_trim)
			&& (dpm_table->dpm_levels[i].value < low_limit
			|| dpm_table->dpm_levels[i].value > high_limit))
			dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}

	return 0;
}

static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
		const struct smu7_power_state *smu7_ps)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t high_limit_count;

	PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
			"power state did not have any performance level",
			return -EINVAL);

	high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;

	smu7_trim_single_dpm_states(hwmgr,
			&(data->dpm_table.sclk_table),
			smu7_ps->performance_levels[0].engine_clock,
			smu7_ps->performance_levels[high_limit_count].engine_clock);

	smu7_trim_single_dpm_states(hwmgr,
			&(data->dpm_table.mclk_table),
			smu7_ps->performance_levels[0].memory_clock,
			smu7_ps->performance_levels[high_limit_count].memory_clock);

	return 0;
}

static int smu7_generate_dpm_level_enable_mask(
		struct pp_hwmgr *hwmgr, const void *input)
{
	int result = 0;
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);

	result = smu7_trim_dpm_states(hwmgr, smu7_ps);
	if (result)
		return result;

	data->dpm_level_enable_mask.sclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
	data->dpm_level_enable_mask.mclk_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
	data->dpm_level_enable_mask.pcie_dpm_enable_mask =
			phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);

	return 0;
}

static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (0 == data->need_update_smu7_dpm_table)
		return 0;

	if ((0 == data->sclk_dpm_key_disabled) &&
		(data->need_update_smu7_dpm_table &
		(DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {

		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to Unfreeze SCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_SCLKDPM_UnfreezeLevel,
				NULL),
			"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
			return -EINVAL);
	}

	if ((0 == data->mclk_dpm_key_disabled) &&
		!data->mclk_ignore_signal &&
		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {

		PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
				"Trying to Unfreeze MCLK DPM when DPM is disabled",
				);
		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
				PPSMC_MSG_MCLKDPM_UnfreezeLevel,
				NULL),
			"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
			return -EINVAL);
	}

	data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;

	return 0;
}

static int smu7_notify_link_speed_change_after_state_change(
		struct pp_hwmgr *hwmgr, const void *input)
{
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	const struct smu7_power_state *smu7_ps =
			cast_const_phw_smu7_power_state(states->pnew_state);
	uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
	uint8_t  request;

	if (data->pspp_notify_required) {
		if (target_link_speed == PP_PCIEGen3)
			request = PCIE_PERF_REQ_GEN3;
		else if (target_link_speed == PP_PCIEGen2)
			request = PCIE_PERF_REQ_GEN2;
		else
			request = PCIE_PERF_REQ_GEN1;

		if (request == PCIE_PERF_REQ_GEN1 &&
				smu7_get_current_pcie_speed(hwmgr) > 0)
			return 0;

#ifdef CONFIG_ACPI
		if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
			if (PP_PCIEGen2 == target_link_speed)
				pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
			else
				pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
		}
#endif
	}

	return 0;
}

static int smu7_notify_no_display(struct pp_hwmgr *hwmgr)
{
	return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL) == 0) ?  0 : -EINVAL;
}

static int smu7_notify_has_display(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

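	/* Pass the SMC the VBI timeout (frame_time_x2, computed in
	 * smu7_program_display_gap) and remember the last value sent so
	 * redundant display updates can be detected later. */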
	if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
		if (hwmgr->chip_id == CHIP_VEGAM)
			smum_send_msg_to_smc_with_parameter(hwmgr,
					(PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2,
					NULL);
		else
			smum_send_msg_to_smc_with_parameter(hwmgr,
					(PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2,
					NULL);
		data->last_sent_vbi_timeout = data->frame_time_x2;
	}

	return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ?  0 : -EINVAL;
}

static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	int result = 0;

	if (data->mclk_ignore_signal)
		result = smu7_notify_no_display(hwmgr);
	else
		result = smu7_notify_has_display(hwmgr);

	return result;
}

static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int tmp_result, result = 0;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

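	/* The ordering below follows the SMC contract: freeze DPM, rewrite
	 * the level tables, refresh AVFS and the enable masks, then unfreeze
	 * and notify the display and PCIe dependencies. */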
	tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_request_link_speed_change_before_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to request link speed change before state change!",
				result = tmp_result);
	}

	tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to freeze SCLK MCLK DPM!", result = tmp_result);

	tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	/*
	 * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
	 * That effectively disables AVFS feature.
	 */
	if (hwmgr->hardcode_pp_table != NULL)
		data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;

	tmp_result = smu7_update_avfs(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update AVFS voltages!",
			result = tmp_result);

	tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = smum_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to update SCLK threshold!",
			result = tmp_result);

	tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to unfreeze SCLK MCLK DPM!",
			result = tmp_result);

	tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to upload DPM level enabled mask!",
			result = tmp_result);

	tmp_result = smu7_notify_smc_display(hwmgr);
	PP_ASSERT_WITH_CODE((0 == tmp_result),
			"Failed to notify SMC display settings!",
			result = tmp_result);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PCIEPerformanceRequest)) {
		tmp_result =
			smu7_notify_link_speed_change_after_state_change(hwmgr, input);
		PP_ASSERT_WITH_CODE((0 == tmp_result),
				"Failed to notify link speed change after state change!",
				result = tmp_result);
	}

	data->apply_optimized_settings = false;

	return result;
}

static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
{
	hwmgr->thermal_controller.
	advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;

	return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm,
			NULL);
}

static int
smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
	return 0;
}

/**
 * smu7_program_display_gap - program the display gap
 * @hwmgr: the address of the powerplay hardware manager
 *
 * Return: always 0
 */
static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
	uint32_t display_gap2;
	uint32_t pre_vbi_time_in_us;
	uint32_t frame_time_in_us;
	uint32_t ref_clock, refresh_rate;

	display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);

	ref_clock = amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
	refresh_rate = hwmgr->display_config->vrefresh;

	if (0 == refresh_rate)
		refresh_rate = 60;

	frame_time_in_us = 1000000 / refresh_rate;

	pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;

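	/* frame_time_x2 is twice the frame time, in units of 100 us
	 * (e.g. 60 Hz -> 16666 us -> 333); a floor of 280 (14 ms per
	 * frame) is enforced below. */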
	data->frame_time_x2 = frame_time_in_us * 2 / 100;

	if (data->frame_time_x2 < 280) {
		pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
		data->frame_time_x2 = 280;
	}

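	/* Convert the pre-VBI time to reference-clock cycles: xclk is
	 * reported in 10 kHz units, so ref_clock / 100 is cycles per
	 * microsecond. */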
	display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + smum_get_offsetof(hwmgr,
							SMU_SoftRegisters,
							PreVBlankGap), 0x64);

	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
			data->soft_regs_start + smum_get_offsetof(hwmgr,
							SMU_SoftRegisters,
							VBlankTimeout),
					(frame_time_in_us - pre_vbi_time_in_us));

	return 0;
}

static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	return smu7_program_display_gap(hwmgr);
}

/**
 * smu7_set_max_fan_rpm_output - set the maximum target operating fan output RPM
 * @hwmgr: the address of the powerplay hardware manager
 * @us_max_fan_rpm: max operating fan RPM value
 *
 * Return: the response that came from the SMC
 */
static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
{
	hwmgr->thermal_controller.
	advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;

	return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm,
			NULL);
}

static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
	.process = phm_irq_process,
};

static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;

	source->funcs = &smu7_irq_funcs;

	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IRQ_CLIENTID_LEGACY,
			VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
			source);
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IRQ_CLIENTID_LEGACY,
			VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
			source);

	/* Register CTF(GPIO_19) interrupt */
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			AMDGPU_IRQ_CLIENTID_LEGACY,
			VISLANDS30_IV_SRCID_GPIO_19,
			source);

	return 0;
}

static bool
smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	bool is_update_required = false;

	if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
		is_update_required = true;

	if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh)
		is_update_required = true;

	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
	    hwmgr->chip_id <= CHIP_VEGAM &&
	    data->last_sent_vbi_timeout != data->frame_time_x2)
		is_update_required = true;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
		if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
			(data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
			hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
			is_update_required = true;
	}

	return is_update_required;
}

static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
					       const struct smu7_performance_level *pl2)
{
	return ((pl1->memory_clock == pl2->memory_clock) &&
		  (pl1->engine_clock == pl2->engine_clock) &&
		  (pl1->pcie_gen == pl2->pcie_gen) &&
		  (pl1->pcie_lane == pl2->pcie_lane));
}

static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
		const struct pp_hw_power_state *pstate1,
		const struct pp_hw_power_state *pstate2, bool *equal)
{
	const struct smu7_power_state *psa;
	const struct smu7_power_state *psb;
	int i;
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
		return -EINVAL;

	psa = cast_const_phw_smu7_power_state(pstate1);
	psb = cast_const_phw_smu7_power_state(pstate2);
	/* If the two states don't even have the same number of performance levels they cannot be the same state. */
	if (psa->performance_level_count != psb->performance_level_count) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < psa->performance_level_count; i++) {
		if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
			/* If we have found even one performance level pair that is different the states are different. */
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie. */
	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
	*equal &= (psa->sclk_threshold == psb->sclk_threshold);
	/* For OD call, set value based on flag */
	*equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
							DPMTABLE_OD_UPDATE_MCLK |
							DPMTABLE_OD_UPDATE_VDDC));

	return 0;
}

static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
{
	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
	uint32_t tmp;

	/* Read MC indirect register offset 0x9F bits [3:0] to see
	 * if VBIOS has already loaded a full version of MC ucode
	 * or not.
	 */

	smu7_get_mc_microcode_version(hwmgr);

	data->need_long_memory_training = false;

	cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
							ixMC_IO_DEBUG_UP_13);
	tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

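	/* Bit 23 of MC_IO_DEBUG_UP_13 is taken here as the indication that
	 * the full MC firmware is in place; otherwise fall back to
	 * conservative memory latencies and, on Polaris, disable FFC. */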
4644         if (tmp & (1 << 23)) {
4645                 data->mem_latency_high = MEM_LATENCY_HIGH;
4646                 data->mem_latency_low = MEM_LATENCY_LOW;
4647                 if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4648                     (hwmgr->chip_id == CHIP_POLARIS11) ||
4649                     (hwmgr->chip_id == CHIP_POLARIS12))
4650                         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL);
4651         } else {
4652                 data->mem_latency_high = 330;
4653                 data->mem_latency_low = 330;
4654                 if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4655                     (hwmgr->chip_id == CHIP_POLARIS11) ||
4656                     (hwmgr->chip_id == CHIP_POLARIS12))
4657                         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL);
4658         }
4659
4660         return 0;
4661 }
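
/*
 * Editor's note, summarizing the branch above (values taken from this file):
 *
 *	tmp & (1 << 23) set    -> mem_latency_high/low = 45/35 us,
 *	                          Polaris parts get PPSMC_MSG_EnableFFC
 *	tmp & (1 << 23) clear  -> mem_latency_high/low = 330/330 us,
 *	                          Polaris parts get PPSMC_MSG_DisableFFC
 */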
4662
4663 static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
4664 {
4665         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4666
4667         data->clock_registers.vCG_SPLL_FUNC_CNTL         =
4668                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
4669         data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
4670                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
4671         data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
4672                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
4673         data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
4674                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
4675         data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
4676                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
4677         data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
4678                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
4679         data->clock_registers.vDLL_CNTL                  =
4680                 cgs_read_register(hwmgr->device, mmDLL_CNTL);
4681         data->clock_registers.vMCLK_PWRMGT_CNTL          =
4682                 cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
4683         data->clock_registers.vMPLL_AD_FUNC_CNTL         =
4684                 cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
4685         data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
4686                 cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
4687         data->clock_registers.vMPLL_FUNC_CNTL            =
4688                 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
4689         data->clock_registers.vMPLL_FUNC_CNTL_1          =
4690                 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
4691         data->clock_registers.vMPLL_FUNC_CNTL_2          =
4692                 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
4693         data->clock_registers.vMPLL_SS1                  =
4694                 cgs_read_register(hwmgr->device, mmMPLL_SS1);
4695         data->clock_registers.vMPLL_SS2                  =
4696                 cgs_read_register(hwmgr->device, mmMPLL_SS2);
4697         return 0;
4699 }
4700
4701 /**
4702  * smu7_get_memory_type - find out if the memory is GDDR5
4703  * @hwmgr: the address of the powerplay hardware manager.
4704  *
4705  * Return: always 0
4706  */
4707 static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
4708 {
4709         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4710         struct amdgpu_device *adev = hwmgr->adev;
4711
4712         data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);
4713
4714         return 0;
4715 }
4716
4717 /**
4718  * smu7_enable_acpi_power_management - enable dynamic power management by the SMC
4719  * @hwmgr: the address of the powerplay hardware manager.
4720  *
4721  * Return: always 0
4722  */
4723 static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
4724 {
4725         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4726                         GENERAL_PWRMGT, STATIC_PM_EN, 1);
4727
4728         return 0;
4729 }
4730
4731 /**
4732  * smu7_init_power_gate_state - initialize the power-gating states for the different engines
4733  * @hwmgr: the address of the powerplay hardware manager.
4734  *
4735  * Return: always 0
4736  */
4737 static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
4738 {
4739         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4740
4741         data->uvd_power_gated = false;
4742         data->vce_power_gated = false;
4743
4744         return 0;
4745 }
4746
4747 static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
4748 {
4749         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4750
4751         data->low_sclk_interrupt_threshold = 0;
4752         return 0;
4753 }
4754
4755 static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
4756 {
4757         int tmp_result, result = 0;
4758
4759         smu7_check_mc_firmware(hwmgr);
4760
4761         tmp_result = smu7_read_clock_registers(hwmgr);
4762         PP_ASSERT_WITH_CODE((0 == tmp_result),
4763                         "Failed to read clock registers!", result = tmp_result);
4764
4765         tmp_result = smu7_get_memory_type(hwmgr);
4766         PP_ASSERT_WITH_CODE((0 == tmp_result),
4767                         "Failed to get memory type!", result = tmp_result);
4768
4769         tmp_result = smu7_enable_acpi_power_management(hwmgr);
4770         PP_ASSERT_WITH_CODE((0 == tmp_result),
4771                         "Failed to enable ACPI power management!", result = tmp_result);
4772
4773         tmp_result = smu7_init_power_gate_state(hwmgr);
4774         PP_ASSERT_WITH_CODE((0 == tmp_result),
4775                         "Failed to init power gate state!", result = tmp_result);
4776
4777         tmp_result = smu7_get_mc_microcode_version(hwmgr);
4778         PP_ASSERT_WITH_CODE((0 == tmp_result),
4779                         "Failed to get MC microcode version!", result = tmp_result);
4780
4781         tmp_result = smu7_init_sclk_threshold(hwmgr);
4782         PP_ASSERT_WITH_CODE((0 == tmp_result),
4783                         "Failed to init sclk threshold!", result = tmp_result);
4784
4785         return result;
4786 }
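
/*
 * Editor's sketch of the PP_ASSERT_WITH_CODE() pattern used above (the
 * macro lives in pp_debug.h; the shape shown here is illustrative):
 *
 *	#define PP_ASSERT_WITH_CODE(cond, msg, code)      \
 *		do {                                      \
 *			if (!(cond)) {                    \
 *				pr_warn("%s\n", msg);     \
 *				code;                     \
 *			}                                 \
 *		} while (0)
 *
 * A failed step therefore only logs and records tmp_result; the task keeps
 * running and returns the error code of the last step that failed.
 */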
4787
4788 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4789                 enum pp_clock_type type, uint32_t mask)
4790 {
4791         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4792
4793         if (mask == 0)
4794                 return -EINVAL;
4795
4796         switch (type) {
4797         case PP_SCLK:
4798                 if (!data->sclk_dpm_key_disabled)
4799                         smum_send_msg_to_smc_with_parameter(hwmgr,
4800                                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
4801                                         data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask,
4802                                         NULL);
4803                 break;
4804         case PP_MCLK:
4805                 if (!data->mclk_dpm_key_disabled)
4806                         smum_send_msg_to_smc_with_parameter(hwmgr,
4807                                         PPSMC_MSG_MCLKDPM_SetEnabledMask,
4808                                         data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask,
4809                                         NULL);
4810                 break;
4811         case PP_PCIE:
4812         {
4813                 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4814
4815                 if (!data->pcie_dpm_key_disabled) {
4816                         if (fls(tmp) != ffs(tmp))
4817                                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel,
4818                                                 NULL);
4819                         else
4820                                 smum_send_msg_to_smc_with_parameter(hwmgr,
4821                                         PPSMC_MSG_PCIeDPM_ForceLevel,
4822                                         fls(tmp) - 1,
4823                                         NULL);
4824                 }
4825                 break;
4826         }
4827         default:
4828                 break;
4829         }
4830
4831         return 0;
4832 }
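
/*
 * Editor's note: the fls()/ffs() comparison above is a single-bit test,
 * since the two agree exactly when tmp has one bit set.  Illustrative
 * masks:
 *
 *	tmp = 0x4: ffs = 3, fls = 3 -> one PCIe level requested,
 *	           force level fls(tmp) - 1 = 2
 *	tmp = 0x6: ffs = 2, fls = 3 -> several levels allowed,
 *	           unforce and let PCIe DPM choose
 */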
4833
4834 static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
4835                 enum pp_clock_type type, char *buf)
4836 {
4837         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4838         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4839         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4840         struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
4841         struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4842         struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
4843         struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
4844         int i, now, size = 0;
4845         uint32_t clock, pcie_speed;
4846
4847         switch (type) {
4848         case PP_SCLK:
4849                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
4850
4851                 for (i = 0; i < sclk_table->count; i++) {
4852                         if (clock > sclk_table->dpm_levels[i].value)
4853                                 continue;
4854                         break;
4855                 }
4856                 now = i;
4857
4858                 for (i = 0; i < sclk_table->count; i++)
4859                         size += sprintf(buf + size, "%d: %uMhz %s\n",
4860                                         i, sclk_table->dpm_levels[i].value / 100,
4861                                         (i == now) ? "*" : "");
4862                 break;
4863         case PP_MCLK:
4864                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
4865
4866                 for (i = 0; i < mclk_table->count; i++) {
4867                         if (clock > mclk_table->dpm_levels[i].value)
4868                                 continue;
4869                         break;
4870                 }
4871                 now = i;
4872
4873                 for (i = 0; i < mclk_table->count; i++)
4874                         size += sprintf(buf + size, "%d: %uMhz %s\n",
4875                                         i, mclk_table->dpm_levels[i].value / 100,
4876                                         (i == now) ? "*" : "");
4877                 break;
4878         case PP_PCIE:
4879                 pcie_speed = smu7_get_current_pcie_speed(hwmgr);
4880                 for (i = 0; i < pcie_table->count; i++) {
4881                         if (pcie_speed != pcie_table->dpm_levels[i].value)
4882                                 continue;
4883                         break;
4884                 }
4885                 now = i;
4886
4887                 for (i = 0; i < pcie_table->count; i++)
4888                         size += sprintf(buf + size, "%d: %s %s\n", i,
4889                                         (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
4890                                         (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
4891                                         (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
4892                                         (i == now) ? "*" : "");
4893                 break;
4894         case OD_SCLK:
4895                 if (hwmgr->od_enabled) {
4896                         size = sprintf(buf, "%s:\n", "OD_SCLK");
4897                         for (i = 0; i < odn_sclk_table->num_of_pl; i++)
4898                                 size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
4899                                         i, odn_sclk_table->entries[i].clock/100,
4900                                         odn_sclk_table->entries[i].vddc);
4901                 }
4902                 break;
4903         case OD_MCLK:
4904                 if (hwmgr->od_enabled) {
4905                         size = sprintf(buf, "%s:\n", "OD_MCLK");
4906                         for (i = 0; i < odn_mclk_table->num_of_pl; i++)
4907                                 size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
4908                                         i, odn_mclk_table->entries[i].clock/100,
4909                                         odn_mclk_table->entries[i].vddc);
4910                 }
4911                 break;
4912         case OD_RANGE:
4913                 if (hwmgr->od_enabled) {
4914                         size = sprintf(buf, "%s:\n", "OD_RANGE");
4915                         size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
4916                                 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
4917                                 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4918                         size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
4919                                 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
4920                                 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4921                         size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
4922                                 data->odn_dpm_table.min_vddc,
4923                                 data->odn_dpm_table.max_vddc);
4924                 }
4925                 break;
4926         default:
4927                 break;
4928         }
4929         return size;
4930 }
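
/*
 * Editor's note: illustrative pp_dpm_sclk output from the PP_SCLK branch
 * above (dpm_levels values are in 10 kHz units, hence the /100 to MHz;
 * '*' marks the first level at or above the SMC-reported clock):
 *
 *	0: 300Mhz
 *	1: 608Mhz *
 *	2: 910Mhz
 */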
4931
4932 static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4933 {
4934         switch (mode) {
4935         case AMD_FAN_CTRL_NONE:
4936                 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4937                 break;
4938         case AMD_FAN_CTRL_MANUAL:
4939                 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4940                         PHM_PlatformCaps_MicrocodeFanControl))
4941                         smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
4942                 break;
4943         case AMD_FAN_CTRL_AUTO:
4944                 if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
4945                         smu7_fan_ctrl_start_smc_fan_control(hwmgr);
4946                 break;
4947         default:
4948                 break;
4949         }
4950 }
4951
4952 static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4953 {
4954         return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
4955 }
4956
4957 static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
4958 {
4959         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4960         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4961         struct smu7_single_dpm_table *golden_sclk_table =
4962                         &(data->golden_dpm_table.sclk_table);
4963         int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
4964         int golden_value = golden_sclk_table->dpm_levels
4965                         [golden_sclk_table->count - 1].value;
4966
4967         value -= golden_value;
4968         value = DIV_ROUND_UP(value * 100, golden_value);
4969
4970         return value;
4971 }
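
/*
 * Editor's sketch of the overdrive percentage above, with illustrative
 * numbers in the file's 10 kHz clock units:
 *
 *	top sclk    = 14740 (1474 MHz)
 *	golden sclk = 13400 (1340 MHz)
 *	od          = DIV_ROUND_UP((14740 - 13400) * 100, 13400) = 10
 *
 * i.e. the percentage by which the top level exceeds the golden (factory)
 * table, rounded up.
 */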
4972
4973 static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
4974 {
4975         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4976         struct smu7_single_dpm_table *golden_sclk_table =
4977                         &(data->golden_dpm_table.sclk_table);
4978         struct pp_power_state  *ps;
4979         struct smu7_power_state  *smu7_ps;
4980
4981         if (value > 20)
4982                 value = 20;
4983
4984         ps = hwmgr->request_ps;
4985
4986         if (ps == NULL)
4987                 return -EINVAL;
4988
4989         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
4990
4991         smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
4992                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
4993                         value / 100 +
4994                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
4995
4996         return 0;
4997 }
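
/*
 * Editor's note: this is the inverse of smu7_get_sclk_od(), clamped to
 * +20%.  Reusing the illustrative golden top level of 13400 (1340 MHz),
 * value = 10 gives:
 *
 *	13400 * 10 / 100 + 13400 = 14740 (1474 MHz)
 *
 * and only the highest performance level of the requested state is
 * rewritten.
 */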
4998
4999 static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
5000 {
5001         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5002         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5003         struct smu7_single_dpm_table *golden_mclk_table =
5004                         &(data->golden_dpm_table.mclk_table);
5005         int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
5006         int golden_value = golden_mclk_table->dpm_levels
5007                         [golden_mclk_table->count - 1].value;
5008
5009         value -= golden_value;
5010         value = DIV_ROUND_UP(value * 100, golden_value);
5011
5012         return value;
5013 }
5014
5015 static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5016 {
5017         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5018         struct smu7_single_dpm_table *golden_mclk_table =
5019                         &(data->golden_dpm_table.mclk_table);
5020         struct pp_power_state  *ps;
5021         struct smu7_power_state  *smu7_ps;
5022
5023         if (value > 20)
5024                 value = 20;
5025
5026         ps = hwmgr->request_ps;
5027
5028         if (ps == NULL)
5029                 return -EINVAL;
5030
5031         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
5032
5033         smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
5034                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
5035                         value / 100 +
5036                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5037
5038         return 0;
5039 }
5040
5041
5042 static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
5043 {
5044         struct phm_ppt_v1_information *table_info =
5045                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5046         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
5047         struct phm_clock_voltage_dependency_table *sclk_table;
5048         int i;
5049
5050         if (hwmgr->pp_table_version == PP_TABLE_V1) {
5051                 if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
5052                         return -EINVAL;
5053                 dep_sclk_table = table_info->vdd_dep_on_sclk;
5054                 for (i = 0; i < dep_sclk_table->count; i++)
5055                         clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
5056                 clocks->count = dep_sclk_table->count;
5057         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
5058                 sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
5059                 for (i = 0; i < sclk_table->count; i++)
5060                         clocks->clock[i] = sclk_table->entries[i].clk * 10;
5061                 clocks->count = sclk_table->count;
5062         }
5063
5064         return 0;
5065 }
5066
5067 static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
5068 {
5069         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5070
5071         if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
5072                 return data->mem_latency_high;
5073         else if (clk >= MEM_FREQ_HIGH_LATENCY)
5074                 return data->mem_latency_low;
5075         else
5076                 return MEM_LATENCY_ERR;
5077 }
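
/*
 * Editor's note on units (inferred from the clk * 10 kHz conversions used
 * elsewhere in this file):
 *
 *	clk <  25000 (below 250 MHz)   -> MEM_LATENCY_ERR
 *	25000 <= clk < 80000           -> mem_latency_high (45 or 330 us)
 *	clk >= 80000 (800 MHz and up)  -> mem_latency_low  (35 or 330 us)
 *
 * The 45/35 versus 330 split is chosen in smu7_check_mc_firmware().
 */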
5078
5079 static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
5080 {
5081         struct phm_ppt_v1_information *table_info =
5082                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5083         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
5084         int i;
5085         struct phm_clock_voltage_dependency_table *mclk_table;
5086
5087         if (hwmgr->pp_table_version == PP_TABLE_V1) {
5088                 if (table_info == NULL || table_info->vdd_dep_on_mclk == NULL)
5089                         return -EINVAL;
5090                 dep_mclk_table = table_info->vdd_dep_on_mclk;
5091                 for (i = 0; i < dep_mclk_table->count; i++) {
5092                         clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
5093                         clocks->latency[i] = smu7_get_mem_latency(hwmgr,
5094                                                 dep_mclk_table->entries[i].clk);
5095                 }
5096                 clocks->count = dep_mclk_table->count;
5097         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
5098                 mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
5099                 for (i = 0; i < mclk_table->count; i++)
5100                         clocks->clock[i] = mclk_table->entries[i].clk * 10;
5101                 clocks->count = mclk_table->count;
5102         }
5103         return 0;
5104 }
5105
5106 static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
5107                                                 struct amd_pp_clocks *clocks)
5108 {
5109         switch (type) {
5110         case amd_pp_sys_clock:
5111                 smu7_get_sclks(hwmgr, clocks);
5112                 break;
5113         case amd_pp_mem_clock:
5114                 smu7_get_mclks(hwmgr, clocks);
5115                 break;
5116         default:
5117                 return -EINVAL;
5118         }
5119
5120         return 0;
5121 }
5122
5123 static int smu7_get_sclks_with_latency(struct pp_hwmgr *hwmgr,
5124                                        struct pp_clock_levels_with_latency *clocks)
5125 {
5126         struct phm_ppt_v1_information *table_info =
5127                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5128         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
5129                         table_info->vdd_dep_on_sclk;
5130         int i;
5131
5132         clocks->num_levels = 0;
5133         for (i = 0; i < dep_sclk_table->count; i++) {
5134                 if (dep_sclk_table->entries[i].clk) {
5135                         clocks->data[clocks->num_levels].clocks_in_khz =
5136                                 dep_sclk_table->entries[i].clk * 10;
5137                         clocks->num_levels++;
5138                 }
5139         }
5140
5141         return 0;
5142 }
5143
5144 static int smu7_get_mclks_with_latency(struct pp_hwmgr *hwmgr,
5145                                        struct pp_clock_levels_with_latency *clocks)
5146 {
5147         struct phm_ppt_v1_information *table_info =
5148                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5149         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
5150                         table_info->vdd_dep_on_mclk;
5151         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5152         int i;
5153
5154         clocks->num_levels = 0;
5155         data->mclk_latency_table.count = 0;
5156         for (i = 0; i < dep_mclk_table->count; i++) {
5157                 if (dep_mclk_table->entries[i].clk) {
5158                         clocks->data[clocks->num_levels].clocks_in_khz =
5159                                         dep_mclk_table->entries[i].clk * 10;
5160                         data->mclk_latency_table.entries[data->mclk_latency_table.count].frequency =
5161                                         dep_mclk_table->entries[i].clk;
5162                         clocks->data[clocks->num_levels].latency_in_us =
5163                                 data->mclk_latency_table.entries[data->mclk_latency_table.count].latency =
5164                                         smu7_get_mem_latency(hwmgr, dep_mclk_table->entries[i].clk);
5165                         clocks->num_levels++;
5166                         data->mclk_latency_table.count++;
5167                 }
5168         }
5169
5170         return 0;
5171 }
5172
5173 static int smu7_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
5174                                                enum amd_pp_clock_type type,
5175                                                struct pp_clock_levels_with_latency *clocks)
5176 {
5177         if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
5178               hwmgr->chip_id <= CHIP_VEGAM))
5179                 return -EINVAL;
5180
5181         switch (type) {
5182         case amd_pp_sys_clock:
5183                 smu7_get_sclks_with_latency(hwmgr, clocks);
5184                 break;
5185         case amd_pp_mem_clock:
5186                 smu7_get_mclks_with_latency(hwmgr, clocks);
5187                 break;
5188         default:
5189                 return -EINVAL;
5190         }
5191
5192         return 0;
5193 }
5194
5195 static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
5196                                                  void *clock_range)
5197 {
5198         struct phm_ppt_v1_information *table_info =
5199                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5200         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
5201                         table_info->vdd_dep_on_mclk;
5202         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
5203                         table_info->vdd_dep_on_sclk;
5204         struct polaris10_smumgr *smu_data =
5205                         (struct polaris10_smumgr *)(hwmgr->smu_backend);
5206         SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
5207         struct dm_pp_wm_sets_with_clock_ranges *watermarks =
5208                         (struct dm_pp_wm_sets_with_clock_ranges *)clock_range;
5209         uint32_t i, j, k;
5210         bool valid_entry;
5211
5212         if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
5213               hwmgr->chip_id <= CHIP_VEGAM))
5214                 return -EINVAL;
5215
5216         for (i = 0; i < dep_mclk_table->count; i++) {
5217                 for (j = 0; j < dep_sclk_table->count; j++) {
5218                         valid_entry = false;
5219                         for (k = 0; k < watermarks->num_wm_sets; k++) {
5220                                 if (dep_sclk_table->entries[j].clk >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz / 10 &&
5221                                     dep_sclk_table->entries[j].clk < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz / 10 &&
5222                                     dep_mclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz / 10 &&
5223                                     dep_mclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz / 10) {
5224                                         valid_entry = true;
5225                                         table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id;
5226                                         break;
5227                                 }
5228                         }
5229                         PP_ASSERT_WITH_CODE(valid_entry,
5230                                         "Clock is not in range of specified clock range for watermark from DAL!  Using highest water mark set.",
5231                                         table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k - 1].wm_set_id);
5232                 }
5233         }
5234
5235         return smu7_copy_bytes_to_smc(hwmgr,
5236                                       smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, DisplayWatermark),
5237                                       (uint8_t *)table->DisplayWatermark,
5238                                       sizeof(uint8_t) * SMU74_MAX_LEVELS_MEMORY * SMU74_MAX_LEVELS_GRAPHICS,
5239                                       SMC_RAM_END);
5240 }
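
/*
 * Editor's note (illustrative): DisplayWatermark is an mclk x sclk matrix
 * and entry [i][j] takes the wm_set_id of the first DAL range holding both
 * clocks.  With a single range { wm_set_id = 1, eng 100000..2000000 kHz,
 * mem 100000..2000000 kHz }, an sclk of 60000 (600 MHz) and an mclk of
 * 100000 (1 GHz) both land inside it once the kHz bounds are divided by 10
 * into the dependency tables' 10 kHz units, so the entry becomes 1.
 */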
5241
5242 static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
5243                                         uint32_t virtual_addr_low,
5244                                         uint32_t virtual_addr_hi,
5245                                         uint32_t mc_addr_low,
5246                                         uint32_t mc_addr_hi,
5247                                         uint32_t size)
5248 {
5249         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5250
5251         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5252                                         data->soft_regs_start +
5253                                         smum_get_offsetof(hwmgr,
5254                                         SMU_SoftRegisters, DRAM_LOG_ADDR_H),
5255                                         mc_addr_hi);
5256
5257         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5258                                         data->soft_regs_start +
5259                                         smum_get_offsetof(hwmgr,
5260                                         SMU_SoftRegisters, DRAM_LOG_ADDR_L),
5261                                         mc_addr_low);
5262
5263         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5264                                         data->soft_regs_start +
5265                                         smum_get_offsetof(hwmgr,
5266                                         SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
5267                                         virtual_addr_hi);
5268
5269         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5270                                         data->soft_regs_start +
5271                                         smum_get_offsetof(hwmgr,
5272                                         SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
5273                                         virtual_addr_low);
5274
5275         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5276                                         data->soft_regs_start +
5277                                         smum_get_offsetof(hwmgr,
5278                                         SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
5279                                         size);
5280         return 0;
5281 }
5282
5283 static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
5284                                         struct amd_pp_simple_clock_info *clocks)
5285 {
5286         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5287         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5288         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5289
5290         if (clocks == NULL)
5291                 return -EINVAL;
5292
5293         clocks->memory_max_clock = mclk_table->count > 1 ?
5294                                 mclk_table->dpm_levels[mclk_table->count-1].value :
5295                                 mclk_table->dpm_levels[0].value;
5296         clocks->engine_max_clock = sclk_table->count > 1 ?
5297                                 sclk_table->dpm_levels[sclk_table->count-1].value :
5298                                 sclk_table->dpm_levels[0].value;
5299         return 0;
5300 }
5301
5302 static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
5303                 struct PP_TemperatureRange *thermal_data)
5304 {
5305         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5306         struct phm_ppt_v1_information *table_info =
5307                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5308
5309         memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
5310
5311         if (hwmgr->pp_table_version == PP_TABLE_V1)
5312                 thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
5313                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
5314         else if (hwmgr->pp_table_version == PP_TABLE_V0)
5315                 thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
5316                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
5317
5318         return 0;
5319 }
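
/*
 * Editor's note: PP_TEMPERATURE_UNITS_PER_CENTIGRADES converts degrees C
 * to the millidegree scale used by hwmon (assumed here to be 1000), so a
 * pptable usSoftwareShutdownTemp of 91 yields thermal_data->max = 91000.
 */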
5320
5321 static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
5322                                         enum PP_OD_DPM_TABLE_COMMAND type,
5323                                         uint32_t clk,
5324                                         uint32_t voltage)
5325 {
5326         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5327
5328         if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
5329                 pr_info("OD voltage is out of range [%d - %d] mV\n",
5330                                                 data->odn_dpm_table.min_vddc,
5331                                                 data->odn_dpm_table.max_vddc);
5332                 return false;
5333         }
5334
5335         if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
5336                 if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
5337                         hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
5338                         pr_info("OD engine clock is out of range [%d - %d] MHz\n",
5339                                 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
5340                                 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
5341                         return false;
5342                 }
5343         } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
5344                 if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
5345                         hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
5346                         pr_info("OD memory clock is out of range [%d - %d] MHz\n",
5347                                 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
5348                                 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
5349                         return false;
5350                 }
5351         } else {
5352                 return false;
5353         }
5354
5355         return true;
5356 }
5357
5358 static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
5359                                         enum PP_OD_DPM_TABLE_COMMAND type,
5360                                         long *input, uint32_t size)
5361 {
5362         uint32_t i;
5363         struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
5364         struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
5365         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5366
5367         uint32_t input_clk;
5368         uint32_t input_vol;
5369         uint32_t input_level;
5370
5371         PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
5372                                 return -EINVAL);
5373
5374         if (!hwmgr->od_enabled) {
5375                 pr_info("OverDrive feature not enabled\n");
5376                 return -EINVAL;
5377         }
5378
5379         if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
5380                 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
5381                 podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
5382                 PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
5383                                 "Failed to get ODN SCLK and Voltage tables",
5384                                 return -EINVAL);
5385         } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
5386                 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
5387                 podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;
5388
5389                 PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
5390                         "Failed to get ODN MCLK and Voltage tables",
5391                         return -EINVAL);
5392         } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
5393                 smu7_odn_initial_default_setting(hwmgr);
5394                 return 0;
5395         } else if (PP_OD_COMMIT_DPM_TABLE == type) {
5396                 smu7_check_dpm_table_updated(hwmgr);
5397                 return 0;
5398         } else {
5399                 return -EINVAL;
5400         }
5401
5402         for (i = 0; i < size; i += 3) {
5403                 if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
5404                         pr_info("invalid clock voltage input\n");
5405                         return 0;
5406                 }
5407                 input_level = input[i];
5408                 input_clk = input[i+1] * 100;
5409                 input_vol = input[i+2];
5410
5411                 if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
5412                         podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
5413                         podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
5414                         podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
5415                         podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
5416                         podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
5417                 } else {
5418                         return -EINVAL;
5419                 }
5420         }
5421
5422         return 0;
5423 }
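
/*
 * Editor's note: example of the (level, MHz, mV) triplets consumed above,
 * as issued through the amdgpu pp_od_clk_voltage sysfs file (values
 * illustrative):
 *
 *	echo "s 1 1450 1175" > pp_od_clk_voltage    # sclk level 1
 *	echo "m 1 2000 1000" > pp_od_clk_voltage    # mclk level 1
 *	echo "c" > pp_od_clk_voltage                # PP_OD_COMMIT_DPM_TABLE
 *	echo "r" > pp_od_clk_voltage                # PP_OD_RESTORE_DEFAULT_TABLE
 *
 * The MHz field is multiplied by 100 above to return to 10 kHz units.
 */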
5424
5425 static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
5426 {
5427         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5428         uint32_t i, size = 0;
5429         uint32_t len;
5430
5431         static const char *profile_name[7] = {"BOOTUP_DEFAULT",
5432                                         "3D_FULL_SCREEN",
5433                                         "POWER_SAVING",
5434                                         "VIDEO",
5435                                         "VR",
5436                                         "COMPUTE",
5437                                         "CUSTOM"};
5438
5439         static const char *title[8] = {"NUM",
5440                         "MODE_NAME",
5441                         "SCLK_UP_HYST",
5442                         "SCLK_DOWN_HYST",
5443                         "SCLK_ACTIVE_LEVEL",
5444                         "MCLK_UP_HYST",
5445                         "MCLK_DOWN_HYST",
5446                         "MCLK_ACTIVE_LEVEL"};
5447
5448         if (!buf)
5449                 return -EINVAL;
5450
5451         size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
5452                         title[0], title[1], title[2], title[3],
5453                         title[4], title[5], title[6], title[7]);
5454
5455         len = ARRAY_SIZE(smu7_profiling);
5456
5457         for (i = 0; i < len; i++) {
5458                 if (i == hwmgr->power_profile_mode) {
5459                         size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
5460                         i, profile_name[i], "*",
5461                         data->current_profile_setting.sclk_up_hyst,
5462                         data->current_profile_setting.sclk_down_hyst,
5463                         data->current_profile_setting.sclk_activity,
5464                         data->current_profile_setting.mclk_up_hyst,
5465                         data->current_profile_setting.mclk_down_hyst,
5466                         data->current_profile_setting.mclk_activity);
5467                         continue;
5468                 }
5469                 if (smu7_profiling[i].bupdate_sclk)
5470                         size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
5471                         i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
5472                         smu7_profiling[i].sclk_down_hyst,
5473                         smu7_profiling[i].sclk_activity);
5474                 else
5475                         size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
5476                         i, profile_name[i], "-", "-", "-");
5477
5478                 if (smu7_profiling[i].bupdate_mclk)
5479                         size += sprintf(buf + size, "%16d %16d %16d\n",
5480                         smu7_profiling[i].mclk_up_hyst,
5481                         smu7_profiling[i].mclk_down_hyst,
5482                         smu7_profiling[i].mclk_activity);
5483                 else
5484                         size += sprintf(buf + size, "%16s %16s %16s\n",
5485                         "-", "-", "-");
5486         }
5487
5488         return size;
5489 }
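
/*
 * Editor's note: in the table printed above, '*' marks the active profile,
 * whose live values come from data->current_profile_setting, while '-'
 * means the profile leaves that clock domain untouched (its bupdate_sclk
 * or bupdate_mclk flag is 0 in smu7_profiling).
 */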
5490
5491 static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
5492                                         enum PP_SMC_POWER_PROFILE request)
5493 {
5494         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5495         uint32_t tmp, level;
5496
5497         if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
5498                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
5499                         level = 0;
5500                         tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
5501                         while (tmp >>= 1)
5502                                 level++;
5503                         if (level > 0)
5504                                 smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
5505                 }
5506         } else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
5507                 smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
5508         }
5509 }
5510
5511 static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
5512 {
5513         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5514         struct profile_mode_setting tmp;
5515         enum PP_SMC_POWER_PROFILE mode;
5516
5517         if (input == NULL)
5518                 return -EINVAL;
5519
5520         mode = input[size];
5521         switch (mode) {
5522         case PP_SMC_POWER_PROFILE_CUSTOM:
5523                 if (size < 8 && size != 0)
5524                         return -EINVAL;
5525                 /* If only CUSTOM is passed in, use the saved values. Check
5526                  * that we actually have a CUSTOM profile by ensuring that
5527                  * the "use sclk" or the "use mclk" bits are set
5528                  */
5529                 tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
5530                 if (size == 0) {
5531                         if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
5532                                 return -EINVAL;
5533                 } else {
5534                         tmp.bupdate_sclk = input[0];
5535                         tmp.sclk_up_hyst = input[1];
5536                         tmp.sclk_down_hyst = input[2];
5537                         tmp.sclk_activity = input[3];
5538                         tmp.bupdate_mclk = input[4];
5539                         tmp.mclk_up_hyst = input[5];
5540                         tmp.mclk_down_hyst = input[6];
5541                         tmp.mclk_activity = input[7];
5542                         smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
5543                 }
5544                 if (!smum_update_dpm_settings(hwmgr, &tmp)) {
5545                         memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
5546                         hwmgr->power_profile_mode = mode;
5547                 }
5548                 break;
5549         case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
5550         case PP_SMC_POWER_PROFILE_POWERSAVING:
5551         case PP_SMC_POWER_PROFILE_VIDEO:
5552         case PP_SMC_POWER_PROFILE_VR:
5553         case PP_SMC_POWER_PROFILE_COMPUTE:
5554                 if (mode == hwmgr->power_profile_mode)
5555                         return 0;
5556
5557                 memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
5558                 if (!smum_update_dpm_settings(hwmgr, &tmp)) {
5559                         if (tmp.bupdate_sclk) {
5560                                 data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
5561                                 data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
5562                                 data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
5563                                 data->current_profile_setting.sclk_activity = tmp.sclk_activity;
5564                         }
5565                         if (tmp.bupdate_mclk) {
5566                                 data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
5567                                 data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
5568                                 data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
5569                                 data->current_profile_setting.mclk_activity = tmp.mclk_activity;
5570                         }
5571                         smu7_patch_compute_profile_mode(hwmgr, mode);
5572                         hwmgr->power_profile_mode = mode;
5573                 }
5574                 break;
5575         default:
5576                 return -EINVAL;
5577         }
5578
5579         return 0;
5580 }
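
/*
 * Editor's note: example CUSTOM request (illustrative values).  The sysfs
 * write path parses the leading mode index and stores it at input[size],
 * so:
 *
 *	echo "6 1 0 100 30 1 10 60 25" > pp_power_profile_mode
 *
 * selects mode 6 (CUSTOM) with input[0..7] mapping to bupdate_sclk,
 * sclk_up_hyst, sclk_down_hyst, sclk_activity, bupdate_mclk, mclk_up_hyst,
 * mclk_down_hyst and mclk_activity respectively.
 */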
5581
5582 static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
5583                                 PHM_PerformanceLevelDesignation designation, uint32_t index,
5584                                 PHM_PerformanceLevel *level)
5585 {
5586         const struct smu7_power_state *ps;
5587         uint32_t i;
5588
5589         if (level == NULL || hwmgr == NULL || state == NULL)
5590                 return -EINVAL;
5591
5592         ps = cast_const_phw_smu7_power_state(state);
5593
5594         i = index > ps->performance_level_count - 1 ?
5595                         ps->performance_level_count - 1 : index;
5596
5597         level->coreClock = ps->performance_levels[i].engine_clock;
5598         level->memory_clock = ps->performance_levels[i].memory_clock;
5599
5600         return 0;
5601 }
5602
5603 static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
5604 {
5605         int result;
5606
5607         result = smu7_disable_dpm_tasks(hwmgr);
5608         PP_ASSERT_WITH_CODE((0 == result),
5609                         "[disable_dpm_tasks] Failed to disable DPM!",
5610                         );
5611
5612         return result;
5613 }
5614
5615 static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
5616         .backend_init = &smu7_hwmgr_backend_init,
5617         .backend_fini = &smu7_hwmgr_backend_fini,
5618         .asic_setup = &smu7_setup_asic_task,
5619         .dynamic_state_management_enable = &smu7_enable_dpm_tasks,
5620         .apply_state_adjust_rules = smu7_apply_state_adjust_rules,
5621         .force_dpm_level = &smu7_force_dpm_level,
5622         .power_state_set = smu7_set_power_state_tasks,
5623         .get_power_state_size = smu7_get_power_state_size,
5624         .get_mclk = smu7_dpm_get_mclk,
5625         .get_sclk = smu7_dpm_get_sclk,
5626         .patch_boot_state = smu7_dpm_patch_boot_state,
5627         .get_pp_table_entry = smu7_get_pp_table_entry,
5628         .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
5629         .powerdown_uvd = smu7_powerdown_uvd,
5630         .powergate_uvd = smu7_powergate_uvd,
5631         .powergate_vce = smu7_powergate_vce,
5632         .disable_clock_power_gating = smu7_disable_clock_power_gating,
5633         .update_clock_gatings = smu7_update_clock_gatings,
5634         .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
5635         .display_config_changed = smu7_display_configuration_changed_task,
5636         .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
5637         .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
5638         .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
5639         .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
5640         .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
5641         .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
5642         .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
5643         .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
5644         .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
5645         .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
5646         .register_irq_handlers = smu7_register_irq_handlers,
5647         .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
5648         .check_states_equal = smu7_check_states_equal,
5649         .set_fan_control_mode = smu7_set_fan_control_mode,
5650         .get_fan_control_mode = smu7_get_fan_control_mode,
5651         .force_clock_level = smu7_force_clock_level,
5652         .print_clock_levels = smu7_print_clock_levels,
5653         .powergate_gfx = smu7_powergate_gfx,
5654         .get_sclk_od = smu7_get_sclk_od,
5655         .set_sclk_od = smu7_set_sclk_od,
5656         .get_mclk_od = smu7_get_mclk_od,
5657         .set_mclk_od = smu7_set_mclk_od,
5658         .get_clock_by_type = smu7_get_clock_by_type,
5659         .get_clock_by_type_with_latency = smu7_get_clock_by_type_with_latency,
5660         .set_watermarks_for_clocks_ranges = smu7_set_watermarks_for_clocks_ranges,
5661         .read_sensor = smu7_read_sensor,
5662         .dynamic_state_management_disable = smu7_disable_dpm_tasks,
5663         .avfs_control = smu7_avfs_control,
5664         .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
5665         .start_thermal_controller = smu7_start_thermal_controller,
5666         .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
5667         .get_max_high_clocks = smu7_get_max_high_clocks,
5668         .get_thermal_temperature_range = smu7_get_thermal_temperature_range,
5669         .odn_edit_dpm_table = smu7_odn_edit_dpm_table,
5670         .set_power_limit = smu7_set_power_limit,
5671         .get_power_profile_mode = smu7_get_power_profile_mode,
5672         .set_power_profile_mode = smu7_set_power_profile_mode,
5673         .get_performance_level = smu7_get_performance_level,
5674         .get_asic_baco_capability = smu7_baco_get_capability,
5675         .get_asic_baco_state = smu7_baco_get_state,
5676         .set_asic_baco_state = smu7_baco_set_state,
5677         .power_off_asic = smu7_power_off_asic,
5678 };
5679
5680 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
5681                 uint32_t clock_insr)
5682 {
5683         uint8_t i;
5684         uint32_t temp;
5685         uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
5686
5687         PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
5688         for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
5689                 temp = clock >> i;
5690
5691                 if (temp >= min || i == 0)
5692                         break;
5693         }
5694         return i;
5695 }
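
/*
 * Editor's sketch of the divider search above (illustrative numbers,
 * assuming SMU7_MAX_DEEPSLEEP_DIVIDER_ID >= 5): it returns the deepest
 * sleep divider id that keeps clock >> id at or above the stutter minimum.
 * With clock = 80000 (800 MHz in 10 kHz units) and min = 5000:
 *
 *	id 5: 80000 >> 5 = 2500 < 5000, keep looking
 *	id 4: 80000 >> 4 = 5000 >= 5000, return 4 (divide by 16)
 */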
5696
5697 int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
5698 {
5699         hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
5700         if (hwmgr->pp_table_version == PP_TABLE_V0)
5701                 hwmgr->pptable_func = &pptable_funcs;
5702         else if (hwmgr->pp_table_version == PP_TABLE_V1)
5703                 hwmgr->pptable_func = &pptable_v1_0_funcs;
5704
5705         return 0;
5706 }