drm/amd/pm: no need to force MCLK to highest when no display connected
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include <drm/amdgpu_drm.h>
#include "ppatomctrl.h"
#include "atombios.h"
#include "pptable_v1_0.h"
#include "pppcielanes.h"
#include "amd_pcie_helpers.h"
#include "hardwaremanager.h"
#include "process_pptables_v1_0.h"
#include "cgs_common.h"

#include "smu7_common.h"

#include "hwmgr.h"
#include "smu7_hwmgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"
#include "smu7_baco.h"
#include "smu7_smumgr.h"
#include "polaris10_smumgr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define MC_CG_SEQ_DRAMCONF_S0       0x05
#define MC_CG_SEQ_DRAMCONF_S1       0x06
#define MC_CG_SEQ_YCLK_SUSPEND      0x04
#define MC_CG_SEQ_YCLK_RESUME       0x0a

#define SMC_CG_IND_START            0xc0030000
#define SMC_CG_IND_END              0xc0040000

#define MEM_FREQ_LOW_LATENCY        25000
#define MEM_FREQ_HIGH_LATENCY       80000

#define MEM_LATENCY_HIGH            45
#define MEM_LATENCY_LOW             35
#define MEM_LATENCY_ERR             0xFFFF

#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5

#define PCIE_BUS_CLK                10000
#define TCLK                        (PCIE_BUS_CLK / 10)
static struct profile_mode_setting smu7_profiling[7] =
                                        {{0, 0, 0, 0, 0, 0, 0, 0},
                                         {1, 0, 100, 30, 1, 0, 100, 10},
                                         {1, 10, 0, 30, 0, 0, 0, 0},
                                         {0, 0, 0, 0, 1, 10, 16, 31},
                                         {1, 0, 11, 50, 1, 0, 100, 10},
                                         {1, 0, 5, 30, 0, 0, 0, 0},
                                         {0, 0, 0, 0, 0, 0, 0, 0},
                                        };

#define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)

#define ixPWR_SVI2_PLANE1_LOAD                     0xC0200280
#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK                    0x00000020L
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK                 0x00000040L
#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT                  0x00000005
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT               0x00000006

#define STRAP_EVV_REVISION_MSB          2211
#define STRAP_EVV_REVISION_LSB          2208

/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
        DPM_EVENT_SRC_ANALOG = 0,
        DPM_EVENT_SRC_EXTERNAL = 1,
        DPM_EVENT_SRC_DIGITAL = 2,
        DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
        DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};

#define ixDIDT_SQ_EDC_CTRL                         0x0013
#define ixDIDT_SQ_EDC_THRESHOLD                    0x0014
#define ixDIDT_SQ_EDC_STALL_PATTERN_1_2            0x0015
#define ixDIDT_SQ_EDC_STALL_PATTERN_3_4            0x0016
#define ixDIDT_SQ_EDC_STALL_PATTERN_5_6            0x0017
#define ixDIDT_SQ_EDC_STALL_PATTERN_7              0x0018

#define ixDIDT_TD_EDC_CTRL                         0x0053
#define ixDIDT_TD_EDC_THRESHOLD                    0x0054
#define ixDIDT_TD_EDC_STALL_PATTERN_1_2            0x0055
#define ixDIDT_TD_EDC_STALL_PATTERN_3_4            0x0056
#define ixDIDT_TD_EDC_STALL_PATTERN_5_6            0x0057
#define ixDIDT_TD_EDC_STALL_PATTERN_7              0x0058

#define ixDIDT_TCP_EDC_CTRL                        0x0073
#define ixDIDT_TCP_EDC_THRESHOLD                   0x0074
#define ixDIDT_TCP_EDC_STALL_PATTERN_1_2           0x0075
#define ixDIDT_TCP_EDC_STALL_PATTERN_3_4           0x0076
#define ixDIDT_TCP_EDC_STALL_PATTERN_5_6           0x0077
#define ixDIDT_TCP_EDC_STALL_PATTERN_7             0x0078

#define ixDIDT_DB_EDC_CTRL                         0x0033
#define ixDIDT_DB_EDC_THRESHOLD                    0x0034
#define ixDIDT_DB_EDC_STALL_PATTERN_1_2            0x0035
#define ixDIDT_DB_EDC_STALL_PATTERN_3_4            0x0036
#define ixDIDT_DB_EDC_STALL_PATTERN_5_6            0x0037
#define ixDIDT_DB_EDC_STALL_PATTERN_7              0x0038

uint32_t DIDTEDCConfig_P12[] = {
    ixDIDT_SQ_EDC_STALL_PATTERN_1_2,
    ixDIDT_SQ_EDC_STALL_PATTERN_3_4,
    ixDIDT_SQ_EDC_STALL_PATTERN_5_6,
    ixDIDT_SQ_EDC_STALL_PATTERN_7,
    ixDIDT_SQ_EDC_THRESHOLD,
    ixDIDT_SQ_EDC_CTRL,
    ixDIDT_TD_EDC_STALL_PATTERN_1_2,
    ixDIDT_TD_EDC_STALL_PATTERN_3_4,
    ixDIDT_TD_EDC_STALL_PATTERN_5_6,
    ixDIDT_TD_EDC_STALL_PATTERN_7,
    ixDIDT_TD_EDC_THRESHOLD,
    ixDIDT_TD_EDC_CTRL,
    ixDIDT_TCP_EDC_STALL_PATTERN_1_2,
    ixDIDT_TCP_EDC_STALL_PATTERN_3_4,
    ixDIDT_TCP_EDC_STALL_PATTERN_5_6,
    ixDIDT_TCP_EDC_STALL_PATTERN_7,
    ixDIDT_TCP_EDC_THRESHOLD,
    ixDIDT_TCP_EDC_CTRL,
    ixDIDT_DB_EDC_STALL_PATTERN_1_2,
    ixDIDT_DB_EDC_STALL_PATTERN_3_4,
    ixDIDT_DB_EDC_STALL_PATTERN_5_6,
    ixDIDT_DB_EDC_STALL_PATTERN_7,
    ixDIDT_DB_EDC_THRESHOLD,
    ixDIDT_DB_EDC_CTRL,
    0xFFFFFFFF /* End of list */
};
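
/*
 * A minimal sketch (not code from this file) of how a sentinel-terminated
 * register list like DIDTEDCConfig_P12 is typically walked; "value" is a
 * placeholder for whatever each EDC register should be programmed to:
 *
 *        uint32_t i;
 *
 *        for (i = 0; DIDTEDCConfig_P12[i] != 0xFFFFFFFF; i++)
 *                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT,
 *                                       DIDTEDCConfig_P12[i], value);
 */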

static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask);
static int smu7_notify_has_display(struct pp_hwmgr *hwmgr);

static struct smu7_power_state *cast_phw_smu7_power_state(
                                  struct pp_hw_power_state *hw_ps)
{
        PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
                                "Invalid Powerstate Type!",
                                 return NULL);

        return (struct smu7_power_state *)hw_ps;
}

static const struct smu7_power_state *cast_const_phw_smu7_power_state(
                                 const struct pp_hw_power_state *hw_ps)
{
        PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
                                "Invalid Powerstate Type!",
                                 return NULL);

        return (const struct smu7_power_state *)hw_ps;
}
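
/*
 * Illustrative (hypothetical) caller of the checked casts above; the magic
 * value guards against downcasting a power state that belongs to another
 * hwmgr family:
 *
 *        struct smu7_power_state *ps = cast_phw_smu7_power_state(hw_ps);
 *
 *        if (!ps)
 *                return -EINVAL;
 */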

/**
 * smu7_get_mc_microcode_version - Find the MC microcode version and store it in the HwMgr struct
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return: always 0
 */
static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
        cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

        hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

        return 0;
}
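
/*
 * The two accesses above are the usual index/data pattern for indirect
 * registers: writing the selector 0x9F to MC_SEQ_IO_DEBUG_INDEX makes the
 * MC firmware version readable through MC_SEQ_IO_DEBUG_DATA.
 */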

static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
        uint32_t speedCntl = 0;

        /* mmPCIE_PORT_INDEX was renamed to mmPCIE_INDEX */
        speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
                        ixPCIE_LC_SPEED_CNTL);
        return((uint16_t)PHM_GET_FIELD(speedCntl,
                        PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}

static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
        uint32_t link_width;

        /* mmPCIE_PORT_INDEX was renamed to mmPCIE_INDEX */
        link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
                        PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);

        PP_ASSERT_WITH_CODE((7 >= link_width),
                        "Invalid PCIe lane width!", return 0);

        return decode_pcie_lane_width(link_width);
}

/**
 * smu7_enable_smc_voltage_controller - Enable voltage control
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return: always 0
 */
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
        if (hwmgr->chip_id >= CHIP_POLARIS10 &&
            hwmgr->chip_id <= CHIP_VEGAM) {
                PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
                                CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
                PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
                                CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
        }

        if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);

        return 0;
}

/**
 * smu7_voltage_control - Checks if we want to support voltage control
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return: true if voltage control is to be supported
 */
static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
{
        const struct smu7_hwmgr *data =
                        (const struct smu7_hwmgr *)(hwmgr->backend);

        return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
}

/**
 * smu7_enable_voltage_control - Enable voltage control
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return: always 0
 */
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
        /* enable voltage control */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

        return 0;
}

static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
                struct phm_clock_voltage_dependency_table *voltage_dependency_table)
{
        uint32_t i;

        PP_ASSERT_WITH_CODE((NULL != voltage_table),
                        "Voltage Dependency Table empty.", return -EINVAL;);

        voltage_table->mask_low = 0;
        voltage_table->phase_delay = 0;
        voltage_table->count = voltage_dependency_table->count;

        for (i = 0; i < voltage_dependency_table->count; i++) {
                voltage_table->entries[i].value =
                        voltage_dependency_table->entries[i].v;
                voltage_table->entries[i].smio_low = 0;
        }

        return 0;
}

/**
 * smu7_construct_voltage_tables - Create Voltage Tables.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return: always 0
 */
static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)hwmgr->pptable;
        int result = 0;
        uint32_t tmp;

        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
                result = atomctrl_get_voltage_table_v3(hwmgr,
                                VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
                                &(data->mvdd_voltage_table));
                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve MVDD table.",
                                return result);
        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
                if (hwmgr->pp_table_version == PP_TABLE_V1)
                        result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
                                        table_info->vdd_dep_on_mclk);
                else if (hwmgr->pp_table_version == PP_TABLE_V0)
                        result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
                                        hwmgr->dyn_state.mvdd_dependency_on_mclk);

                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve SVI2 MVDD table from dependency table.",
                                return result;);
        }

        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
                result = atomctrl_get_voltage_table_v3(hwmgr,
                                VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
                                &(data->vddci_voltage_table));
                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve VDDCI table.",
                                return result);
        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
                if (hwmgr->pp_table_version == PP_TABLE_V1)
                        result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
                                        table_info->vdd_dep_on_mclk);
                else if (hwmgr->pp_table_version == PP_TABLE_V0)
                        result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
                                        hwmgr->dyn_state.vddci_dependency_on_mclk);
                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve SVI2 VDDCI table from dependency table.",
                                return result);
        }

        if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
                /* VDDGFX has only SVI2 voltage control */
                result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
                                        table_info->vddgfx_lookup_table);
                PP_ASSERT_WITH_CODE((0 == result),
                        "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
        }

        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
                result = atomctrl_get_voltage_table_v3(hwmgr,
                                        VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
                                        &data->vddc_voltage_table);
                PP_ASSERT_WITH_CODE((0 == result),
                        "Failed to retrieve VDDC table.", return result;);
        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {

                if (hwmgr->pp_table_version == PP_TABLE_V0)
                        result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
                                        hwmgr->dyn_state.vddc_dependency_on_mclk);
                else if (hwmgr->pp_table_version == PP_TABLE_V1)
                        result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
                                table_info->vddc_lookup_table);

                PP_ASSERT_WITH_CODE((0 == result),
                        "Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
        }

        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
        PP_ASSERT_WITH_CODE(
                        (data->vddc_voltage_table.count <= tmp),
                "Too many voltage values for VDDC. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                                &(data->vddc_voltage_table)));

        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
        PP_ASSERT_WITH_CODE(
                        (data->vddgfx_voltage_table.count <= tmp),
                "Too many voltage values for VDDGFX. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                                &(data->vddgfx_voltage_table)));

        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
        PP_ASSERT_WITH_CODE(
                        (data->vddci_voltage_table.count <= tmp),
                "Too many voltage values for VDDCI. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                        &(data->vddci_voltage_table)));

        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
        PP_ASSERT_WITH_CODE(
                        (data->mvdd_voltage_table.count <= tmp),
                "Too many voltage values for MVDD. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                                &(data->mvdd_voltage_table)));

        return 0;
}
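
/*
 * Note on the PP_ASSERT_WITH_CODE(cond, msg, action) pattern used above: the
 * macro logs msg and executes action only when cond is false, so each failed
 * bounds check both warns and trims the offending voltage table down to the
 * SMU's state-table limit instead of erroring out.
 */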

/**
 * smu7_program_static_screen_threshold_parameters - Programs static screen detection parameters
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return: always 0
 */
static int smu7_program_static_screen_threshold_parameters(
                                                        struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        /* Set static screen threshold unit */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
                        data->static_screen_threshold_unit);
        /* Set static screen threshold */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
                        data->static_screen_threshold);

        return 0;
}

/**
 * smu7_enable_display_gap - Setup display gap for glitch free memory clock switching.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return: always 0
 */
static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
{
        uint32_t display_gap =
                        cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                                        ixCG_DISPLAY_GAP_CNTL);

        display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
                        DISP_GAP, DISPLAY_GAP_IGNORE);

        display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
                        DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);

        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_DISPLAY_GAP_CNTL, display_gap);

        return 0;
}
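
/*
 * Reading of the two fields programmed above: DISP_GAP = IGNORE keeps DPM
 * running regardless of display state, while DISP_GAP_MCHG = VBLANK defers
 * memory clock changes to vertical blanking, which is what makes MCLK
 * switching glitch-free while a display is being scanned out.
 */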

/**
 * smu7_program_voting_clients - Programs activity state transition voting clients
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return: always 0
 */
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        int i;

        /* Clear reset for voting clients before enabling DPM */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

        for (i = 0; i < 8; i++)
                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                                        ixCG_FREQ_TRAN_VOTING_0 + i * 4,
                                        data->voting_rights_clients[i]);
        return 0;
}

static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
        int i;

        /* Reset voting clients before disabling DPM */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);

        for (i = 0; i < 8; i++)
                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                                ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);

        return 0;
}

/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
                uint32_t arb_src, uint32_t arb_dest)
{
        uint32_t mc_arb_dram_timing;
        uint32_t mc_arb_dram_timing2;
        uint32_t burst_time;
        uint32_t mc_cg_config;

        switch (arb_src) {
        case MC_CG_ARB_FREQ_F0:
                mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
                mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
                burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
                break;
        case MC_CG_ARB_FREQ_F1:
                mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
                mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
                burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
                break;
        default:
                return -EINVAL;
        }

        switch (arb_dest) {
        case MC_CG_ARB_FREQ_F0:
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
                PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
                break;
        case MC_CG_ARB_FREQ_F1:
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
                PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
                break;
        default:
                return -EINVAL;
        }

        mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
        mc_cg_config |= 0x0000000F;
        cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
        PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

        return 0;
}
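
/*
 * Assumed picture behind the helper above: the memory controller arbiter
 * keeps multiple timing register sets (F0..F3). Copying one set into another
 * and then retargeting MC_ARB_CG::CG_ARB_REQ lets new DRAM timings be staged
 * without disturbing the set the hardware is actively using, e.g.:
 *
 *        smu7_copy_and_switch_arb_sets(hwmgr, MC_CG_ARB_FREQ_F0,
 *                                      MC_CG_ARB_FREQ_F1);
 */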

static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
        return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
}

/**
 * smu7_initial_switch_from_arbf0_to_f1 - Initial switch from ARB F0->F1
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return: always 0
 * This function is to be called from the SetPowerState table.
 */
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
        return smu7_copy_and_switch_arb_sets(hwmgr,
                        MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
{
        uint32_t tmp;

        tmp = (cgs_read_ind_register(hwmgr->device,
                        CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
                        0x0000ff00) >> 8;

        if (tmp == MC_CG_ARB_FREQ_F0)
                return 0;

        return smu7_copy_and_switch_arb_sets(hwmgr,
                        tmp, MC_CG_ARB_FREQ_F0);
}
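
/*
 * Bits [15:8] of SMC_SCRATCH9 (masked with 0x0000ff00 above) hold the arb
 * register set the SMC is currently running from, so the early return skips
 * a redundant copy/switch when the hardware is already on MC_CG_ARB_FREQ_F0.
 */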

static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
        uint16_t pcie_gen = 0;

        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 &&
            adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4)
                pcie_gen = 3;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 &&
                adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
                pcie_gen = 2;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 &&
                adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2)
                pcie_gen = 1;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 &&
                adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1)
                pcie_gen = 0;

        return pcie_gen;
}

static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
        uint16_t pcie_width = 0;

        if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
                pcie_width = 16;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
                pcie_width = 12;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
                pcie_width = 8;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
                pcie_width = 4;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
                pcie_width = 2;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
                pcie_width = 1;

        return pcie_width;
}
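
/*
 * Worked example for the two override helpers above: on a board whose
 * pcie_gen_mask advertises Gen3 in both the platform and ASIC CAIL bits and
 * whose pcie_mlw_mask advertises x16, they return 2 and 16. The gen value
 * is zero-based (0 = Gen1 up to 3 = Gen4); the width is a plain lane count.
 */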

static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        struct phm_ppt_v1_pcie_table *pcie_table = NULL;

        uint32_t i, max_entry;
        uint32_t tmp;

        PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
                        data->use_pcie_power_saving_levels), "No pcie performance levels!",
                        return -EINVAL);

        if (table_info != NULL)
                pcie_table = table_info->pcie_table;

        if (data->use_pcie_performance_levels &&
                        !data->use_pcie_power_saving_levels) {
                data->pcie_gen_power_saving = data->pcie_gen_performance;
                data->pcie_lane_power_saving = data->pcie_lane_performance;
        } else if (!data->use_pcie_performance_levels &&
                        data->use_pcie_power_saving_levels) {
                data->pcie_gen_performance = data->pcie_gen_power_saving;
                data->pcie_lane_performance = data->pcie_lane_power_saving;
        }
        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
        phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
                                        tmp,
                                        MAX_REGULAR_DPM_NUMBER);

        if (pcie_table != NULL) {
                /* max_entry is used to make sure we reserve one PCIe level
                 * for the boot level (fix for A+A PSPP issue).
                 * If the PCIe table from the PPTable has a ULV entry plus
                 * 8 entries, ignore the last entry. */
                max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
                for (i = 1; i < max_entry; i++) {
                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
                                        get_pcie_gen_support(data->pcie_gen_cap,
                                                        pcie_table->entries[i].gen_speed),
                                        get_pcie_lane_support(data->pcie_lane_cap,
                                                        pcie_table->entries[i].lane_width));
                }
                data->dpm_table.pcie_speed_table.count = max_entry - 1;
                smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
        } else {
                /* Hardcoded PCIe table */
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Min_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Min_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));

                data->dpm_table.pcie_speed_table.count = 6;
        }
        /* Populate last level for boot PCIe level, but do not increment count. */
        if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
                for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                data->vbios_boot_state.pcie_lane_bootup_value);
        } else {
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
                        data->dpm_table.pcie_speed_table.count,
                        get_pcie_gen_support(data->pcie_gen_cap,
                                        PP_Min_PCIEGen),
                        get_pcie_lane_support(data->pcie_lane_cap,
                                        PP_Max_PCIELane));

                if (data->pcie_dpm_key_disabled)
                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
                                data->dpm_table.pcie_speed_table.count,
                                smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr));
        }
        return 0;
}

static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));

        phm_reset_single_dpm_table(
                        &data->dpm_table.sclk_table,
                                smum_get_mac_definition(hwmgr,
                                        SMU_MAX_LEVELS_GRAPHICS),
                                        MAX_REGULAR_DPM_NUMBER);
        phm_reset_single_dpm_table(
                        &data->dpm_table.mclk_table,
                        smum_get_mac_definition(hwmgr,
                                SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);

        phm_reset_single_dpm_table(
                        &data->dpm_table.vddc_table,
                                smum_get_mac_definition(hwmgr,
                                        SMU_MAX_LEVELS_VDDC),
                                        MAX_REGULAR_DPM_NUMBER);
        phm_reset_single_dpm_table(
                        &data->dpm_table.vddci_table,
                        smum_get_mac_definition(hwmgr,
                                SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);

        phm_reset_single_dpm_table(
                        &data->dpm_table.mvdd_table,
                                smum_get_mac_definition(hwmgr,
                                        SMU_MAX_LEVELS_MVDD),
                                        MAX_REGULAR_DPM_NUMBER);
        return 0;
}

/*
 * This function initializes all DPM state tables for SMU7 based on the
 * dependency tables. The dynamic state patching function will then trim
 * these state tables to the allowed range based on the power policy or
 * external client requests, such as UVD requests.
 */

static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
                hwmgr->dyn_state.vddc_dependency_on_sclk;
        struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
                hwmgr->dyn_state.vddc_dependency_on_mclk;
        struct phm_cac_leakage_table *std_voltage_table =
                hwmgr->dyn_state.cac_leakage_table;
        uint32_t i;

        PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
                "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
        PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
                "SCLK dependency table must have at least one entry. This table is mandatory", return -EINVAL);

        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
                "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
                "MCLK dependency table must have at least one entry. This table is mandatory", return -EINVAL);


        /* Initialize SCLK DPM table based on allowed SCLK values */
        data->dpm_table.sclk_table.count = 0;

        for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
                if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
                                allowed_vdd_sclk_table->entries[i].clk) {
                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
                                allowed_vdd_sclk_table->entries[i].clk;
                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
                        data->dpm_table.sclk_table.count++;
                }
        }

        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
                "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
        /* Initialize MCLK DPM table based on allowed MCLK values */
        data->dpm_table.mclk_table.count = 0;
        for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
                if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
                        allowed_vdd_mclk_table->entries[i].clk) {
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
                                allowed_vdd_mclk_table->entries[i].clk;
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
                        data->dpm_table.mclk_table.count++;
                }
        }

        /* Initialize VDDC DPM table based on allowed VDDC values and populate corresponding std. values. */
        for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
                data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
                data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
                /* param1 is for corresponding std voltage */
                data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
        }

        data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
        allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

        if (NULL != allowed_vdd_mclk_table) {
                /* Initialize VDDCI DPM table based on allowed MCLK values */
                for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
                        data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
                        data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
                }
                data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
        }

        allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;

        if (NULL != allowed_vdd_mclk_table) {
                /*
                 * Initialize MVDD DPM table based on allowed MCLK
                 * values
                 */
                for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
                        data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
                        data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
                }
                data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
        }

        return 0;
}

static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        uint32_t i;

        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
        struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;

        if (table_info == NULL)
                return -EINVAL;

        dep_sclk_table = table_info->vdd_dep_on_sclk;
        dep_mclk_table = table_info->vdd_dep_on_mclk;

        PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
                        "SCLK dependency table is missing.",
                        return -EINVAL);
        PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
                        "SCLK dependency table count is 0.",
                        return -EINVAL);

        PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
                        "MCLK dependency table is missing.",
                        return -EINVAL);
        PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
                        "MCLK dependency table count is 0.",
                        return -EINVAL);

        /* Initialize SCLK DPM table based on allowed SCLK values */
        data->dpm_table.sclk_table.count = 0;
        for (i = 0; i < dep_sclk_table->count; i++) {
                if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
                                                dep_sclk_table->entries[i].clk) {

                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
                                        dep_sclk_table->entries[i].clk;

                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
                                        (i == 0) ? true : false;
                        data->dpm_table.sclk_table.count++;
                }
        }
        if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
                hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
        /* Initialize MCLK DPM table based on allowed MCLK values */
        data->dpm_table.mclk_table.count = 0;
        for (i = 0; i < dep_mclk_table->count; i++) {
                if (i == 0 || data->dpm_table.mclk_table.dpm_levels
                                [data->dpm_table.mclk_table.count - 1].value !=
                                                dep_mclk_table->entries[i].clk) {
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
                                                        dep_mclk_table->entries[i].clk;
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
                                                        (i == 0) ? true : false;
                        data->dpm_table.mclk_table.count++;
                }
        }

        if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
                hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
        return 0;
}
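
/*
 * Both builders above use the same dedup idiom: a dependency-table entry is
 * appended only when its clock differs from the previous level, so e.g. a
 * table of {300, 300, 600} MHz collapses into two DPM levels, and only
 * level 0 starts out enabled. The last dependency entry also seeds the
 * overdrive limit whenever the PPTable does not provide one.
 */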

static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        uint32_t i;

        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
        struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
        struct phm_odn_performance_level *entries;

        if (table_info == NULL)
                return -EINVAL;

        dep_sclk_table = table_info->vdd_dep_on_sclk;
        dep_mclk_table = table_info->vdd_dep_on_mclk;

        odn_table->odn_core_clock_dpm_levels.num_of_pl =
                                                data->golden_dpm_table.sclk_table.count;
        entries = odn_table->odn_core_clock_dpm_levels.entries;
        for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) {
                entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
                entries[i].enabled = true;
                entries[i].vddc = dep_sclk_table->entries[i].vddc;
        }

        smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
                (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));

        odn_table->odn_memory_clock_dpm_levels.num_of_pl =
                                                data->golden_dpm_table.mclk_table.count;
        entries = odn_table->odn_memory_clock_dpm_levels.entries;
        for (i = 0; i < data->golden_dpm_table.mclk_table.count; i++) {
                entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
                entries[i].enabled = true;
                entries[i].vddc = dep_mclk_table->entries[i].vddc;
        }

        smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
                (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));

        return 0;
}

static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        uint32_t min_vddc = 0;
        uint32_t max_vddc = 0;

        if (!table_info)
                return;

        dep_sclk_table = table_info->vdd_dep_on_sclk;

        atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);

        if (min_vddc == 0 || min_vddc > 2000
                || min_vddc > dep_sclk_table->entries[0].vddc)
                min_vddc = dep_sclk_table->entries[0].vddc;

        if (max_vddc == 0 || max_vddc > 2000
                || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
                max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;

        data->odn_dpm_table.min_vddc = min_vddc;
        data->odn_dpm_table.max_vddc = max_vddc;
}
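
/*
 * Worked example of the clamping above: if the VBIOS reports min_vddc = 0
 * or an implausible value above 2000 (mV), the range falls back to the
 * first and last VDDC entries of the SCLK dependency table, so the ODN
 * min/max always bracket the voltages used by the stock DPM levels.
 */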

static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        uint32_t i;

        struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
        struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;

        if (table_info == NULL)
                return;

        for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
                if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
                                        data->dpm_table.sclk_table.dpm_levels[i].value) {
                        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
                        break;
                }
        }

        for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
                if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
                                        data->dpm_table.mclk_table.dpm_levels[i].value) {
                        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
                        break;
                }
        }

        dep_table = table_info->vdd_dep_on_mclk;
        odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);

        for (i = 0; i < dep_table->count; i++) {
                if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
                        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
                        return;
                }
        }

        dep_table = table_info->vdd_dep_on_sclk;
        odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
        for (i = 0; i < dep_table->count; i++) {
                if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
                        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
                        return;
                }
        }
        if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
                data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
                data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
        }
}
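
/*
 * The loops above are the ODN change detector: any user edit to an SCLK or
 * MCLK level, or to a voltage in either dependency table, raises the
 * matching DPMTABLE_OD_UPDATE_* flag so the SMU tables get rebuilt; a bare
 * VDDC change is promoted to a full SCLK + MCLK update at the end.
 */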

static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        smu7_reset_dpm_tables(hwmgr);

        if (hwmgr->pp_table_version == PP_TABLE_V1)
                smu7_setup_dpm_tables_v1(hwmgr);
        else if (hwmgr->pp_table_version == PP_TABLE_V0)
                smu7_setup_dpm_tables_v0(hwmgr);

        smu7_setup_default_pcie_table(hwmgr);

        /* save a copy of the default DPM table */
        memcpy(&(data->golden_dpm_table), &(data->dpm_table),
                        sizeof(struct smu7_dpm_table));

        /* initialize ODN table */
        if (hwmgr->od_enabled) {
                if (data->odn_dpm_table.max_vddc) {
                        smu7_check_dpm_table_updated(hwmgr);
                } else {
                        smu7_setup_voltage_range_from_vbios(hwmgr);
                        smu7_odn_initial_default_setting(hwmgr);
                }
        }
        return 0;
}
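
/*
 * Summary of the ordering above (no new behavior): reset the DPM tables,
 * rebuild them from the V0 or V1 PPTable, derive the PCIe table, snapshot
 * the result as the "golden" defaults, and only then initialize or
 * re-validate the ODN overrides against that snapshot.
 */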

static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
{
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_RegulatorHot))
                return smum_send_msg_to_smc(hwmgr,
                                PPSMC_MSG_EnableVRHotGPIOInterrupt,
                                NULL);

        return 0;
}

static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
                        SCLK_PWRMGT_OFF, 0);
        return 0;
}

static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        if (data->ulv_supported)
                return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);

        return 0;
}

static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        if (data->ulv_supported)
                return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);

        return 0;
}

static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_SclkDeepSleep)) {
                if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to enable Master Deep Sleep switch failed!",
                                        return -EINVAL);
        } else {
                if (smum_send_msg_to_smc(hwmgr,
                                PPSMC_MSG_MASTER_DeepSleep_OFF,
                                NULL)) {
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to disable Master Deep Sleep switch failed!",
                                        return -EINVAL);
                }
        }

        return 0;
}

static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_SclkDeepSleep)) {
                if (smum_send_msg_to_smc(hwmgr,
                                PPSMC_MSG_MASTER_DeepSleep_OFF,
                                NULL)) {
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to disable Master Deep Sleep switch failed!",
                                        return -EINVAL);
                }
        }

        return 0;
}

static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        uint32_t soft_register_value = 0;
        uint32_t handshake_disables_offset = data->soft_regs_start
                                + smum_get_offsetof(hwmgr,
                                        SMU_SoftRegisters, HandshakeDisables);

        soft_register_value = cgs_read_ind_register(hwmgr->device,
                                CGS_IND_REG__SMC, handshake_disables_offset);
        soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        handshake_disables_offset, soft_register_value);
        return 0;
}
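
/*
 * What the read-modify-write above achieves, as understood from the names:
 * HandshakeDisables is a soft register in SMC RAM, and OR-ing in
 * SMU7_VCE_SCLK_HANDSHAKE_DISABLE (or the UVD MCLK bit in the next helper)
 * tells the firmware not to tie those clocks to VCE/UVD activity handshakes.
 */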
1177
1178 static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
1179 {
1180         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1181         uint32_t soft_register_value = 0;
1182         uint32_t handshake_disables_offset = data->soft_regs_start
1183                                 + smum_get_offsetof(hwmgr,
1184                                         SMU_SoftRegisters, HandshakeDisables);
1185
1186         soft_register_value = cgs_read_ind_register(hwmgr->device,
1187                                 CGS_IND_REG__SMC, handshake_disables_offset);
1188         soft_register_value |= smum_get_mac_definition(hwmgr,
1189                                         SMU_UVD_MCLK_HANDSHAKE_DISABLE);
1190         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1191                         handshake_disables_offset, soft_register_value);
1192         return 0;
1193 }
1194
1195 static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1196 {
1197         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1198
1199         /* enable SCLK dpm */
1200         if (!data->sclk_dpm_key_disabled) {
1201                 if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1202                     hwmgr->chip_id <= CHIP_VEGAM)
1203                         smu7_disable_sclk_vce_handshake(hwmgr);
1204
1205                 PP_ASSERT_WITH_CODE(
1206                 (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
1207                 "Failed to enable SCLK DPM during DPM Start Function!",
1208                 return -EINVAL);
1209         }
1210
1211         /* enable MCLK dpm */
1212         if (0 == data->mclk_dpm_key_disabled) {
1213                 if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
1214                         smu7_disable_handshake_uvd(hwmgr);
1215
1216                 PP_ASSERT_WITH_CODE(
1217                                 (0 == smum_send_msg_to_smc(hwmgr,
1218                                                 PPSMC_MSG_MCLKDPM_Enable,
1219                                                 NULL)),
1220                                 "Failed to enable MCLK DPM during DPM Start Function!",
1221                                 return -EINVAL);
1222
1223                 if ((hwmgr->chip_family == AMDGPU_FAMILY_CI) ||
1224                     (hwmgr->chip_id == CHIP_POLARIS10) ||
1225                     (hwmgr->chip_id == CHIP_POLARIS11) ||
1226                     (hwmgr->chip_id == CHIP_POLARIS12) ||
1227                     (hwmgr->chip_id == CHIP_TONGA))
1228                         PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
1229
1230
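                /*
                 * CI-family headers do not expose the ixLCAC_*_CNTL
                 * defines used in the else branch, so the MC0/MC1/CPL
                 * LCAC controls are programmed through raw SMC indirect
                 * addresses here (presumably the CI equivalents of the
                 * registers named below).
                 */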
1231                 if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1232                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
1233                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
1234                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
1235                         udelay(10);
1236                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
1237                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
1238                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
1239                 } else {
1240                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
1241                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
1242                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
1243                         udelay(10);
1244                         if (hwmgr->chip_id == CHIP_VEGAM) {
1245                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
1246                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
1247                         } else {
1248                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
1249                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
1250                         }
1251                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
1252                 }
1253         }
1254
1255         return 0;
1256 }
1257
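/*
 * smu7_start_dpm - DPM bring-up sequence: global power management and
 * SCLK deep sleep are switched on, PCIe DPM is prepared, SCLK/MCLK DPM
 * are enabled, then PCIe DPM is enabled (or explicitly disabled when
 * masked off), and finally the AC/DC GPIO interrupt is enabled on
 * platforms with Falcon quick transition.
 */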
1258 static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
1259 {
1260         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1261
1262         /*enable general power management */
1263
1264         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1265                         GLOBAL_PWRMGT_EN, 1);
1266
1267         /* enable sclk deep sleep */
1268
1269         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1270                         DYNAMIC_PM_EN, 1);
1271
1272         /* prepare for PCIE DPM */
1273
1274         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1275                         data->soft_regs_start +
1276                         smum_get_offsetof(hwmgr, SMU_SoftRegisters,
1277                                                 VoltageChangeTimeout), 0x1000);
1278         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
1279                         SWRST_COMMAND_1, RESETLC, 0x0);
1280
1281         if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
1282                 cgs_write_register(hwmgr->device, 0x1488,
1283                         (cgs_read_register(hwmgr->device, 0x1488) & ~0x1));
1284
1285         if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
1286                 pr_err("Failed to enable Sclk DPM and Mclk DPM!\n");
1287                 return -EINVAL;
1288         }
1289
1290         /* enable PCIE dpm */
1291         if (0 == data->pcie_dpm_key_disabled) {
1292                 PP_ASSERT_WITH_CODE(
1293                                 (0 == smum_send_msg_to_smc(hwmgr,
1294                                                 PPSMC_MSG_PCIeDPM_Enable,
1295                                                 NULL)),
1296                                 "Failed to enable pcie DPM during DPM Start Function!",
1297                                 return -EINVAL);
1298         } else {
1299                 PP_ASSERT_WITH_CODE(
1300                                 (0 == smum_send_msg_to_smc(hwmgr,
1301                                                 PPSMC_MSG_PCIeDPM_Disable,
1302                                                 NULL)),
1303                                 "Failed to disable pcie DPM during DPM Start Function!",
1304                                 return -EINVAL);
1305         }
1306
1307         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1308                                 PHM_PlatformCaps_Falcon_QuickTransition)) {
1309                 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
1310                                 PPSMC_MSG_EnableACDCGPIOInterrupt,
1311                                 NULL)),
1312                                 "Failed to enable AC DC GPIO Interrupt!",
1313                                 );
1314         }
1315
1316         return 0;
1317 }
1318
1319 static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1320 {
1321         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1322
1323         /* disable SCLK dpm */
1324         if (!data->sclk_dpm_key_disabled) {
1325                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1326                                 "Trying to disable SCLK DPM when DPM is disabled",
1327                                 return 0);
1328                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
1329         }
1330
1331         /* disable MCLK dpm */
1332         if (!data->mclk_dpm_key_disabled) {
1333                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1334                                 "Trying to disable MCLK DPM when DPM is disabled",
1335                                 return 0);
1336                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
1337         }
1338
1339         return 0;
1340 }
1341
1342 static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
1343 {
1344         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1345
1346         /* disable general power management */
1347         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1348                         GLOBAL_PWRMGT_EN, 0);
1349         /* disable sclk deep sleep */
1350         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1351                         DYNAMIC_PM_EN, 0);
1352
1353         /* disable PCIE dpm */
1354         if (!data->pcie_dpm_key_disabled) {
1355                 PP_ASSERT_WITH_CODE(
1356                                 (smum_send_msg_to_smc(hwmgr,
1357                                                 PPSMC_MSG_PCIeDPM_Disable,
1358                                                 NULL) == 0),
1359                                 "Failed to disable pcie DPM during DPM Stop Function!",
1360                                 return -EINVAL);
1361         }
1362
1363         smu7_disable_sclk_mclk_dpm(hwmgr);
1364
1365         PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
1366                         "Trying to disable voltage DPM when DPM is disabled",
1367                         return 0);
1368
1369         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);
1370
1371         return 0;
1372 }
1373
1374 static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
1375 {
1376         bool protection;
1377         enum DPM_EVENT_SRC src;
1378
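        /*
         * "sources" is a bitmask of PHM_AutoThrottleSource bits; only
         * thermal, external, and their combination are recognized here.
         */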
1379         switch (sources) {
1380         default:
1381                 pr_err("Unknown throttling event sources.\n");
1382                 fallthrough;
1383         case 0:
1384                 protection = false;
1385                 /* src is unused */
1386                 break;
1387         case (1 << PHM_AutoThrottleSource_Thermal):
1388                 protection = true;
1389                 src = DPM_EVENT_SRC_DIGITAL;
1390                 break;
1391         case (1 << PHM_AutoThrottleSource_External):
1392                 protection = true;
1393                 src = DPM_EVENT_SRC_EXTERNAL;
1394                 break;
1395         case (1 << PHM_AutoThrottleSource_External) |
1396                         (1 << PHM_AutoThrottleSource_Thermal):
1397                 protection = true;
1398                 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
1399                 break;
1400         }
1401         /* Order matters - don't enable thermal protection for the wrong source. */
1402         if (protection) {
1403                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
1404                                 DPM_EVENT_SRC, src);
1405                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1406                                 THERMAL_PROTECTION_DIS,
1407                                 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1408                                                 PHM_PlatformCaps_ThermalController));
1409         } else
1410                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1411                                 THERMAL_PROTECTION_DIS, 1);
1412 }
1413
1414 static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1415                 PHM_AutoThrottleSource source)
1416 {
1417         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1418
1419         if (!(data->active_auto_throttle_sources & (1 << source))) {
1420                 data->active_auto_throttle_sources |= 1 << source;
1421                 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1422         }
1423         return 0;
1424 }
1425
1426 static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1427 {
1428         return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1429 }
1430
1431 static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1432                 PHM_AutoThrottleSource source)
1433 {
1434         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1435
1436         if (data->active_auto_throttle_sources & (1 << source)) {
1437                 data->active_auto_throttle_sources &= ~(1 << source);
1438                 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1439         }
1440         return 0;
1441 }
1442
1443 static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1444 {
1445         return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1446 }
1447
1448 static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
1449 {
1450         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1451         data->pcie_performance_request = true;
1452
1453         return 0;
1454 }
1455
1456 static int smu7_program_edc_didt_registers(struct pp_hwmgr *hwmgr,
1457                                            uint32_t *cac_config_regs,
1458                                            AtomCtrl_EDCLeakgeTable *edc_leakage_table)
1459 {
1460         uint32_t data, i = 0;
1461
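        /*
         * cac_config_regs is a 0xFFFFFFFF-terminated list of DIDT
         * register offsets; each one receives the corresponding entry
         * from the VBIOS EDC leakage table.
         */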
1462         while (cac_config_regs[i] != 0xFFFFFFFF) {
1463                 data = edc_leakage_table->DIDT_REG[i];
1464                 cgs_write_ind_register(hwmgr->device,
1465                                        CGS_IND_REG__DIDT,
1466                                        cac_config_regs[i],
1467                                        data);
1468                 i++;
1469         }
1470
1471         return 0;
1472 }
1473
1474 static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
1475 {
1476         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1477         int ret = 0;
1478
1479         if (!data->disable_edc_leakage_controller &&
1480             data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
1481             data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
1482                 ret = smu7_program_edc_didt_registers(hwmgr,
1483                                                       DIDTEDCConfig_P12,
1484                                                       &data->edc_leakage_table);
1485                 if (ret)
1486                         return ret;
1487
1488                 ret = smum_send_msg_to_smc(hwmgr,
1489                                            (PPSMC_Msg)PPSMC_MSG_EnableEDCController,
1490                                            NULL);
1491         } else {
1492                 ret = smum_send_msg_to_smc(hwmgr,
1493                                            (PPSMC_Msg)PPSMC_MSG_DisableEDCController,
1494                                            NULL);
1495         }
1496
1497         return ret;
1498 }
1499
1500 static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1501 {
1502         int tmp_result = 0;
1503         int result = 0;
1504
1505         if (smu7_voltage_control(hwmgr)) {
1506                 tmp_result = smu7_enable_voltage_control(hwmgr);
1507                 PP_ASSERT_WITH_CODE(tmp_result == 0,
1508                                 "Failed to enable voltage control!",
1509                                 result = tmp_result);
1510
1511                 tmp_result = smu7_construct_voltage_tables(hwmgr);
1512                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1513                                 "Failed to construct voltage tables!",
1514                                 result = tmp_result);
1515         }
1516         smum_initialize_mc_reg_table(hwmgr);
1517
1518         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1519                         PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1520                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1521                                 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1522
1523         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1524                         PHM_PlatformCaps_ThermalController))
1525                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1526                                 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1527
1528         tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1529         PP_ASSERT_WITH_CODE((0 == tmp_result),
1530                         "Failed to program static screen threshold parameters!",
1531                         result = tmp_result);
1532
1533         tmp_result = smu7_enable_display_gap(hwmgr);
1534         PP_ASSERT_WITH_CODE((0 == tmp_result),
1535                         "Failed to enable display gap!", result = tmp_result);
1536
1537         tmp_result = smu7_program_voting_clients(hwmgr);
1538         PP_ASSERT_WITH_CODE((0 == tmp_result),
1539                         "Failed to program voting clients!", result = tmp_result);
1540
1541         tmp_result = smum_process_firmware_header(hwmgr);
1542         PP_ASSERT_WITH_CODE((0 == tmp_result),
1543                         "Failed to process firmware header!", result = tmp_result);
1544
1545         if (hwmgr->chip_id != CHIP_VEGAM) {
1546                 tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1547                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1548                                 "Failed to initialize switch from ArbF0 to F1!",
1549                                 result = tmp_result);
1550         }
1551
1552         result = smu7_setup_default_dpm_tables(hwmgr);
1553         PP_ASSERT_WITH_CODE(0 == result,
1554                         "Failed to setup default DPM tables!", return result);
1555
1556         tmp_result = smum_init_smc_table(hwmgr);
1557         PP_ASSERT_WITH_CODE((0 == tmp_result),
1558                         "Failed to initialize SMC table!", result = tmp_result);
1559
1560         tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1561         PP_ASSERT_WITH_CODE((0 == tmp_result),
1562                         "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1563
1564         if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1565             hwmgr->chip_id <= CHIP_VEGAM) {
1566                 tmp_result = smu7_notify_has_display(hwmgr);
1567                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1568                                 "Failed to enable display setting!", result = tmp_result);
1569         } else {
1570                 smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
1571         }
1572
1573         if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1574             hwmgr->chip_id <= CHIP_VEGAM) {
1575                 tmp_result = smu7_populate_edc_leakage_registers(hwmgr);
1576                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1577                                 "Failed to populate edc leakage registers!", result = tmp_result);
1578         }
1579
1580         tmp_result = smu7_enable_sclk_control(hwmgr);
1581         PP_ASSERT_WITH_CODE((0 == tmp_result),
1582                         "Failed to enable SCLK control!", result = tmp_result);
1583
1584         tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1585         PP_ASSERT_WITH_CODE((0 == tmp_result),
1586                         "Failed to enable voltage control!", result = tmp_result);
1587
1588         tmp_result = smu7_enable_ulv(hwmgr);
1589         PP_ASSERT_WITH_CODE((0 == tmp_result),
1590                         "Failed to enable ULV!", result = tmp_result);
1591
1592         tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1593         PP_ASSERT_WITH_CODE((0 == tmp_result),
1594                         "Failed to enable deep sleep master switch!", result = tmp_result);
1595
1596         tmp_result = smu7_enable_didt_config(hwmgr);
1597         PP_ASSERT_WITH_CODE((tmp_result == 0),
1598                         "Failed to enable DIDT config!", result = tmp_result);
1599
1600         tmp_result = smu7_start_dpm(hwmgr);
1601         PP_ASSERT_WITH_CODE((0 == tmp_result),
1602                         "Failed to start DPM!", result = tmp_result);
1603
1604         tmp_result = smu7_enable_smc_cac(hwmgr);
1605         PP_ASSERT_WITH_CODE((0 == tmp_result),
1606                         "Failed to enable SMC CAC!", result = tmp_result);
1607
1608         tmp_result = smu7_enable_power_containment(hwmgr);
1609         PP_ASSERT_WITH_CODE((0 == tmp_result),
1610                         "Failed to enable power containment!", result = tmp_result);
1611
1612         tmp_result = smu7_power_control_set_level(hwmgr);
1613         PP_ASSERT_WITH_CODE((0 == tmp_result),
1614                         "Failed to set power control level!", result = tmp_result);
1615
1616         tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1617         PP_ASSERT_WITH_CODE((0 == tmp_result),
1618                         "Failed to enable thermal auto throttle!", result = tmp_result);
1619
1620         tmp_result = smu7_pcie_performance_request(hwmgr);
1621         PP_ASSERT_WITH_CODE((0 == tmp_result),
1622                         "PCIe performance request failed!", result = tmp_result);
1623
1624         return 0;
1625 }
1626
1627 static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
1628 {
1629         if (!hwmgr->avfs_supported)
1630                 return 0;
1631
1632         if (enable) {
1633                 if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1634                                 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1635                         PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1636                                         hwmgr, PPSMC_MSG_EnableAvfs, NULL),
1637                                         "Failed to enable AVFS!",
1638                                         return -EINVAL);
1639                 }
1640         } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1641                         CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1642                 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1643                                 hwmgr, PPSMC_MSG_DisableAvfs, NULL),
1644                                 "Failed to disable AVFS!",
1645                                 return -EINVAL);
1646         }
1647
1648         return 0;
1649 }
1650
1651 static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
1652 {
1653         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1654
1655         if (!hwmgr->avfs_supported)
1656                 return 0;
1657
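        /*
         * Per the logic below: a VDDC override keeps AVFS disabled, an
         * SCLK-only override restarts AVFS (off, then on) so the SMC
         * re-evaluates it for the new clocks, and anything else simply
         * keeps AVFS enabled.
         */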
1658         if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1659                 smu7_avfs_control(hwmgr, false);
1660         } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
1661                 smu7_avfs_control(hwmgr, false);
1662                 smu7_avfs_control(hwmgr, true);
1663         } else {
1664                 smu7_avfs_control(hwmgr, true);
1665         }
1666
1667         return 0;
1668 }
1669
1670 static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1671 {
1672         int tmp_result, result = 0;
1673
1674         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1675                         PHM_PlatformCaps_ThermalController))
1676                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1677                                 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
1678
1679         tmp_result = smu7_disable_power_containment(hwmgr);
1680         PP_ASSERT_WITH_CODE((tmp_result == 0),
1681                         "Failed to disable power containment!", result = tmp_result);
1682
1683         tmp_result = smu7_disable_smc_cac(hwmgr);
1684         PP_ASSERT_WITH_CODE((tmp_result == 0),
1685                         "Failed to disable SMC CAC!", result = tmp_result);
1686
1687         tmp_result = smu7_disable_didt_config(hwmgr);
1688         PP_ASSERT_WITH_CODE((tmp_result == 0),
1689                         "Failed to disable DIDT!", result = tmp_result);
1690
1691         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1692                         CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
1693         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1694                         GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
1695
1696         tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
1697         PP_ASSERT_WITH_CODE((tmp_result == 0),
1698                         "Failed to disable thermal auto throttle!", result = tmp_result);
1699
1700         tmp_result = smu7_avfs_control(hwmgr, false);
1701         PP_ASSERT_WITH_CODE((tmp_result == 0),
1702                         "Failed to disable AVFS!", result = tmp_result);
1703
1704         tmp_result = smu7_stop_dpm(hwmgr);
1705         PP_ASSERT_WITH_CODE((tmp_result == 0),
1706                         "Failed to stop DPM!", result = tmp_result);
1707
1708         tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
1709         PP_ASSERT_WITH_CODE((tmp_result == 0),
1710                         "Failed to disable deep sleep master switch!", result = tmp_result);
1711
1712         tmp_result = smu7_disable_ulv(hwmgr);
1713         PP_ASSERT_WITH_CODE((tmp_result == 0),
1714                         "Failed to disable ULV!", result = tmp_result);
1715
1716         tmp_result = smu7_clear_voting_clients(hwmgr);
1717         PP_ASSERT_WITH_CODE((tmp_result == 0),
1718                         "Failed to clear voting clients!", result = tmp_result);
1719
1720         tmp_result = smu7_reset_to_default(hwmgr);
1721         PP_ASSERT_WITH_CODE((tmp_result == 0),
1722                         "Failed to reset to default!", result = tmp_result);
1723
1724         tmp_result = smum_stop_smc(hwmgr);
1725         PP_ASSERT_WITH_CODE((tmp_result == 0),
1726                         "Failed to stop smc!", result = tmp_result);
1727
1728         tmp_result = smu7_force_switch_to_arbf0(hwmgr);
1729         PP_ASSERT_WITH_CODE((tmp_result == 0),
1730                         "Failed to force switch to ArbF0!", result = tmp_result);
1731
1732         return result;
1733 }
1734
1735 static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
1736 {
1737         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1738         struct phm_ppt_v1_information *table_info =
1739                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
1740         struct amdgpu_device *adev = hwmgr->adev;
1741         uint8_t tmp1, tmp2;
1742         uint16_t tmp3 = 0;
1743
1744         data->dll_default_on = false;
1745         data->mclk_dpm0_activity_target = 0xa;
1746         data->vddc_vddgfx_delta = 300;
1747         data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
1748         data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
1749         data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
1750         data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
1751         data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
1752         data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
1753         data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
1754         data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
1755         data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
1756         data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
1757
1758         data->mclk_dpm_key_disabled = hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
1759         data->sclk_dpm_key_disabled = hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
1760         data->pcie_dpm_key_disabled = hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
1761         /* need to set voltage control types before EVV patching */
1762         data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
1763         data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
1764         data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
1765         data->enable_tdc_limit_feature = true;
1766         data->enable_pkg_pwr_tracking_feature = true;
1767         data->force_pcie_gen = PP_PCIEGenInvalid;
1768         data->ulv_supported = hwmgr->feature_mask & PP_ULV_MASK ? true : false;
1769         data->current_profile_setting.bupdate_sclk = 1;
1770         data->current_profile_setting.sclk_up_hyst = 0;
1771         data->current_profile_setting.sclk_down_hyst = 100;
1772         data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
1773         data->current_profile_setting.bupdate_mclk = 1;
1774         if (hwmgr->chip_id >= CHIP_POLARIS10) {
1775                 if (adev->gmc.vram_width == 256) {
1776                         data->current_profile_setting.mclk_up_hyst = 10;
1777                         data->current_profile_setting.mclk_down_hyst = 60;
1778                         data->current_profile_setting.mclk_activity = 25;
1779                 } else if (adev->gmc.vram_width == 128) {
1780                         data->current_profile_setting.mclk_up_hyst = 5;
1781                         data->current_profile_setting.mclk_down_hyst = 16;
1782                         data->current_profile_setting.mclk_activity = 20;
1783                 } else if (adev->gmc.vram_width == 64) {
1784                         data->current_profile_setting.mclk_up_hyst = 3;
1785                         data->current_profile_setting.mclk_down_hyst = 16;
1786                         data->current_profile_setting.mclk_activity = 20;
1787                 }
1788         } else {
1789                 data->current_profile_setting.mclk_up_hyst = 0;
1790                 data->current_profile_setting.mclk_down_hyst = 100;
1791                 data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
1792         }
1793         hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
1794         hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1795         hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1796
1797         if (hwmgr->chip_id  == CHIP_HAWAII) {
1798                 data->thermal_temp_setting.temperature_low = 94500;
1799                 data->thermal_temp_setting.temperature_high = 95000;
1800                 data->thermal_temp_setting.temperature_shutdown = 104000;
1801         } else {
1802                 data->thermal_temp_setting.temperature_low = 99500;
1803                 data->thermal_temp_setting.temperature_high = 100000;
1804                 data->thermal_temp_setting.temperature_shutdown = 104000;
1805         }
1806
1807         data->fast_watermark_threshold = 100;
1808         if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1809                         VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
1810                 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1811         else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1812                         VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
1813                 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1814
1815         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1816                         PHM_PlatformCaps_ControlVDDGFX)) {
1817                 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1818                         VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
1819                         data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1820                 }
1821         }
1822
1823         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1824                         PHM_PlatformCaps_EnableMVDDControl)) {
1825                 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1826                                 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
1827                         data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1828                 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1829                                 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
1830                         data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1831         }
1832
1833         if (SMU7_VOLTAGE_CONTROL_NONE == data->vdd_gfx_control)
1834                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1835                         PHM_PlatformCaps_ControlVDDGFX);
1836
1837         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1838                         PHM_PlatformCaps_ControlVDDCI)) {
1839                 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1840                                 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
1841                         data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1842                 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1843                                 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
1844                         data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1845         }
1846
1847         if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
1848                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1849                                 PHM_PlatformCaps_EnableMVDDControl);
1850
1851         if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
1852                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1853                                 PHM_PlatformCaps_ControlVDDCI);
1854
1855         data->vddc_phase_shed_control = 1;
1856         if ((hwmgr->chip_id == CHIP_POLARIS12) ||
1857             ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) ||
1858             ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
1859             ASICID_IS_P30(adev->pdev->device, adev->pdev->revision) ||
1860             ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
1861                 if (data->voltage_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1862                         atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
1863                                                         &tmp3);
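                        /*
                         * Extract bits [6:5] of the SVI2 info word and
                         * swap them to form the two-bit phase shedding
                         * control value.
                         */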
1864                         tmp3 = (tmp3 >> 5) & 0x3;
1865                         data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
1866                 }
1867         } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1868                 data->vddc_phase_shed_control = 1;
1869         }
1870
1871         if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK)
1872                 && (table_info->cac_dtp_table->usClockStretchAmount != 0))
1873                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1874                                         PHM_PlatformCaps_ClockStretcher);
1875
1876         data->pcie_gen_performance.max = PP_PCIEGen1;
1877         data->pcie_gen_performance.min = PP_PCIEGen3;
1878         data->pcie_gen_power_saving.max = PP_PCIEGen1;
1879         data->pcie_gen_power_saving.min = PP_PCIEGen3;
1880         data->pcie_lane_performance.max = 0;
1881         data->pcie_lane_performance.min = 16;
1882         data->pcie_lane_power_saving.max = 0;
1883         data->pcie_lane_power_saving.min = 16;
1884
1885
1886         if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
1887                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1888                               PHM_PlatformCaps_UVDPowerGating);
1889         if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
1890                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1891                               PHM_PlatformCaps_VCEPowerGating);
1892
1893         data->disable_edc_leakage_controller = true;
1894         if (((adev->asic_type == CHIP_POLARIS10) && hwmgr->is_kicker) ||
1895             ((adev->asic_type == CHIP_POLARIS11) && hwmgr->is_kicker) ||
1896             (adev->asic_type == CHIP_POLARIS12) ||
1897             (adev->asic_type == CHIP_VEGAM))
1898                 data->disable_edc_leakage_controller = false;
1899
1900         if (!atomctrl_is_asic_internal_ss_supported(hwmgr)) {
1901                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1902                         PHM_PlatformCaps_MemorySpreadSpectrumSupport);
1903                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1904                         PHM_PlatformCaps_EngineSpreadSpectrumSupport);
1905         }
1906
1907         if ((adev->pdev->device == 0x699F) &&
1908             (adev->pdev->revision == 0xCF)) {
1909                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1910                                 PHM_PlatformCaps_PowerContainment);
1911                 data->enable_tdc_limit_feature = false;
1912                 data->enable_pkg_pwr_tracking_feature = false;
1913                 data->disable_edc_leakage_controller = true;
1914                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1915                                         PHM_PlatformCaps_ClockStretcher);
1916         }
1917 }
1918
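/*
 * smu7_calculate_ro_range - derive the per-SKU ring-oscillator (RO)
 * range used by the EVV/AVFS calculations; Polaris10 selects bounds
 * from the EVV-revision and ASIC-revision efuses, other variants use
 * fixed values.
 */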
1919 static int smu7_calculate_ro_range(struct pp_hwmgr *hwmgr)
1920 {
1921         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1922         struct amdgpu_device *adev = hwmgr->adev;
1923         uint32_t asicrev1, evv_revision, max = 0, min = 0;
1924
1925         atomctrl_read_efuse(hwmgr, STRAP_EVV_REVISION_LSB, STRAP_EVV_REVISION_MSB,
1926                         &evv_revision);
1927
1928         atomctrl_read_efuse(hwmgr, 568, 579, &asicrev1);
1929
1930         if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) ||
1931             ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
1932                 min = 1200;
1933                 max = 2500;
1934         } else if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
1935                    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
1936                 min = 900;
1937                 max = 2100;
1938         } else if (hwmgr->chip_id == CHIP_POLARIS10) {
1939                 if (adev->pdev->subsystem_vendor == 0x106B) {
1940                         min = 1000;
1941                         max = 2300;
1942                 } else {
1943                         if (evv_revision == 0) {
1944                                 min = 1000;
1945                                 max = 2300;
1946                         } else if (evv_revision == 1) {
1947                                 if (asicrev1 == 326) {
1948                                         min = 1200;
1949                                         max = 2500;
1950                                         /* TODO: PATCH RO in VBIOS */
1951                                 } else {
1952                                         min = 1200;
1953                                         max = 2000;
1954                                 }
1955                         } else if (evv_revision == 2) {
1956                                 min = 1200;
1957                                 max = 2500;
1958                         }
1959                 }
1960         } else {
1961                 min = 1100;
1962                 max = 2100;
1963         }
1964
1965         data->ro_range_minimum = min;
1966         data->ro_range_maximum = max;
1967
1968         /* TODO: PATCH RO in VBIOS here */
1969
1970         return 0;
1971 }
1972
1973 /**
1974  * smu7_get_evv_voltages - Get Leakage VDDC based on leakage ID.
1975  *
1976  * @hwmgr:  the address of the powerplay hardware manager.
1977  * Return: 0 on success, -EINVAL on an out-of-range EVV voltage or missing table info.
1978  */
1979 static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1980 {
1981         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1982         uint16_t vv_id;
1983         uint16_t vddc = 0;
1984         uint16_t vddgfx = 0;
1985         uint16_t i, j;
1986         uint32_t sclk = 0;
1987         struct phm_ppt_v1_information *table_info =
1988                         (struct phm_ppt_v1_information *)hwmgr->pptable;
1989         struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
1990
1991         if (hwmgr->chip_id == CHIP_POLARIS10 ||
1992             hwmgr->chip_id == CHIP_POLARIS11 ||
1993             hwmgr->chip_id == CHIP_POLARIS12)
1994                 smu7_calculate_ro_range(hwmgr);
1995
1996         for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
1997                 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1998
1999                 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2000                         if ((hwmgr->pp_table_version == PP_TABLE_V1)
2001                             && !phm_get_sclk_for_voltage_evv(hwmgr,
2002                                                 table_info->vddgfx_lookup_table, vv_id, &sclk)) {
2003                                 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2004                                                         PHM_PlatformCaps_ClockStretcher)) {
2005                                         sclk_table = table_info->vdd_dep_on_sclk;
2006
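                                        /*
                                         * If clock stretching is
                                         * disabled for this sclk
                                         * level, evaluate EVV at a
                                         * slightly higher clock
                                         * (sclk + 5000) instead.
                                         */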
2007                                         for (j = 1; j < sclk_table->count; j++) {
2008                                                 if (sclk_table->entries[j].clk == sclk &&
2009                                                                 sclk_table->entries[j].cks_enable == 0) {
2010                                                         sclk += 5000;
2011                                                         break;
2012                                                 }
2013                                         }
2014                                 }
2015                                 if (0 == atomctrl_get_voltage_evv_on_sclk
2016                                     (hwmgr, VOLTAGE_TYPE_VDDGFX, sclk,
2017                                      vv_id, &vddgfx)) {
2018                                         /* need to make sure vddgfx is less than 2v or else it could burn the ASIC. */
2019                                         PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
2020
2021                                         /* the voltage should not be zero nor equal to leakage ID */
2022                                         if (vddgfx != 0 && vddgfx != vv_id) {
2023                                                 data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
2024                                                 data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
2025                                                 data->vddcgfx_leakage.count++;
2026                                         }
2027                                 } else {
2028                                         pr_info("Error retrieving EVV voltage value!\n");
2029                                 }
2030                         }
2031                 } else {
2032                         if ((hwmgr->pp_table_version == PP_TABLE_V0)
2033                                 || !phm_get_sclk_for_voltage_evv(hwmgr,
2034                                         table_info->vddc_lookup_table, vv_id, &sclk)) {
2035                                 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2036                                                 PHM_PlatformCaps_ClockStretcher)) {
2037                                         if (table_info == NULL)
2038                                                 return -EINVAL;
2039                                         sclk_table = table_info->vdd_dep_on_sclk;
2040
2041                                         for (j = 1; j < sclk_table->count; j++) {
2042                                                 if (sclk_table->entries[j].clk == sclk &&
2043                                                                 sclk_table->entries[j].cks_enable == 0) {
2044                                                         sclk += 5000;
2045                                                         break;
2046                                                 }
2047                                         }
2048                                 }
2049
2050                                 if (phm_get_voltage_evv_on_sclk(hwmgr,
2051                                                         VOLTAGE_TYPE_VDDC,
2052                                                         sclk, vv_id, &vddc) == 0) {
2053                                         if (vddc >= 2000 || vddc == 0)
2054                                                 return -EINVAL;
2055                                 } else {
2056                                         pr_debug("failed to retrieve EVV voltage!\n");
2057                                         continue;
2058                                 }
2059
2060                                 /* the voltage should not be zero nor equal to leakage ID */
2061                                 if (vddc != 0 && vddc != vv_id) {
2062                                         data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
2063                                         data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
2064                                         data->vddc_leakage.count++;
2065                                 }
2066                         }
2067                 }
2068         }
2069
2070         return 0;
2071 }
2072
2073 /**
2074  * smu7_patch_ppt_v1_with_vdd_leakage - Change virtual leakage voltage to actual value.
2075  *
2076  * @hwmgr:  the address of the powerplay hardware manager.
2077  * @voltage: pointer to changing voltage
2078  * @leakage_table: pointer to leakage table
2079  */
2080 static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2081                 uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
2082 {
2083         uint32_t index;
2084
2085         /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2086         for (index = 0; index < leakage_table->count; index++) {
2087                 /* if this voltage matches a leakage voltage ID */
2088                 /* patch with actual leakage voltage */
2089                 if (leakage_table->leakage_id[index] == *voltage) {
2090                         *voltage = leakage_table->actual_voltage[index];
2091                         break;
2092                 }
2093         }
2094
2095         if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2096                 pr_err("Voltage value looks like a Leakage ID but it's not patched\n");
2097 }
2098
2099 /**
2100  * smu7_patch_lookup_table_with_leakage - Patch voltage lookup table by EVV leakages.
2101  *
2102  * @hwmgr:  the address of the powerplay hardware manager.
2103  * @lookup_table: pointer to voltage lookup table
2104  * @leakage_table: pointer to leakage table
2105  * Return:     always 0
2106  */
2107 static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
2108                 phm_ppt_v1_voltage_lookup_table *lookup_table,
2109                 struct smu7_leakage_voltage *leakage_table)
2110 {
2111         uint32_t i;
2112
2113         for (i = 0; i < lookup_table->count; i++)
2114                 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2115                                 &lookup_table->entries[i].us_vdd, leakage_table);
2116
2117         return 0;
2118 }
2119
2120 static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
2121                 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
2122                 uint16_t *vddc)
2123 {
2124         struct phm_ppt_v1_information *table_info =
2125                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2126         smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
2127         hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
2128                         table_info->max_clock_voltage_on_dc.vddc;
2129         return 0;
2130 }
2131
2132 static int smu7_patch_voltage_dependency_tables_with_lookup_table(
2133                 struct pp_hwmgr *hwmgr)
2134 {
2135         uint8_t entry_id;
2136         uint8_t voltage_id;
2137         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2138         struct phm_ppt_v1_information *table_info =
2139                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2140
2141         struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
2142                         table_info->vdd_dep_on_sclk;
2143         struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
2144                         table_info->vdd_dep_on_mclk;
2145         struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2146                         table_info->mm_dep_table;
2147
2148         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2149                 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2150                         voltage_id = sclk_table->entries[entry_id].vddInd;
2151                         sclk_table->entries[entry_id].vddgfx =
2152                                 table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
2153                 }
2154         } else {
2155                 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2156                         voltage_id = sclk_table->entries[entry_id].vddInd;
2157                         sclk_table->entries[entry_id].vddc =
2158                                 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2159                 }
2160         }
2161
2162         for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
2163                 voltage_id = mclk_table->entries[entry_id].vddInd;
2164                 mclk_table->entries[entry_id].vddc =
2165                         table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2166         }
2167
2168         for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
2169                 voltage_id = mm_table->entries[entry_id].vddcInd;
2170                 mm_table->entries[entry_id].vddc =
2171                         table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2172         }
2173
2174         return 0;
2175
2176 }
2177
2178 static int phm_add_voltage(struct pp_hwmgr *hwmgr,
2179                         phm_ppt_v1_voltage_lookup_table *look_up_table,
2180                         phm_ppt_v1_voltage_lookup_record *record)
2181 {
2182         uint32_t i;
2183
2184         PP_ASSERT_WITH_CODE((NULL != look_up_table),
2185                 "Lookup Table is NULL.", return -EINVAL);
2186         PP_ASSERT_WITH_CODE((0 != look_up_table->count),
2187                 "Lookup Table empty.", return -EINVAL);
2188
2189         i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
2190         PP_ASSERT_WITH_CODE((i > look_up_table->count),
2191                 "Lookup Table is full.", return -EINVAL);
2192
2193         /* This is to avoid entering duplicate calculated records. */
2194         for (i = 0; i < look_up_table->count; i++) {
2195                 if (look_up_table->entries[i].us_vdd == record->us_vdd) {
2196                         if (look_up_table->entries[i].us_calculated == 1)
2197                                 return 0;
2198                         break;
2199                 }
2200         }
2201
2202         look_up_table->entries[i].us_calculated = 1;
2203         look_up_table->entries[i].us_vdd = record->us_vdd;
2204         look_up_table->entries[i].us_cac_low = record->us_cac_low;
2205         look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
2206         look_up_table->entries[i].us_cac_high = record->us_cac_high;
2207         /* Only increment the count when we're appending, not replacing duplicate entry. */
2208         if (i == look_up_table->count)
2209                 look_up_table->count++;
2210
2211         return 0;
2212 }
2213
2214
2215 static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
2216 {
2217         uint8_t entry_id;
2218         struct phm_ppt_v1_voltage_lookup_record v_record;
2219         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2220         struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2221
2222         phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
2223         phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
2224
2225         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2226                 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
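                        /*
                         * Bit 15 flags a negative vdd_offset; the
                         * 0xFFFF subtraction folds the 16-bit offset
                         * back into a signed sum.
                         */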
2227                         if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
2228                                 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
2229                                         sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
2230                         else
2231                                 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
2232                                         sclk_table->entries[entry_id].vdd_offset;
2233
2234                         sclk_table->entries[entry_id].vddc =
2235                                 v_record.us_cac_low = v_record.us_cac_mid =
2236                                 v_record.us_cac_high = v_record.us_vdd;
2237
2238                         phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
2239                 }
2240
2241                 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
2242                         if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
2243                                 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
2244                                         mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
2245                         else
2246                                 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
2247                                         mclk_table->entries[entry_id].vdd_offset;
2248
2249                         mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
2250                                 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
2251                         phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
2252                 }
2253         }
2254         return 0;
2255 }
2256
2257 static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
2258 {
2259         uint8_t entry_id;
2260         struct phm_ppt_v1_voltage_lookup_record v_record;
2261         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2262         struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2263         phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
2264
2265         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2266                 for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
2267                         if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
2268                                 v_record.us_vdd = mm_table->entries[entry_id].vddc +
2269                                         mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
2270                         else
2271                                 v_record.us_vdd = mm_table->entries[entry_id].vddc +
2272                                         mm_table->entries[entry_id].vddgfx_offset;
2273
2274                         /* Add the calculated VDDGFX to the VDDGFX lookup table */
2275                         mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
2276                                 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
2277                         phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
2278                 }
2279         }
2280         return 0;
2281 }
2282
2283 static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
2284                 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
2285 {
2286         uint32_t table_size, i, j;
2287         table_size = lookup_table->count;
2288
2289         PP_ASSERT_WITH_CODE(0 != lookup_table->count,
2290                 "Lookup table is empty", return -EINVAL);
2291
2292         /* Insertion sort of entries by ascending us_vdd; the tables are small, so O(n^2) is fine. */
2293         for (i = 0; i < table_size - 1; i++) {
2294                 for (j = i + 1; j > 0; j--) {
2295                         if (lookup_table->entries[j].us_vdd <
2296                                         lookup_table->entries[j - 1].us_vdd) {
2297                                 swap(lookup_table->entries[j - 1],
2298                                      lookup_table->entries[j]);
2299                         }
2300                 }
2301         }
2302
2303         return 0;
2304 }
2305
2306 static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
2307 {
2308         int result = 0;
2309         int tmp_result;
2310         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2311         struct phm_ppt_v1_information *table_info =
2312                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2313
2314         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2315                 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2316                         table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
2317                 if (tmp_result != 0)
2318                         result = tmp_result;
2319
2320                 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2321                         &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
2322         } else {
2323
2324                 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2325                                 table_info->vddc_lookup_table, &(data->vddc_leakage));
2326                 if (tmp_result)
2327                         result = tmp_result;
2328
2329                 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
2330                                 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
2331                 if (tmp_result)
2332                         result = tmp_result;
2333         }
2334
2335         tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
2336         if (tmp_result)
2337                 result = tmp_result;
2338
2339         tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
2340         if (tmp_result)
2341                 result = tmp_result;
2342
2343         tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
2344         if (tmp_result)
2345                 result = tmp_result;
2346
2347         tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
2348         if (tmp_result)
2349                 result = tmp_result;
2350
2351         tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
2352         if (tmp_result)
2353                 result = tmp_result;
2354
2355         return result;
2356 }
2357
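/**
 * smu7_find_highest_vddc - Find the highest real VDDC the board supports.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 *
 * Scan the VDDC lookup table for the largest non-virtual voltage (below
 * ATOM_VIRTUAL_VOLTAGE_ID0), starting from the top SCLK dependency entry.
 */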
2358 static int smu7_find_highest_vddc(struct pp_hwmgr *hwmgr)
2359 {
2360         struct phm_ppt_v1_information *table_info =
2361                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2362         struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2363                                                 table_info->vdd_dep_on_sclk;
2364         struct phm_ppt_v1_voltage_lookup_table *lookup_table =
2365                                                 table_info->vddc_lookup_table;
2366         uint16_t highest_voltage;
2367         uint32_t i;
2368
2369         highest_voltage = allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2370
2371         for (i = 0; i < lookup_table->count; i++) {
2372                 if (lookup_table->entries[i].us_vdd < ATOM_VIRTUAL_VOLTAGE_ID0 &&
2373                     lookup_table->entries[i].us_vdd > highest_voltage)
2374                         highest_voltage = lookup_table->entries[i].us_vdd;
2375         }
2376
2377         return highest_voltage;
2378 }
2379
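/**
 * smu7_set_private_data_based_on_pptable_v1 - Cache AC clock/voltage limits.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 *
 * Derive the maximum AC SCLK/MCLK/VDDC/VDDCI from the last entries of the
 * v1 dependency tables and mirror them into hwmgr->dyn_state.
 */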
2380 static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
2381 {
2382         struct phm_ppt_v1_information *table_info =
2383                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2384
2385         struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2386                                                 table_info->vdd_dep_on_sclk;
2387         struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
2388                                                 table_info->vdd_dep_on_mclk;
2389
2390         PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
2391                 "VDD dependency on SCLK table is missing.",
2392                 return -EINVAL);
2393         PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
2394                 "VDD dependency on SCLK table has to have at least one entry.",
2395                 return -EINVAL);
2396
2397         PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
2398                 "VDD dependency on MCLK table is missing.",
2399                 return -EINVAL);
2400         PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
2401                 "VDD dependency on MCLK table has to have at least one entry.",
2402                 return -EINVAL);
2403
2404         table_info->max_clock_voltage_on_ac.sclk =
2405                 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
2406         table_info->max_clock_voltage_on_ac.mclk =
2407                 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
2408         if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM)
2409                 table_info->max_clock_voltage_on_ac.vddc =
2410                         smu7_find_highest_vddc(hwmgr);
2411         else
2412                 table_info->max_clock_voltage_on_ac.vddc =
2413                         allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2414         table_info->max_clock_voltage_on_ac.vddci =
2415                 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
2416
2417         hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
2418         hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
2419         hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
2420         hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
2421
2422         return 0;
2423 }
2424
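/**
 * smu7_patch_voltage_workaround - Apply a board-specific MCLK voltage fix.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 *
 * On certain 0x67DF (rev 0xC7) boards, program the CKS stretch amount and,
 * if the top MCLK DPM level's voltage is below 1000 (mV), repoint it at
 * the first real lookup-table entry of at least 1000.
 */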
2425 static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
2426 {
2427         struct phm_ppt_v1_information *table_info =
2428                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
2429         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
2430         struct phm_ppt_v1_voltage_lookup_table *lookup_table;
2431         uint32_t i;
2432         uint32_t hw_revision, sub_vendor_id, sub_sys_id;
2433         struct amdgpu_device *adev = hwmgr->adev;
2434
2435         if (table_info != NULL) {
2436                 dep_mclk_table = table_info->vdd_dep_on_mclk;
2437                 lookup_table = table_info->vddc_lookup_table;
2438         } else
2439                 return 0;
2440
2441         hw_revision = adev->pdev->revision;
2442         sub_sys_id = adev->pdev->subsystem_device;
2443         sub_vendor_id = adev->pdev->subsystem_vendor;
2444
2445         if (adev->pdev->device == 0x67DF && hw_revision == 0xC7 &&
2446             ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
2447              (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
2448              (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
2449
2450                 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
2451                                               CGS_IND_REG__SMC,
2452                                               PWR_CKS_CNTL,
2453                                               CKS_STRETCH_AMOUNT,
2454                                               0x3);
2455
2456                 if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
2457                         return 0;
2458
2459                 for (i = 0; i < lookup_table->count; i++) {
2460                         if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
2461                                 dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
2462                                 return 0;
2463                         }
2464                 }
2465         }
2466         return 0;
2467 }
2468
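/**
 * smu7_thermal_parameter_init - Seed thermal and fan control defaults.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 *
 * Route the VDDC PCC GPIO into CNB_PWRMGT_CNTL according to its pin shift,
 * then, if a target operating temperature is set, derive the fan PWM/RPM
 * limits and operating temperature range from the CAC/DTP table.
 */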
2469 static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
2470 {
2471         struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2472         uint32_t temp_reg;
2473         struct phm_ppt_v1_information *table_info =
2474                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2475
2476
2477         if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
2478                 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
2479                 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
2480                 case 0:
2481                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
2482                         break;
2483                 case 1:
2484                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
2485                         break;
2486                 case 2:
2487                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
2488                         break;
2489                 case 3:
2490                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
2491                         break;
2492                 case 4:
2493                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
2494                         break;
2495                 default:
2496                         break;
2497                 }
2498                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
2499         }
2500
2501         if (table_info == NULL)
2502                 return 0;
2503
2504         if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
2505                 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
2506                 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
2507                         (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2508
2509                 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
2510                         (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2511
2512                 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
2513
2514                 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
2515
2516                 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
2517                         (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2518
2519                 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
2520
2521                 table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
2522                                                                 (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
2523
2524                 table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2525                 table_info->cac_dtp_table->usOperatingTempStep = 1;
2526                 table_info->cac_dtp_table->usOperatingTempHyst = 1;
2527
2528                 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
2529                                hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2530
2531                 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
2532                                hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
2533
2534                 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
2535                                table_info->cac_dtp_table->usOperatingTempMinLimit;
2536
2537                 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
2538                                table_info->cac_dtp_table->usOperatingTempMaxLimit;
2539
2540                 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
2541                                table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2542
2543                 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
2544                                table_info->cac_dtp_table->usOperatingTempStep;
2545
2546                 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
2547                                table_info->cac_dtp_table->usTargetOperatingTemp;
2548                 if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
2549                         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2550                                         PHM_PlatformCaps_ODFuzzyFanControlSupport);
2551         }
2552
2553         return 0;
2554 }
2555
2556 /**
2557  * smu7_patch_ppt_v0_with_vdd_leakage - Change virtual leakage voltage to actual value.
2558  *
2559  * @hwmgr:  the address of the powerplay hardware manager.
2560  * @voltage: pointer to changing voltage
2561  * @leakage_table: pointer to leakage table
2562  */
2563 static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2564                 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2565 {
2566         uint32_t index;
2567
2568         /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2569         for (index = 0; index < leakage_table->count; index++) {
2570                 /* if this voltage matches a leakage voltage ID */
2571                 /* patch with actual leakage voltage */
2572                 if (leakage_table->leakage_id[index] == *voltage) {
2573                         *voltage = leakage_table->actual_voltage[index];
2574                         break;
2575                 }
2576         }
2577
2578         if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2579                 pr_err("Voltage value looks like a Leakage ID but it's not patched\n");
2580 }
2581
2582
2583 static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2584                               struct phm_clock_voltage_dependency_table *tab)
2585 {
2586         uint16_t i;
2587         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2588
2589         if (tab)
2590                 for (i = 0; i < tab->count; i++)
2591                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2592                                                 &data->vddc_leakage);
2593
2594         return 0;
2595 }
2596
2597 static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2598                                struct phm_clock_voltage_dependency_table *tab)
2599 {
2600         uint16_t i;
2601         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2602
2603         if (tab)
2604                 for (i = 0; i < tab->count; i++)
2605                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2606                                                         &data->vddci_leakage);
2607
2608         return 0;
2609 }
2610
2611 static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2612                                   struct phm_vce_clock_voltage_dependency_table *tab)
2613 {
2614         uint16_t i;
2615         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2616
2617         if (tab)
2618                 for (i = 0; i < tab->count; i++)
2619                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2620                                                         &data->vddc_leakage);
2621
2622         return 0;
2623 }
2624
2625
2626 static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2627                                   struct phm_uvd_clock_voltage_dependency_table *tab)
2628 {
2629         uint16_t i;
2630         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2631
2632         if (tab)
2633                 for (i = 0; i < tab->count; i++)
2634                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2635                                                         &data->vddc_leakage);
2636
2637         return 0;
2638 }
2639
2640 static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2641                                          struct phm_phase_shedding_limits_table *tab)
2642 {
2643         uint16_t i;
2644         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2645
2646         if (tab)
2647                 for (i = 0; i < tab->count; i++)
2648                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2649                                                         &data->vddc_leakage);
2650
2651         return 0;
2652 }
2653
2654 static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2655                                    struct phm_samu_clock_voltage_dependency_table *tab)
2656 {
2657         uint16_t i;
2658         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2659
2660         if (tab)
2661                 for (i = 0; i < tab->count; i++)
2662                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2663                                                         &data->vddc_leakage);
2664
2665         return 0;
2666 }
2667
2668 static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2669                                   struct phm_acp_clock_voltage_dependency_table *tab)
2670 {
2671         uint16_t i;
2672         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2673
2674         if (tab)
2675                 for (i = 0; i < tab->count; i++)
2676                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2677                                         &data->vddc_leakage);
2678
2679         return 0;
2680 }
2681
2682 static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2683                                   struct phm_clock_and_voltage_limits *tab)
2684 {
2685         uint32_t vddc, vddci;
2686         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2687
2688         if (tab) {
2689                 vddc = tab->vddc;
2690                 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2691                                                    &data->vddc_leakage);
2692                 tab->vddc = vddc;
2693                 vddci = tab->vddci;
2694                 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2695                                                    &data->vddci_leakage);
2696                 tab->vddci = vddci;
2697         }
2698
2699         return 0;
2700 }
2701
2702 static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2703 {
2704         uint32_t i;
2705         uint32_t vddc;
2706         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2707
2708         if (tab) {
2709                 for (i = 0; i < tab->count; i++) {
2710                         vddc = (uint32_t)(tab->entries[i].Vddc);
2711                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2712                         tab->entries[i].Vddc = (uint16_t)vddc;
2713                 }
2714         }
2715
2716         return 0;
2717 }
2718
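/**
 * smu7_patch_dependency_tables_with_leakage - Patch all v0 tables for leakage.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 *
 * Run every v0 clock/voltage dependency, limit and CAC table through the
 * leakage patcher; any failure is reported as -EINVAL.
 */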
2719 static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2720 {
2721         int tmp;
2722
2723         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2724         if (tmp)
2725                 return -EINVAL;
2726
2727         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2728         if (tmp)
2729                 return -EINVAL;
2730
2731         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2732         if (tmp)
2733                 return -EINVAL;
2734
2735         tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2736         if (tmp)
2737                 return -EINVAL;
2738
2739         tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2740         if (tmp)
2741                 return -EINVAL;
2742
2743         tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2744         if (tmp)
2745                 return -EINVAL;
2746
2747         tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2748         if (tmp)
2749                 return -EINVAL;
2750
2751         tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2752         if (tmp)
2753                 return -EINVAL;
2754
2755         tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2756         if (tmp)
2757                 return -EINVAL;
2758
2759         tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2760         if (tmp)
2761                 return -EINVAL;
2762
2763         tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2764         if (tmp)
2765                 return -EINVAL;
2766
2767         tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2768         if (tmp)
2769                 return -EINVAL;
2770
2771         return 0;
2772 }
2773
2774
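/**
 * smu7_set_private_data_based_on_pptable_v0 - Cache AC limits from v0 tables.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 *
 * Record the min/max VDDC (and VDDCI, when present) from the v0 dependency
 * tables and derive the maximum AC SCLK/MCLK/VDDC/VDDCI limits.
 */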
2775 static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2776 {
2777         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2778
2779         struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2780         struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2781         struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2782
2783         PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2784                 "VDDC dependency on SCLK table is missing. This table is mandatory",
2785                 return -EINVAL);
2786         PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2787                 "VDDC dependency on SCLK table has to have at least one entry. This table is mandatory",
2788                 return -EINVAL);
2789
2790         PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2791                 "VDDC dependency on MCLK table is missing. This table is mandatory",
2792                 return -EINVAL);
2793         PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2794                 "VDDC dependency on MCLK table has to have at least one entry. This table is mandatory",
2795                 return -EINVAL);
2796
2797         data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2798         data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2799
2800         hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2801                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2802         hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2803                 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2804         hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2805                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2806
2807         if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2808                 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2809                 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2810         }
2811
2812         if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
2813                 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2814
2815         return 0;
2816 }
2817
2818 static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
2819 {
2820         kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2821         hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
2822         kfree(hwmgr->backend);
2823         hwmgr->backend = NULL;
2824
2825         return 0;
2826 }
2827
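/**
 * smu7_get_elb_voltages - Record leakage voltages based on the efuse ID.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 *
 * For each virtual voltage ID, look up the actual VDDC/VDDCI that the
 * efuse leakage ID maps to and append it to the leakage tables.
 */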
2828 static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
2829 {
2830         uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
2831         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2832         int i;
2833
2834         if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
2835                 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
2836                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2837                         if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
2838                                                                 virtual_voltage_id,
2839                                                                 efuse_voltage_id) == 0) {
2840                                 if (vddc != 0 && vddc != virtual_voltage_id) {
2841                                         data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
2842                                         data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
2843                                         data->vddc_leakage.count++;
2844                                 }
2845                                 if (vddci != 0 && vddci != virtual_voltage_id) {
2846                                         data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
2847                                         data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
2848                                         data->vddci_leakage.count++;
2849                                 }
2850                         }
2851                 }
2852         }
2853         return 0;
2854 }
2855
2856 #define LEAKAGE_ID_MSB                  463
2857 #define LEAKAGE_ID_LSB                  454
2858
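/**
 * smu7_update_edc_leakage_table - Pick and load the EDC/DIDT leakage table.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 *
 * Read the leakage ID from efuse and, depending on whether it falls below
 * the VBIOS hi/lo threshold, load the low- or high-leakage DPM7 table.
 */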
2859 static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
2860 {
2861         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2862         uint32_t efuse;
2863         uint16_t offset;
2864         int ret = 0;
2865
2866         if (data->disable_edc_leakage_controller)
2867                 return 0;
2868
2869         ret = atomctrl_get_edc_hilo_leakage_offset_table(hwmgr,
2870                                                          &data->edc_hilo_leakage_offset_from_vbios);
2871         if (ret)
2872                 return ret;
2873
2874         if (data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
2875             data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
2876                 atomctrl_read_efuse(hwmgr, LEAKAGE_ID_LSB, LEAKAGE_ID_MSB, &efuse);
2877                 if (efuse < data->edc_hilo_leakage_offset_from_vbios.usHiLoLeakageThreshold)
2878                         offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset;
2879                 else
2880                         offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset;
2881
2882                 ret = atomctrl_get_edc_leakage_table(hwmgr,
2883                                                      &data->edc_leakage_table,
2884                                                      offset);
2885                 if (ret)
2886                         return ret;
2887         }
2888
2889         return ret;
2890 }
2891
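/**
 * smu7_hwmgr_backend_init - Allocate and initialize the smu7 backend.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 *
 * Set up DPM defaults, resolve leakage voltages (EVV or efuse based),
 * complete the v0/v1 dependency tables and fill the platform descriptor.
 */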
2892 static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2893 {
2894         struct smu7_hwmgr *data;
2895         int result = 0;
2896
2897         data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2898         if (data == NULL)
2899                 return -ENOMEM;
2900
2901         hwmgr->backend = data;
2902         smu7_patch_voltage_workaround(hwmgr);
2903         smu7_init_dpm_defaults(hwmgr);
2904
2905         /* Get leakage voltage based on leakage ID. */
2906         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2907                         PHM_PlatformCaps_EVV)) {
2908                 result = smu7_get_evv_voltages(hwmgr);
2909                 if (result) {
2910                         pr_info("Get EVV Voltage failed. Abort driver loading!\n");
2911                         return -EINVAL;
2912                 }
2913         } else {
2914                 smu7_get_elb_voltages(hwmgr);
2915         }
2916
2917         if (hwmgr->pp_table_version == PP_TABLE_V1) {
2918                 smu7_complete_dependency_tables(hwmgr);
2919                 smu7_set_private_data_based_on_pptable_v1(hwmgr);
2920         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2921                 smu7_patch_dependency_tables_with_leakage(hwmgr);
2922                 smu7_set_private_data_based_on_pptable_v0(hwmgr);
2923         }
2924
2925         /* Initialize Dynamic State Adjustment Rule Settings */
2926         result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2927
2928         if (0 == result) {
2929                 struct amdgpu_device *adev = hwmgr->adev;
2930
2931                 data->is_tlu_enabled = false;
2932
2933                 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2934                                                         SMU7_MAX_HARDWARE_POWERLEVELS;
2935                 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2936                 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2937
2938                 data->pcie_gen_cap = adev->pm.pcie_gen_mask;
2939                 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2940                         data->pcie_spc_cap = 20;
2941                 else
2942                         data->pcie_spc_cap = 16;
2943                 data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
2944
2945                 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2946                 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5 MHz. */
2947                 hwmgr->platform_descriptor.clockStep.engineClock = 500;
2948                 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2949                 smu7_thermal_parameter_init(hwmgr);
2950         } else {
2951                 /* Ignore the return value here; we are cleaning up a mess. */
2952                 smu7_hwmgr_backend_fini(hwmgr);
2953         }
2954
2955         result = smu7_update_edc_leakage_table(hwmgr);
2956         if (result)
2957                 return result;
2958
2959         return 0;
2960 }
2961
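/**
 * smu7_force_dpm_highest - Force PCIe/SCLK/MCLK DPM to the highest level.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 *
 * For each clock domain the highest enabled level is the index of the top
 * set bit of its enable mask (e.g. a mask of 0b0110 gives level 2); that
 * level is then forced through the corresponding SMC message.
 */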
2962 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2963 {
2964         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2965         uint32_t level, tmp;
2966
2967         if (!data->pcie_dpm_key_disabled) {
2968                 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2969                         level = 0;
2970                         tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
2971                         while (tmp >>= 1)
2972                                 level++;
2973
2974                         if (level)
2975                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2976                                                 PPSMC_MSG_PCIeDPM_ForceLevel, level,
2977                                                 NULL);
2978                 }
2979         }
2980
2981         if (!data->sclk_dpm_key_disabled) {
2982                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2983                         level = 0;
2984                         tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2985                         while (tmp >>= 1)
2986                                 level++;
2987
2988                         if (level)
2989                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2990                                                 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2991                                                 (1 << level),
2992                                                 NULL);
2993                 }
2994         }
2995
2996         if (!data->mclk_dpm_key_disabled) {
2997                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2998                         level = 0;
2999                         tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
3000                         while (tmp >>= 1)
3001                                 level++;
3002
3003                         if (level)
3004                                 smum_send_msg_to_smc_with_parameter(hwmgr,
3005                                                 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3006                                                 (1 << level),
3007                                                 NULL);
3008                 }
3009         }
3010
3011         return 0;
3012 }
3013
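/**
 * smu7_upload_dpm_level_enable_mask - Push the DPM enable masks to the SMC.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 *
 * Apply the DAL minimum voltage request (v1 tables only) and re-send the
 * current SCLK/MCLK enable masks so all enabled levels are selectable.
 */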
3014 static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
3015 {
3016         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3017
3018         if (hwmgr->pp_table_version == PP_TABLE_V1)
3019                 phm_apply_dal_min_voltage_request(hwmgr);
3020         /* TODO: do the same for v0 Iceland and CI */
3021
3022         if (!data->sclk_dpm_key_disabled) {
3023                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
3024                         smum_send_msg_to_smc_with_parameter(hwmgr,
3025                                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
3026                                         data->dpm_level_enable_mask.sclk_dpm_enable_mask,
3027                                         NULL);
3028         }
3029
3030         if (!data->mclk_dpm_key_disabled) {
3031                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
3032                         smum_send_msg_to_smc_with_parameter(hwmgr,
3033                                         PPSMC_MSG_MCLKDPM_SetEnabledMask,
3034                                         data->dpm_level_enable_mask.mclk_dpm_enable_mask,
3035                                         NULL);
3036         }
3037
3038         return 0;
3039 }
3040
3041 static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3042 {
3043         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3044
3045         if (!smum_is_dpm_running(hwmgr))
3046                 return -EINVAL;
3047
3048         if (!data->pcie_dpm_key_disabled) {
3049                 smum_send_msg_to_smc(hwmgr,
3050                                 PPSMC_MSG_PCIeDPM_UnForceLevel,
3051                                 NULL);
3052         }
3053
3054         return smu7_upload_dpm_level_enable_mask(hwmgr);
3055 }
3056
3057 static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3058 {
3059         struct smu7_hwmgr *data =
3060                         (struct smu7_hwmgr *)(hwmgr->backend);
3061         uint32_t level;
3062
3063         if (!data->sclk_dpm_key_disabled) {
3064                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3065                         level = phm_get_lowest_enabled_level(hwmgr,
3066                                                               data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3067                         smum_send_msg_to_smc_with_parameter(hwmgr,
3068                                                             PPSMC_MSG_SCLKDPM_SetEnabledMask,
3069                                                             (1 << level),
3070                                                             NULL);
3071                 }
3072         }
3073
3074         if (!data->mclk_dpm_key_disabled) {
3075                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3076                         level = phm_get_lowest_enabled_level(hwmgr,
3077                                                               data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3078                         smum_send_msg_to_smc_with_parameter(hwmgr,
3079                                                             PPSMC_MSG_MCLKDPM_SetEnabledMask,
3080                                                             (1 << level),
3081                                                             NULL);
3082                 }
3083         }
3084
3085         if (!data->pcie_dpm_key_disabled) {
3086                 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3087                         level = phm_get_lowest_enabled_level(hwmgr,
3088                                                               data->dpm_level_enable_mask.pcie_dpm_enable_mask);
3089                         smum_send_msg_to_smc_with_parameter(hwmgr,
3090                                                             PPSMC_MSG_PCIeDPM_ForceLevel,
3091                                                             (level),
3092                                                             NULL);
3093                 }
3094         }
3095
3096         return 0;
3097 }
3098
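/**
 * smu7_get_profiling_clk - Select SCLK/MCLK/PCIe masks for a profile level.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * @level: the requested profiling level
 * @sclk_mask: output SCLK level mask
 * @mclk_mask: output MCLK level mask
 * @pcie_mask: output PCIe level mask
 *
 * The standard profile targets the second-highest MCLK (or the only one)
 * and the highest SCLK not above that MCLK scaled by the golden SCLK:MCLK
 * ratio; MIN/PEAK levels collapse the masks to the lowest or highest
 * entries, and PCIe is always set to the top speed entry.
 */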
3099 static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
3100                                 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
3101 {
3102         uint32_t percentage;
3103         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3104         struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
3105         int32_t tmp_mclk;
3106         int32_t tmp_sclk;
3107         int32_t count;
3108
3109         if (golden_dpm_table->mclk_table.count < 1)
3110                 return -EINVAL;
3111
3112         percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
3113                         golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
3114
3115         if (golden_dpm_table->mclk_table.count == 1) {
3116                 percentage = 70;
3117                 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
3118                 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
3119         } else {
3120                 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
3121                 *mclk_mask = golden_dpm_table->mclk_table.count - 2;
3122         }
3123
3124         tmp_sclk = tmp_mclk * percentage / 100;
3125
3126         if (hwmgr->pp_table_version == PP_TABLE_V0) {
3127                 for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
3128                         count >= 0; count--) {
3129                         if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
3130                                 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
3131                                 *sclk_mask = count;
3132                                 break;
3133                         }
3134                 }
3135                 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3136                         *sclk_mask = 0;
3137                         tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
3138                 }
3139
3140                 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3141                         *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
3142         } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3143                 struct phm_ppt_v1_information *table_info =
3144                                 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3145
3146                 for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
3147                         if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
3148                                 tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
3149                                 *sclk_mask = count;
3150                                 break;
3151                         }
3152                 }
3153                 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3154                         *sclk_mask = 0;
3155                         tmp_sclk =  table_info->vdd_dep_on_sclk->entries[0].clk;
3156                 }
3157
3158                 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3159                         *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
3160         }
3161
3162         if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
3163                 *mclk_mask = 0;
3164         else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3165                 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
3166
3167         *pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
3168         hwmgr->pstate_sclk = tmp_sclk;
3169         hwmgr->pstate_mclk = tmp_mclk;
3170
3171         return 0;
3172 }
3173
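/**
 * smu7_force_dpm_level - Apply a forced DPM level request.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * @level: the requested forced level
 *
 * Dispatch to the highest/lowest/auto handlers or, for the profiling
 * levels, force the masks computed by smu7_get_profiling_clk(); entering
 * or leaving PROFILE_PEAK also pins or restores the fan speed.
 */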
3174 static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
3175                                 enum amd_dpm_forced_level level)
3176 {
3177         int ret = 0;
3178         uint32_t sclk_mask = 0;
3179         uint32_t mclk_mask = 0;
3180         uint32_t pcie_mask = 0;
3181
3182         if (hwmgr->pstate_sclk == 0)
3183                 smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
3184
3185         switch (level) {
3186         case AMD_DPM_FORCED_LEVEL_HIGH:
3187                 ret = smu7_force_dpm_highest(hwmgr);
3188                 break;
3189         case AMD_DPM_FORCED_LEVEL_LOW:
3190                 ret = smu7_force_dpm_lowest(hwmgr);
3191                 break;
3192         case AMD_DPM_FORCED_LEVEL_AUTO:
3193                 ret = smu7_unforce_dpm_levels(hwmgr);
3194                 break;
3195         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
3196         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
3197         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
3198         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
3199                 ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
3200                 if (ret)
3201                         return ret;
3202                 smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
3203                 smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
3204                 smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
3205                 break;
3206         case AMD_DPM_FORCED_LEVEL_MANUAL:
3207         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
3208         default:
3209                 break;
3210         }
3211
3212         if (!ret) {
3213                 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3214                         smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
3215                 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3216                         smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
3217         }
3218         return ret;
3219 }
3220
3221 static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
3222 {
3223         return sizeof(struct smu7_power_state);
3224 }
3225
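/**
 * smu7_vblank_too_short - Check whether vblank allows an MCLK switch.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * @vblank_time_us: the shortest vblank time across displays, in us
 *
 * Compare the vblank time against the chip- and memory-type-specific
 * switch time; returns true when an MCLK switch would not fit.
 */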
3226 static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
3227                                  uint32_t vblank_time_us)
3228 {
3229         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3230         uint32_t switch_limit_us;
3231
3232         switch (hwmgr->chip_id) {
3233         case CHIP_POLARIS10:
3234         case CHIP_POLARIS11:
3235         case CHIP_POLARIS12:
3236                 if (hwmgr->is_kicker || (hwmgr->chip_id == CHIP_POLARIS12))
3237                         switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
3238                 else
3239                         switch_limit_us = data->is_memory_gddr5 ? 200 : 150;
3240                 break;
3241         case CHIP_VEGAM:
3242                 switch_limit_us = 30;
3243                 break;
3244         default:
3245                 switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
3246                 break;
3247         }
3248
3249         if (vblank_time_us < switch_limit_us)
3250                 return true;
3251         else
3252                 return false;
3253 }
3254
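/**
 * smu7_apply_state_adjust_rules - Clamp a requested power state to limits.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * @request_ps: the power state being adjusted
 * @current_ps: the currently active power state
 *
 * Cap the two performance levels at the AC or DC limits, honor the stable
 * pstate and display minimum clocks, and decide whether MCLK switching
 * must be disabled (or, on Polaris and later, ignored) for this display
 * configuration.
 */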
3255 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3256                                 struct pp_power_state *request_ps,
3257                         const struct pp_power_state *current_ps)
3258 {
3259         struct amdgpu_device *adev = hwmgr->adev;
3260         struct smu7_power_state *smu7_ps =
3261                                 cast_phw_smu7_power_state(&request_ps->hardware);
3262         uint32_t sclk;
3263         uint32_t mclk;
3264         struct PP_Clocks minimum_clocks = {0};
3265         bool disable_mclk_switching;
3266         bool disable_mclk_switching_for_frame_lock;
3267         bool disable_mclk_switching_for_display;
3268         const struct phm_clock_and_voltage_limits *max_limits;
3269         uint32_t i;
3270         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3271         struct phm_ppt_v1_information *table_info =
3272                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
3273         int32_t count;
3274         int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3275         uint32_t latency;
3276         bool latency_allowed = false;
3277
3278         data->battery_state = (PP_StateUILabel_Battery ==
3279                         request_ps->classification.ui_label);
3280         data->mclk_ignore_signal = false;
3281
3282         PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
3283                                  "VI should always have 2 performance levels",
3284                                 );
3285
3286         max_limits = adev->pm.ac_power ?
3287                         &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3288                         &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3289
3290         /* Cap the performance level clocks at the DC max limits when running on DC power. */
3291         if (!adev->pm.ac_power) {
3292                 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3293                         if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
3294                                 smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
3295                         if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
3296                                 smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
3297                 }
3298         }
3299
3300         minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
3301         minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
3302
3303         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3304                         PHM_PlatformCaps_StablePState)) {
3305                 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3306                 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
3307
3308                 for (count = table_info->vdd_dep_on_sclk->count - 1;
3309                                 count >= 0; count--) {
3310                         if (stable_pstate_sclk >=
3311                                         table_info->vdd_dep_on_sclk->entries[count].clk) {
3312                                 stable_pstate_sclk =
3313                                                 table_info->vdd_dep_on_sclk->entries[count].clk;
3314                                 break;
3315                         }
3316                 }
3317
3318                 if (count < 0)
3319                         stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3320
3321                 stable_pstate_mclk = max_limits->mclk;
3322
3323                 minimum_clocks.engineClock = stable_pstate_sclk;
3324                 minimum_clocks.memoryClock = stable_pstate_mclk;
3325         }
3326
3327         disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3328                                     hwmgr->platform_descriptor.platformCaps,
3329                                     PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3330
3331         disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
3332                                                 !hwmgr->display_config->multi_monitor_in_sync) ||
3333                                                 (hwmgr->display_config->num_display &&
3334                                                 smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
3335
3336         disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
3337                                          disable_mclk_switching_for_display;
3338
3339         if (hwmgr->display_config->num_display == 0) {
3340                 if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM)
3341                         data->mclk_ignore_signal = true;
3342                 else
3343                         disable_mclk_switching = false;
3344         }
3345
3346         sclk = smu7_ps->performance_levels[0].engine_clock;
3347         mclk = smu7_ps->performance_levels[0].memory_clock;
3348
3349         if (disable_mclk_switching &&
3350             (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
3351             hwmgr->chip_id <= CHIP_VEGAM)))
3352                 mclk = smu7_ps->performance_levels
3353                 [smu7_ps->performance_level_count - 1].memory_clock;
3354
3355         if (sclk < minimum_clocks.engineClock)
3356                 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3357                                 max_limits->sclk : minimum_clocks.engineClock;
3358
3359         if (mclk < minimum_clocks.memoryClock)
3360                 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3361                                 max_limits->mclk : minimum_clocks.memoryClock;
3362
3363         smu7_ps->performance_levels[0].engine_clock = sclk;
3364         smu7_ps->performance_levels[0].memory_clock = mclk;
3365
3366         smu7_ps->performance_levels[1].engine_clock =
3367                 (smu7_ps->performance_levels[1].engine_clock >=
3368                                 smu7_ps->performance_levels[0].engine_clock) ?
3369                                                 smu7_ps->performance_levels[1].engine_clock :
3370                                                 smu7_ps->performance_levels[0].engine_clock;
3371
3372         if (disable_mclk_switching) {
3373                 if (mclk < smu7_ps->performance_levels[1].memory_clock)
3374                         mclk = smu7_ps->performance_levels[1].memory_clock;
3375
3376                 if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM) {
3377                         if (disable_mclk_switching_for_display) {
3378                                 /* Find the lowest MCLK frequency that is within
3379                                  * the tolerable latency defined in DAL
3380                                  */
3381                                 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
3382                                 for (i = 0; i < data->mclk_latency_table.count; i++) {
3383                                         if (data->mclk_latency_table.entries[i].latency <= latency) {
3384                                                 latency_allowed = true;
3385
3386                                                 if ((data->mclk_latency_table.entries[i].frequency >=
3387                                                                 smu7_ps->performance_levels[0].memory_clock) &&
3388                                                     (data->mclk_latency_table.entries[i].frequency <=
3389                                                                 smu7_ps->performance_levels[1].memory_clock)) {
3390                                                         mclk = data->mclk_latency_table.entries[i].frequency;
3391                                                         break;
3392                                                 }
3393                                         }
3394                                 }
3395                                 if ((i >= data->mclk_latency_table.count - 1) && !latency_allowed) {
3396                                         data->mclk_ignore_signal = true;
3397                                 } else {
3398                                         data->mclk_ignore_signal = false;
3399                                 }
3400                         }
3401
3402                         if (disable_mclk_switching_for_frame_lock)
3403                                 mclk = smu7_ps->performance_levels[1].memory_clock;
3404                 }
3405
3406                 smu7_ps->performance_levels[0].memory_clock = mclk;
3407
3408                 if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
3409                       hwmgr->chip_id <= CHIP_VEGAM))
3410                         smu7_ps->performance_levels[1].memory_clock = mclk;
3411         } else {
3412                 if (smu7_ps->performance_levels[1].memory_clock <
3413                                 smu7_ps->performance_levels[0].memory_clock)
3414                         smu7_ps->performance_levels[1].memory_clock =
3415                                         smu7_ps->performance_levels[0].memory_clock;
3416         }
3417
3418         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3419                         PHM_PlatformCaps_StablePState)) {
3420                 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3421                         smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
3422                         smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
3423                         smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
3424                         smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
3425                 }
3426         }
3427         return 0;
3428 }
3429
3430
3431 static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3432 {
3433         struct pp_power_state  *ps;
3434         struct smu7_power_state  *smu7_ps;
3435
3436         if (hwmgr == NULL)
3437                 return -EINVAL;
3438
3439         ps = hwmgr->request_ps;
3440
3441         if (ps == NULL)
3442                 return -EINVAL;
3443
3444         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3445
3446         if (low)
3447                 return smu7_ps->performance_levels[0].memory_clock;
3448         else
3449                 return smu7_ps->performance_levels
3450                                 [smu7_ps->performance_level_count-1].memory_clock;
3451 }
3452
3453 static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3454 {
3455         struct pp_power_state  *ps;
3456         struct smu7_power_state  *smu7_ps;
3457
3458         if (hwmgr == NULL)
3459                 return -EINVAL;
3460
3461         ps = hwmgr->request_ps;
3462
3463         if (ps == NULL)
3464                 return -EINVAL;
3465
3466         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3467
3468         if (low)
3469                 return smu7_ps->performance_levels[0].engine_clock;
3470         else
3471                 return smu7_ps->performance_levels
3472                                 [smu7_ps->performance_level_count-1].engine_clock;
3473 }
3474
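/**
 * smu7_dpm_patch_boot_state - Fill the boot state from the firmware info.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * @hw_ps: the boot power state to patch
 *
 * Cache the VBIOS boot clocks and voltages and use them, together with
 * the current PCIe speed and lane count, as performance level 0.
 */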
3475 static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
3476                                         struct pp_hw_power_state *hw_ps)
3477 {
3478         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3479         struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
3480         ATOM_FIRMWARE_INFO_V2_2 *fw_info;
3481         uint16_t size;
3482         uint8_t frev, crev;
3483         int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
3484
3485         /* First retrieve the Boot clocks and VDDC from the firmware info table.
3486          * We assume here that fw_info is unchanged if this call fails.
3487          */
3488         fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
3489                         &size, &frev, &crev);
3490         if (!fw_info)
3491                 /* During a test, there is no firmware info table. */
3492                 return 0;
3493
3494         /* Patch the state. */
3495         data->vbios_boot_state.sclk_bootup_value =
3496                         le32_to_cpu(fw_info->ulDefaultEngineClock);
3497         data->vbios_boot_state.mclk_bootup_value =
3498                         le32_to_cpu(fw_info->ulDefaultMemoryClock);
3499         data->vbios_boot_state.mvdd_bootup_value =
3500                         le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
3501         data->vbios_boot_state.vddc_bootup_value =
3502                         le16_to_cpu(fw_info->usBootUpVDDCVoltage);
3503         data->vbios_boot_state.vddci_bootup_value =
3504                         le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
3505         data->vbios_boot_state.pcie_gen_bootup_value =
3506                         smu7_get_current_pcie_speed(hwmgr);
3507
3508         data->vbios_boot_state.pcie_lane_bootup_value =
3509                         (uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
3510
3511         /* set boot power state */
3512         ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
3513         ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
3514         ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
3515         ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
3516
3517         return 0;
3518 }
3519
3520 static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
3521 {
3522         int result;
3523         unsigned long ret = 0;
3524
3525         if (hwmgr->pp_table_version == PP_TABLE_V0) {
3526                 result = pp_tables_get_num_of_entries(hwmgr, &ret);
3527                 return result ? 0 : ret;
3528         } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3529                 result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
3530                 return result;
3531         }
3532         return 0;
3533 }
3534
3535 static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
3536                 void *state, struct pp_power_state *power_state,
3537                 void *pp_table, uint32_t classification_flag)
3538 {
3539         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3540         struct smu7_power_state  *smu7_power_state =
3541                         (struct smu7_power_state *)(&(power_state->hardware));
3542         struct smu7_performance_level *performance_level;
3543         ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3544         ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3545                         (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
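        /* The SCLK/MCLK dependency subtables are addressed by byte offsets
         * from the start of the powerplay table.
         */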
3546         PPTable_Generic_SubTable_Header *sclk_dep_table =
3547                         (PPTable_Generic_SubTable_Header *)
3548                         (((unsigned long)powerplay_table) +
3549                                 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3550
3551         ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3552                         (ATOM_Tonga_MCLK_Dependency_Table *)
3553                         (((unsigned long)powerplay_table) +
3554                                 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3555
3556         /* The following fields are not initialized here: id, orderedList, allStatesList. */
3557         power_state->classification.ui_label =
3558                         (le16_to_cpu(state_entry->usClassification) &
3559                         ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3560                         ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3561         power_state->classification.flags = classification_flag;
3562         /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
3563
3564         power_state->classification.temporary_state = false;
3565         power_state->classification.to_be_deleted = false;
3566
3567         power_state->validation.disallowOnDC =
3568                         (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3569                                         ATOM_Tonga_DISALLOW_ON_DC));
3570
3571         power_state->pcie.lanes = 0;
3572
3573         power_state->display.disableFrameModulation = false;
3574         power_state->display.limitRefreshrate = false;
3575         power_state->display.enableVariBright =
3576                         (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3577                                         ATOM_Tonga_ENABLE_VARIBRIGHT));
3578
3579         power_state->validation.supportedPowerLevels = 0;
3580         power_state->uvd_clocks.VCLK = 0;
3581         power_state->uvd_clocks.DCLK = 0;
3582         power_state->temperatures.min = 0;
3583         power_state->temperatures.max = 0;
3584
3585         performance_level = &(smu7_power_state->performance_levels
3586                         [smu7_power_state->performance_level_count++]);
3587
3588         PP_ASSERT_WITH_CODE(
3589                         (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3590                         "Performance level count exceeds SMC limit!",
3591                         return -EINVAL);
3592
3593         PP_ASSERT_WITH_CODE(
3594                         (smu7_power_state->performance_level_count <=
3595                                         hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3596                         "Performance level count exceeds driver limit!",
3597                         return -EINVAL);
3598
3599         /* Performance levels are arranged from low to high. */
3600         performance_level->memory_clock = mclk_dep_table->entries
3601                         [state_entry->ucMemoryClockIndexLow].ulMclk;
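        /* ucRevId selects the SCLK dependency record layout: revision 0 is
         * the Tonga format, revision 1 the extended Polaris format.
         */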
3602         if (sclk_dep_table->ucRevId == 0)
3603                 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3604                         [state_entry->ucEngineClockIndexLow].ulSclk;
3605         else if (sclk_dep_table->ucRevId == 1)
3606                 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3607                         [state_entry->ucEngineClockIndexLow].ulSclk;
3608         performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3609                         state_entry->ucPCIEGenLow);
3610         performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3611                         state_entry->ucPCIELaneLow);
3612
3613         performance_level = &(smu7_power_state->performance_levels
3614                         [smu7_power_state->performance_level_count++]);
3615         performance_level->memory_clock = mclk_dep_table->entries
3616                         [state_entry->ucMemoryClockIndexHigh].ulMclk;
3617
3618         if (sclk_dep_table->ucRevId == 0)
3619                 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3620                         [state_entry->ucEngineClockIndexHigh].ulSclk;
3621         else if (sclk_dep_table->ucRevId == 1)
3622                 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3623                         [state_entry->ucEngineClockIndexHigh].ulSclk;
3624
3625         performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3626                         state_entry->ucPCIEGenHigh);
3627         performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3628                         state_entry->ucPCIELaneHigh);
3629
3630         return 0;
3631 }
3632
3633 static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
3634                 unsigned long entry_index, struct pp_power_state *state)
3635 {
3636         int result;
3637         struct smu7_power_state *ps;
3638         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3639         struct phm_ppt_v1_information *table_info =
3640                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
3641         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
3642                         table_info->vdd_dep_on_mclk;
3643
3644         state->hardware.magic = PHM_VIslands_Magic;
3645
3646         ps = (struct smu7_power_state *)(&state->hardware);
3647
3648         result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
3649                         smu7_get_pp_table_entry_callback_func_v1);
3650
3651         /* This is the earliest point at which we have both the dependency tables and
3652          * the VBIOS boot state (PP_Tables_GetPowerPlayTableEntry retrieves the latter).
3653          * If there is only one VDDCI/MCLK level, check whether it matches the boot state.
3654          */
3655         if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3656                 if (dep_mclk_table->entries[0].clk !=
3657                                 data->vbios_boot_state.mclk_bootup_value)
3658                         pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3659                                         "does not match VBIOS boot MCLK level\n");
3660                 if (dep_mclk_table->entries[0].vddci !=
3661                                 data->vbios_boot_state.vddci_bootup_value)
3662                         pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3663                                         "does not match VBIOS boot VDDCI level\n");
3664         }
3665
3666         /* set DC compatible flag if this state supports DC */
3667         if (!state->validation.disallowOnDC)
3668                 ps->dc_compatible = true;
3669
3670         if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3671                 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3672
3673         ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3674         ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3675
3676         if (!result) {
3677                 uint32_t i;
3678
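                /* Track the min/max PCIe gen and lane width seen across the
                 * levels of performance/battery states; these bounds are
                 * consumed when the default PCIe DPM table is set up.
                 */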
3679                 switch (state->classification.ui_label) {
3680                 case PP_StateUILabel_Performance:
3681                         data->use_pcie_performance_levels = true;
3682                         for (i = 0; i < ps->performance_level_count; i++) {
3683                                 if (data->pcie_gen_performance.max <
3684                                                 ps->performance_levels[i].pcie_gen)
3685                                         data->pcie_gen_performance.max =
3686                                                         ps->performance_levels[i].pcie_gen;
3687
3688                                 if (data->pcie_gen_performance.min >
3689                                                 ps->performance_levels[i].pcie_gen)
3690                                         data->pcie_gen_performance.min =
3691                                                         ps->performance_levels[i].pcie_gen;
3692
3693                                 if (data->pcie_lane_performance.max <
3694                                                 ps->performance_levels[i].pcie_lane)
3695                                         data->pcie_lane_performance.max =
3696                                                         ps->performance_levels[i].pcie_lane;
3697                                 if (data->pcie_lane_performance.min >
3698                                                 ps->performance_levels[i].pcie_lane)
3699                                         data->pcie_lane_performance.min =
3700                                                         ps->performance_levels[i].pcie_lane;
3701                         }
3702                         break;
3703                 case PP_StateUILabel_Battery:
3704                         data->use_pcie_power_saving_levels = true;
3705
3706                         for (i = 0; i < ps->performance_level_count; i++) {
3707                                 if (data->pcie_gen_power_saving.max <
3708                                                 ps->performance_levels[i].pcie_gen)
3709                                         data->pcie_gen_power_saving.max =
3710                                                         ps->performance_levels[i].pcie_gen;
3711
3712                                 if (data->pcie_gen_power_saving.min >
3713                                                 ps->performance_levels[i].pcie_gen)
3714                                         data->pcie_gen_power_saving.min =
3715                                                         ps->performance_levels[i].pcie_gen;
3716
3717                                 if (data->pcie_lane_power_saving.max <
3718                                                 ps->performance_levels[i].pcie_lane)
3719                                         data->pcie_lane_power_saving.max =
3720                                                         ps->performance_levels[i].pcie_lane;
3721
3722                                 if (data->pcie_lane_power_saving.min >
3723                                                 ps->performance_levels[i].pcie_lane)
3724                                         data->pcie_lane_power_saving.min =
3725                                                         ps->performance_levels[i].pcie_lane;
3726                         }
3727                         break;
3728                 default:
3729                         break;
3730                 }
3731         }
3732         return 0;
3733 }
3734
3735 static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
3736                                         struct pp_hw_power_state *power_state,
3737                                         unsigned int index, const void *clock_info)
3738 {
3739         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3740         struct smu7_power_state  *ps = cast_phw_smu7_power_state(power_state);
3741         const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
3742         struct smu7_performance_level *performance_level;
3743         uint32_t engine_clock, memory_clock;
3744         uint16_t pcie_gen_from_bios;
3745
3746         engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
3747         memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
3748
3749         if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
3750                 data->highest_mclk = memory_clock;
3751
3752         PP_ASSERT_WITH_CODE(
3753                         (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3754                         "Performance level count exceeds SMC limit!",
3755                         return -EINVAL);
3756
3757         PP_ASSERT_WITH_CODE(
3758                         (ps->performance_level_count <
3759                                         hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3760                         "Performance level count exceeds driver limit, skipping!",
3761                         return 0);
3762
3763         performance_level = &(ps->performance_levels
3764                         [ps->performance_level_count++]);
3765
3766         /* Performance levels are arranged from low to high. */
3767         performance_level->memory_clock = memory_clock;
3768         performance_level->engine_clock = engine_clock;
3769
3770         pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
3771
3772         performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
3773         performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
3774
3775         return 0;
3776 }
3777
3778 static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
3779                 unsigned long entry_index, struct pp_power_state *state)
3780 {
3781         int result;
3782         struct smu7_power_state *ps;
3783         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3784         struct phm_clock_voltage_dependency_table *dep_mclk_table =
3785                         hwmgr->dyn_state.vddci_dependency_on_mclk;
3786
3787         memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
3788
3789         state->hardware.magic = PHM_VIslands_Magic;
3790
3791         ps = (struct smu7_power_state *)(&state->hardware);
3792
3793         result = pp_tables_get_entry(hwmgr, entry_index, state,
3794                         smu7_get_pp_table_entry_callback_func_v0);
3795
3796         /*
3797          * This is the earliest point at which we have both the
3798          * dependency table and the VBIOS boot state, as
3799          * PP_Tables_GetPowerPlayTableEntry retrieves the latter.
3800          * If there is only one VDDCI/MCLK level, check whether it
3801          * matches the VBIOS boot state.
3802          */
3803         if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3804                 if (dep_mclk_table->entries[0].clk !=
3805                                 data->vbios_boot_state.mclk_bootup_value)
3806                         pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3807                                         "does not match VBIOS boot MCLK level\n");
3808                 if (dep_mclk_table->entries[0].v !=
3809                                 data->vbios_boot_state.vddci_bootup_value)
3810                         pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3811                                         "does not match VBIOS boot VDDCI level\n");
3812         }
3813
3814         /* set DC compatible flag if this state supports DC */
3815         if (!state->validation.disallowOnDC)
3816                 ps->dc_compatible = true;
3817
3818         if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3819                 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3820
3821         ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3822         ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3823
3824         if (!result) {
3825                 uint32_t i;
3826
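                /* Same PCIe gen/lane bookkeeping as in the v1 path above:
                 * record the min/max values seen per UI label.
                 */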
3827                 switch (state->classification.ui_label) {
3828                 case PP_StateUILabel_Performance:
3829                         data->use_pcie_performance_levels = true;
3830
3831                         for (i = 0; i < ps->performance_level_count; i++) {
3832                                 if (data->pcie_gen_performance.max <
3833                                                 ps->performance_levels[i].pcie_gen)
3834                                         data->pcie_gen_performance.max =
3835                                                         ps->performance_levels[i].pcie_gen;
3836
3837                                 if (data->pcie_gen_performance.min >
3838                                                 ps->performance_levels[i].pcie_gen)
3839                                         data->pcie_gen_performance.min =
3840                                                         ps->performance_levels[i].pcie_gen;
3841
3842                                 if (data->pcie_lane_performance.max <
3843                                                 ps->performance_levels[i].pcie_lane)
3844                                         data->pcie_lane_performance.max =
3845                                                         ps->performance_levels[i].pcie_lane;
3846
3847                                 if (data->pcie_lane_performance.min >
3848                                                 ps->performance_levels[i].pcie_lane)
3849                                         data->pcie_lane_performance.min =
3850                                                         ps->performance_levels[i].pcie_lane;
3851                         }
3852                         break;
3853                 case PP_StateUILabel_Battery:
3854                         data->use_pcie_power_saving_levels = true;
3855
3856                         for (i = 0; i < ps->performance_level_count; i++) {
3857                                 if (data->pcie_gen_power_saving.max <
3858                                                 ps->performance_levels[i].pcie_gen)
3859                                         data->pcie_gen_power_saving.max =
3860                                                         ps->performance_levels[i].pcie_gen;
3861
3862                                 if (data->pcie_gen_power_saving.min >
3863                                                 ps->performance_levels[i].pcie_gen)
3864                                         data->pcie_gen_power_saving.min =
3865                                                         ps->performance_levels[i].pcie_gen;
3866
3867                                 if (data->pcie_lane_power_saving.max <
3868                                                 ps->performance_levels[i].pcie_lane)
3869                                         data->pcie_lane_power_saving.max =
3870                                                         ps->performance_levels[i].pcie_lane;
3871
3872                                 if (data->pcie_lane_power_saving.min >
3873                                                 ps->performance_levels[i].pcie_lane)
3874                                         data->pcie_lane_power_saving.min =
3875                                                         ps->performance_levels[i].pcie_lane;
3876                         }
3877                         break;
3878                 default:
3879                         break;
3880                 }
3881         }
3882         return 0;
3883 }
3884
3885 static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3886                 unsigned long entry_index, struct pp_power_state *state)
3887 {
3888         if (hwmgr->pp_table_version == PP_TABLE_V0)
3889                 return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3890         else if (hwmgr->pp_table_version == PP_TABLE_V1)
3891                 return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3892
3893         return 0;
3894 }
3895
3896 static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
3897 {
3898         struct amdgpu_device *adev = hwmgr->adev;
3899         int i;
3900         u32 tmp = 0;
3901
3902         if (!query)
3903                 return -EINVAL;
3904
3905         /*
3906          * PPSMC_MSG_GetCurrPkgPwr is not supported on:
3907          *  - Hawaii
3908          *  - Bonaire
3909          *  - Fiji
3910          *  - Tonga
3911          */
3912         if ((adev->asic_type != CHIP_HAWAII) &&
3913             (adev->asic_type != CHIP_BONAIRE) &&
3914             (adev->asic_type != CHIP_FIJI) &&
3915             (adev->asic_type != CHIP_TONGA)) {
3916                 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp);
3917                 *query = tmp;
3918
3919                 if (tmp != 0)
3920                         return 0;
3921         }
3922
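        /* Fallback: start SMU power status logging and poll the
         * SMU_PM_STATUS_95 scratch register (up to 10 samples, 500 ms
         * apart) until a nonzero power reading shows up.
         */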
3923         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL);
3924         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3925                                                         ixSMU_PM_STATUS_95, 0);
3926
3927         for (i = 0; i < 10; i++) {
3928                 msleep(500);
3929                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL);
3930                 tmp = cgs_read_ind_register(hwmgr->device,
3931                                                 CGS_IND_REG__SMC,
3932                                                 ixSMU_PM_STATUS_95);
3933                 if (tmp != 0)
3934                         break;
3935         }
3936         *query = tmp;
3937
3938         return 0;
3939 }
3940
3941 static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3942                             void *value, int *size)
3943 {
3944         uint32_t sclk, mclk, activity_percent;
3945         uint32_t offset, val_vid;
3946         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3947
3948         /* size must be at least 4 bytes for all sensors */
3949         if (*size < 4)
3950                 return -EINVAL;
3951
3952         switch (idx) {
3953         case AMDGPU_PP_SENSOR_GFX_SCLK:
3954                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
3955                 *((uint32_t *)value) = sclk;
3956                 *size = 4;
3957                 return 0;
3958         case AMDGPU_PP_SENSOR_GFX_MCLK:
3959                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
3960                 *((uint32_t *)value) = mclk;
3961                 *size = 4;
3962                 return 0;
3963         case AMDGPU_PP_SENSOR_GPU_LOAD:
3964         case AMDGPU_PP_SENSOR_MEM_LOAD:
3965                 offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
3966                                                                 SMU_SoftRegisters,
3967                                                                 (idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
3968                                                                 AverageGraphicsActivity:
3969                                                                 AverageMemoryActivity);
3970
3971                 activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
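                /* The SMU reports activity scaled by 256 (8.8 fixed point);
                 * adding 0x80 rounds to nearest before the shift down to an
                 * integer percentage.
                 */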
3972                 activity_percent += 0x80;
3973                 activity_percent >>= 8;
3974                 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3975                 *size = 4;
3976                 return 0;
3977         case AMDGPU_PP_SENSOR_GPU_TEMP:
3978                 *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
3979                 *size = 4;
3980                 return 0;
3981         case AMDGPU_PP_SENSOR_UVD_POWER:
3982                 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3983                 *size = 4;
3984                 return 0;
3985         case AMDGPU_PP_SENSOR_VCE_POWER:
3986                 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3987                 *size = 4;
3988                 return 0;
3989         case AMDGPU_PP_SENSOR_GPU_POWER:
3990                 return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
3991         case AMDGPU_PP_SENSOR_VDDGFX:
3992                 if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
3993                     (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
3994                         val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
3995                                         CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
3996                 else
3997                         val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
3998                                         CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);
3999
4000                 *((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
4001                 return 0;
4002         default:
4003                 return -EINVAL;
4004         }
4005 }
4006
4007 static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
4008 {
4009         const struct phm_set_power_state_input *states =
4010                         (const struct phm_set_power_state_input *)input;
4011         const struct smu7_power_state *smu7_ps =
4012                         cast_const_phw_smu7_power_state(states->pnew_state);
4013         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4014         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4015         uint32_t sclk = smu7_ps->performance_levels
4016                         [smu7_ps->performance_level_count - 1].engine_clock;
4017         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4018         uint32_t mclk = smu7_ps->performance_levels
4019                         [smu7_ps->performance_level_count - 1].memory_clock;
4020         struct PP_Clocks min_clocks = {0};
4021         uint32_t i;
4022
4023         for (i = 0; i < sclk_table->count; i++) {
4024                 if (sclk == sclk_table->dpm_levels[i].value)
4025                         break;
4026         }
4027
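        /* The requested SCLK is not an existing DPM level; if it is above
         * the current top level, treat it as an overdrive request: stretch
         * the highest level and flag the SCLK table for re-upload.
         */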
4028         if (i >= sclk_table->count) {
4029                 if (sclk > sclk_table->dpm_levels[i-1].value) {
4030                         data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4031                         sclk_table->dpm_levels[i-1].value = sclk;
4032                 }
4033         } else {
4034                 /* TODO: Check SCLK in DAL's minimum clocks
4035                  * in case DeepSleep divider update is required.
4036                  */
4037                 if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
4038                         (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
4039                                 data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4040                         data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4041         }
4042
4043         for (i = 0; i < mclk_table->count; i++) {
4044                 if (mclk == mclk_table->dpm_levels[i].value)
4045                         break;
4046         }
4047
4048         if (i >= mclk_table->count) {
4049                 if (mclk > mclk_table->dpm_levels[i-1].value) {
4050                         data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4051                         mclk_table->dpm_levels[i-1].value = mclk;
4052                 }
4053         }
4054
4055         if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4056                 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4057
4058         return 0;
4059 }
4060
4061 static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
4062                 const struct smu7_power_state *smu7_ps)
4063 {
4064         uint32_t i;
4065         uint32_t sclk, max_sclk = 0;
4066         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4067         struct smu7_dpm_table *dpm_table = &data->dpm_table;
4068
4069         for (i = 0; i < smu7_ps->performance_level_count; i++) {
4070                 sclk = smu7_ps->performance_levels[i].engine_clock;
4071                 if (max_sclk < sclk)
4072                         max_sclk = sclk;
4073         }
4074
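        /* Map the DPM level index of the highest SCLK to the PCIe speed
         * table, clamping to its last entry when the SCLK table has more
         * levels than the PCIe table.
         */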
4075         for (i = 0; i < dpm_table->sclk_table.count; i++) {
4076                 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
4077                         return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
4078                                         dpm_table->pcie_speed_table.dpm_levels
4079                                         [dpm_table->pcie_speed_table.count - 1].value :
4080                                         dpm_table->pcie_speed_table.dpm_levels[i].value);
4081         }
4082
4083         return 0;
4084 }
4085
4086 static int smu7_request_link_speed_change_before_state_change(
4087                 struct pp_hwmgr *hwmgr, const void *input)
4088 {
4089         const struct phm_set_power_state_input *states =
4090                         (const struct phm_set_power_state_input *)input;
4091         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4092         const struct smu7_power_state *smu7_nps =
4093                         cast_const_phw_smu7_power_state(states->pnew_state);
4094         const struct smu7_power_state *polaris10_cps =
4095                         cast_const_phw_smu7_power_state(states->pcurrent_state);
4096
4097         uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
4098         uint16_t current_link_speed;
4099
4100         if (data->force_pcie_gen == PP_PCIEGenInvalid)
4101                 current_link_speed = smu7_get_maximum_link_speed(hwmgr, polaris10_cps);
4102         else
4103                 current_link_speed = data->force_pcie_gen;
4104
4105         data->force_pcie_gen = PP_PCIEGenInvalid;
4106         data->pspp_notify_required = false;
4107
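        /* Speed increases are requested through ACPI PSPP before the state
         * change, falling back one PCIe generation at a time on failure;
         * decreases are only flagged here and notified afterwards.
         */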
4108         if (target_link_speed > current_link_speed) {
4109                 switch (target_link_speed) {
4110 #ifdef CONFIG_ACPI
4111                 case PP_PCIEGen3:
4112                         if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
4113                                 break;
4114                         data->force_pcie_gen = PP_PCIEGen2;
4115                         if (current_link_speed == PP_PCIEGen2)
4116                                 break;
4117                         fallthrough;
4118                 case PP_PCIEGen2:
4119                         if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
4120                                 break;
4121                         fallthrough;
4122 #endif
4123                 default:
4124                         data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
4125                         break;
4126                 }
4127         } else {
4128                 if (target_link_speed < current_link_speed)
4129                         data->pspp_notify_required = true;
4130         }
4131
4132         return 0;
4133 }
4134
4135 static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4136 {
4137         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4138
4139         if (0 == data->need_update_smu7_dpm_table)
4140                 return 0;
4141
4142         if ((0 == data->sclk_dpm_key_disabled) &&
4143                 (data->need_update_smu7_dpm_table &
4144                         (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
4145                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4146                                 "Trying to freeze SCLK DPM when DPM is disabled",
4147                                 );
4148                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4149                                 PPSMC_MSG_SCLKDPM_FreezeLevel,
4150                                 NULL),
4151                                 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
4152                                 return -EINVAL);
4153         }
4154
4155         if ((0 == data->mclk_dpm_key_disabled) &&
4156                 !data->mclk_ignore_signal &&
4157                 (data->need_update_smu7_dpm_table &
4158                  DPMTABLE_OD_UPDATE_MCLK)) {
4159                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4160                                 "Trying to freeze MCLK DPM when DPM is disabled",
4161                                 );
4162                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4163                                 PPSMC_MSG_MCLKDPM_FreezeLevel,
4164                                 NULL),
4165                                 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
4166                                 return -EINVAL);
4167         }
4168
4169         return 0;
4170 }
4171
4172 static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
4173                 struct pp_hwmgr *hwmgr, const void *input)
4174 {
4175         int result = 0;
4176         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4177         struct smu7_dpm_table *dpm_table = &data->dpm_table;
4178         uint32_t count;
4179         struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4180         struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
4181         struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
4182
4183         if (0 == data->need_update_smu7_dpm_table)
4184                 return 0;
4185
4186         if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
4187                 for (count = 0; count < dpm_table->sclk_table.count; count++) {
4188                         dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
4189                         dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
4190                 }
4191         }
4192
4193         if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
4194                 for (count = 0; count < dpm_table->mclk_table.count; count++) {
4195                         dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
4196                         dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
4197                 }
4198         }
4199
4200         if (data->need_update_smu7_dpm_table &
4201                         (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4202                 result = smum_populate_all_graphic_levels(hwmgr);
4203                 PP_ASSERT_WITH_CODE((0 == result),
4204                                 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
4205                                 return result);
4206         }
4207
4208         if (data->need_update_smu7_dpm_table &
4209                         (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4210                 /* populate the MCLK DPM table and upload it to the SMU */
4211                 result = smum_populate_all_memory_levels(hwmgr);
4212                 PP_ASSERT_WITH_CODE((0 == result),
4213                                 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
4214                                 return result);
4215         }
4216
4217         return result;
4218 }
4219
4220 static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
4221                           struct smu7_single_dpm_table *dpm_table,
4222                         uint32_t low_limit, uint32_t high_limit)
4223 {
4224         uint32_t i;
4225
4226         /* force the trim if mclk_switching is disabled to prevent flicker */
4227         bool force_trim = (low_limit == high_limit);
4228         for (i = 0; i < dpm_table->count; i++) {
4229         /* skip the trim if OD (overdrive) is enabled */
4230                 if ((!hwmgr->od_enabled || force_trim)
4231                         && (dpm_table->dpm_levels[i].value < low_limit
4232                         || dpm_table->dpm_levels[i].value > high_limit))
4233                         dpm_table->dpm_levels[i].enabled = false;
4234                 else
4235                         dpm_table->dpm_levels[i].enabled = true;
4236         }
4237
4238         return 0;
4239 }
4240
4241 static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
4242                 const struct smu7_power_state *smu7_ps)
4243 {
4244         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4245         uint32_t high_limit_count;
4246
4247         PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
4248                         "power state did not have any performance level",
4249                         return -EINVAL);
4250
4251         high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
4252
4253         smu7_trim_single_dpm_states(hwmgr,
4254                         &(data->dpm_table.sclk_table),
4255                         smu7_ps->performance_levels[0].engine_clock,
4256                         smu7_ps->performance_levels[high_limit_count].engine_clock);
4257
4258         smu7_trim_single_dpm_states(hwmgr,
4259                         &(data->dpm_table.mclk_table),
4260                         smu7_ps->performance_levels[0].memory_clock,
4261                         smu7_ps->performance_levels[high_limit_count].memory_clock);
4262
4263         return 0;
4264 }
4265
4266 static int smu7_generate_dpm_level_enable_mask(
4267                 struct pp_hwmgr *hwmgr, const void *input)
4268 {
4269         int result = 0;
4270         const struct phm_set_power_state_input *states =
4271                         (const struct phm_set_power_state_input *)input;
4272         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4273         const struct smu7_power_state *smu7_ps =
4274                         cast_const_phw_smu7_power_state(states->pnew_state);
4275
4276
4277         result = smu7_trim_dpm_states(hwmgr, smu7_ps);
4278         if (result)
4279                 return result;
4280
4281         data->dpm_level_enable_mask.sclk_dpm_enable_mask =
4282                         phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
4283         data->dpm_level_enable_mask.mclk_dpm_enable_mask =
4284                         phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
4285         data->dpm_level_enable_mask.pcie_dpm_enable_mask =
4286                         phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
4287
4288         return 0;
4289 }
4290
4291 static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4292 {
4293         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4294
4295         if (0 == data->need_update_smu7_dpm_table)
4296                 return 0;
4297
4298         if ((0 == data->sclk_dpm_key_disabled) &&
4299                 (data->need_update_smu7_dpm_table &
4300                 (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
4301
4302                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4303                                 "Trying to Unfreeze SCLK DPM when DPM is disabled",
4304                                 );
4305                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4306                                 PPSMC_MSG_SCLKDPM_UnfreezeLevel,
4307                                 NULL),
4308                         "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
4309                         return -EINVAL);
4310         }
4311
4312         if ((0 == data->mclk_dpm_key_disabled) &&
4313                 !data->mclk_ignore_signal &&
4314                 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
4315
4316                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4317                                 "Trying to Unfreeze MCLK DPM when DPM is disabled",
4318                                 );
4319                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4320                                 PPSMC_MSG_MCLKDPM_UnfreezeLevel,
4321                                 NULL),
4322                     "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
4323                     return -EINVAL);
4324         }
4325
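        /* SCLK/MCLK updates have been applied; clear their flags but keep
         * DPMTABLE_OD_UPDATE_VDDC, which the AVFS update logic looks at.
         */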
4326         data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
4327
4328         return 0;
4329 }
4330
4331 static int smu7_notify_link_speed_change_after_state_change(
4332                 struct pp_hwmgr *hwmgr, const void *input)
4333 {
4334         const struct phm_set_power_state_input *states =
4335                         (const struct phm_set_power_state_input *)input;
4336         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4337         const struct smu7_power_state *smu7_ps =
4338                         cast_const_phw_smu7_power_state(states->pnew_state);
4339         uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
4340         uint8_t  request;
4341
4342         if (data->pspp_notify_required) {
4343                 if (target_link_speed == PP_PCIEGen3)
4344                         request = PCIE_PERF_REQ_GEN3;
4345                 else if (target_link_speed == PP_PCIEGen2)
4346                         request = PCIE_PERF_REQ_GEN2;
4347                 else
4348                         request = PCIE_PERF_REQ_GEN1;
4349
4350                 if (request == PCIE_PERF_REQ_GEN1 &&
4351                                 smu7_get_current_pcie_speed(hwmgr) > 0)
4352                         return 0;
4353
4354 #ifdef CONFIG_ACPI
4355                 if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
4356                         if (PP_PCIEGen2 == target_link_speed)
4357                                 pr_info("PSPP request to switch to Gen2 from Gen3 failed!\n");
4358                         else
4359                                 pr_info("PSPP request to switch to Gen1 from Gen2 failed!\n");
4360                 }
4361 #endif
4362         }
4363
4364         return 0;
4365 }
4366
4367 static int smu7_notify_no_display(struct pp_hwmgr *hwmgr)
4368 {
4369         return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL) == 0) ? 0 : -EINVAL;
4370 }
4371
4372 static int smu7_notify_has_display(struct pp_hwmgr *hwmgr)
4373 {
4374         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4375
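        /* Refresh the SMC's VBI timeout from the current frame time before
         * announcing the display, so MCLK switching respects the active
         * refresh rate; VEGAM uses its own message ID for this.
         */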
4376         if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
4377                 if (hwmgr->chip_id == CHIP_VEGAM)
4378                         smum_send_msg_to_smc_with_parameter(hwmgr,
4379                                         (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2,
4380                                         NULL);
4381                 else
4382                         smum_send_msg_to_smc_with_parameter(hwmgr,
4383                                         (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2,
4384                                         NULL);
4385                 data->last_sent_vbi_timeout = data->frame_time_x2;
4386         }
4387
4388         return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ? 0 : -EINVAL;
4389 }
4390
4391 static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
4392 {
4393         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4394         int result = 0;
4395
4396         if (data->mclk_ignore_signal)
4397                 result = smu7_notify_no_display(hwmgr);
4398         else
4399                 result = smu7_notify_has_display(hwmgr);
4400
4401         return result;
4402 }
4403
4404 static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
4405 {
4406         int tmp_result, result = 0;
4407         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4408
4409         tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
4410         PP_ASSERT_WITH_CODE((0 == tmp_result),
4411                         "Failed to find DPM states clocks in DPM table!",
4412                         result = tmp_result);
4413
4414         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4415                         PHM_PlatformCaps_PCIEPerformanceRequest)) {
4416                 tmp_result =
4417                         smu7_request_link_speed_change_before_state_change(hwmgr, input);
4418                 PP_ASSERT_WITH_CODE((0 == tmp_result),
4419                                 "Failed to request link speed change before state change!",
4420                                 result = tmp_result);
4421         }
4422
4423         tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
4424         PP_ASSERT_WITH_CODE((0 == tmp_result),
4425                         "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
4426
4427         tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
4428         PP_ASSERT_WITH_CODE((0 == tmp_result),
4429                         "Failed to populate and upload SCLK MCLK DPM levels!",
4430                         result = tmp_result);
4431
4432         /*
4433          * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
4434          * That effectively disables AVFS feature.
4435          */
4436         if (hwmgr->hardcode_pp_table != NULL)
4437                 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
4438
4439         tmp_result = smu7_update_avfs(hwmgr);
4440         PP_ASSERT_WITH_CODE((0 == tmp_result),
4441                         "Failed to update avfs voltages!",
4442                         result = tmp_result);
4443
4444         tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
4445         PP_ASSERT_WITH_CODE((0 == tmp_result),
4446                         "Failed to generate DPM level enabled mask!",
4447                         result = tmp_result);
4448
4449         tmp_result = smum_update_sclk_threshold(hwmgr);
4450         PP_ASSERT_WITH_CODE((0 == tmp_result),
4451                         "Failed to update SCLK threshold!",
4452                         result = tmp_result);
4453
4454         tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
4455         PP_ASSERT_WITH_CODE((0 == tmp_result),
4456                         "Failed to unfreeze SCLK MCLK DPM!",
4457                         result = tmp_result);
4458
4459         tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
4460         PP_ASSERT_WITH_CODE((0 == tmp_result),
4461                         "Failed to upload DPM level enabled mask!",
4462                         result = tmp_result);
4463
4464         tmp_result = smu7_notify_smc_display(hwmgr);
4465         PP_ASSERT_WITH_CODE((0 == tmp_result),
4466                         "Failed to notify smc display settings!",
4467                         result = tmp_result);
4468
4469         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4470                         PHM_PlatformCaps_PCIEPerformanceRequest)) {
4471                 tmp_result =
4472                         smu7_notify_link_speed_change_after_state_change(hwmgr, input);
4473                 PP_ASSERT_WITH_CODE((0 == tmp_result),
4474                                 "Failed to notify link speed change after state change!",
4475                                 result = tmp_result);
4476         }
4477         data->apply_optimized_settings = false;
4478         return result;
4479 }
4480
4481 static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
4482 {
4483         hwmgr->thermal_controller.
4484         advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
4485
4486         return smum_send_msg_to_smc_with_parameter(hwmgr,
4487                         PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm,
4488                         NULL);
4489 }
4490
4491 static int
4492 smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
4493 {
4494         return 0;
4495 }
4496
4497 /**
4498  * smu7_program_display_gap - Programs the display gap
4499  *
4500  * @hwmgr:  the address of the powerplay hardware manager.
4501  * Return: always 0 (success)
4502  */
4503 static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
4504 {
4505         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4506         uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
4507         uint32_t display_gap2;
4508         uint32_t pre_vbi_time_in_us;
4509         uint32_t frame_time_in_us;
4510         uint32_t ref_clock, refresh_rate;
4511
4512         display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
4513         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
4514
4515         ref_clock =  amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
4516         refresh_rate = hwmgr->display_config->vrefresh;
4517
4518         if (0 == refresh_rate)
4519                 refresh_rate = 60;
4520
4521         frame_time_in_us = 1000000 / refresh_rate;
4522
4523         pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;
4524
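        /* frame_time_x2 is twice the frame period, in units of 100 us
         * (e.g. 60 Hz gives 333); it is later sent to the SMU as the
         * VBI timeout.
         */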
4525         data->frame_time_x2 = frame_time_in_us * 2 / 100;
4526
4527         if (data->frame_time_x2 < 280) {
4528                 pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
4529                 data->frame_time_x2 = 280;
4530         }
4531
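        /* amdgpu_asic_get_xclk() reports the reference clock in 10 kHz
         * units, so ref_clock / 100 gives cycles per microsecond and
         * display_gap2 ends up as the pre-VBI time in refclk cycles.
         */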
4532         display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
4533
4534         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
4535
4536         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4537                         data->soft_regs_start + smum_get_offsetof(hwmgr,
4538                                                         SMU_SoftRegisters,
4539                                                         PreVBlankGap), 0x64);
4540
4541         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4542                         data->soft_regs_start + smum_get_offsetof(hwmgr,
4543                                                         SMU_SoftRegisters,
4544                                                         VBlankTimeout),
4545                                         (frame_time_in_us - pre_vbi_time_in_us));
4546
4547         return 0;
4548 }
4549
4550 static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4551 {
4552         return smu7_program_display_gap(hwmgr);
4553 }
4554
4555 /**
4556  * smu7_set_max_fan_rpm_output - Set maximum target operating fan output RPM
4557  *
4558  * @hwmgr:  the address of the powerplay hardware manager.
4559  * @us_max_fan_rpm:  max operating fan RPM value.
4560  * Return:   The response that came from the SMC.
4561  */
4562 static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
4563 {
4564         hwmgr->thermal_controller.
4565         advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
4566
4567         return smum_send_msg_to_smc_with_parameter(hwmgr,
4568                         PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm,
4569                         NULL);
4570 }
4571
4572 static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
4573         .process = phm_irq_process,
4574 };
4575
4576 static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
4577 {
4578         struct amdgpu_irq_src *source =
4579                 kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
4580
4581         if (!source)
4582                 return -ENOMEM;
4583
4584         source->funcs = &smu7_irq_funcs;
4585
4586         amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4587                         AMDGPU_IRQ_CLIENTID_LEGACY,
4588                         VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
4589                         source);
4590         amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4591                         AMDGPU_IRQ_CLIENTID_LEGACY,
4592                         VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
4593                         source);
4594
4595         /* Register CTF(GPIO_19) interrupt */
4596         amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4597                         AMDGPU_IRQ_CLIENTID_LEGACY,
4598                         VISLANDS30_IV_SRCID_GPIO_19,
4599                         source);
4600
4601         return 0;
4602 }
4603
4604 static bool
4605 smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4606 {
4607         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4608         bool is_update_required = false;
4609
4610         if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4611                 is_update_required = true;
4612
4613         if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh)
4614                 is_update_required = true;
4615
4616         if (hwmgr->chip_id >= CHIP_POLARIS10 &&
4617             hwmgr->chip_id <= CHIP_VEGAM &&
4618             data->last_sent_vbi_timeout != data->frame_time_x2)
4619                 is_update_required = true;
4620
4621         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4622                 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
4623                         (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
4624                         hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4625                         is_update_required = true;
4626         }
4627         return is_update_required;
4628 }
4629
4630 static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
4631                                                            const struct smu7_performance_level *pl2)
4632 {
4633         return ((pl1->memory_clock == pl2->memory_clock) &&
4634                   (pl1->engine_clock == pl2->engine_clock) &&
4635                   (pl1->pcie_gen == pl2->pcie_gen) &&
4636                   (pl1->pcie_lane == pl2->pcie_lane));
4637 }
4638
4639 static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
4640                 const struct pp_hw_power_state *pstate1,
4641                 const struct pp_hw_power_state *pstate2, bool *equal)
4642 {
4643         const struct smu7_power_state *psa;
4644         const struct smu7_power_state *psb;
4645         int i;
4646         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4647
4648         if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4649                 return -EINVAL;
4650
4651         psa = cast_const_phw_smu7_power_state(pstate1);
4652         psb = cast_const_phw_smu7_power_state(pstate2);
4653         /* If the two states don't even have the same number of performance levels, they cannot be the same state. */
4654         if (psa->performance_level_count != psb->performance_level_count) {
4655                 *equal = false;
4656                 return 0;
4657         }
4658
4659         for (i = 0; i < psa->performance_level_count; i++) {
4660                 if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4661                         /* If we have found even one performance level pair that is different, the states are different. */
4662                         *equal = false;
4663                         return 0;
4664                 }
4665         }
4666
4667         /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
4668         *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4669         *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4670         *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4671         /* For OD call, set value based on flag */
4672         *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
4673                                                         DPMTABLE_OD_UPDATE_MCLK |
4674                                                         DPMTABLE_OD_UPDATE_VDDC));
4675
4676         return 0;
4677 }
4678
4679 static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
4680 {
4681         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4682
4683         uint32_t tmp;
4684
4685         /* Read MC indirect register offset 0x9F bits [3:0] to see
4686          * if VBIOS has already loaded a full version of MC ucode
4687          * or not.
4688          */
4689
4690         smu7_get_mc_microcode_version(hwmgr);
4691
4692         data->need_long_memory_training = false;
4693
4694         cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
4695                                                         ixMC_IO_DEBUG_UP_13);
4696         tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
4697
4698         if (tmp & (1 << 23)) {
4699                 data->mem_latency_high = MEM_LATENCY_HIGH;
4700                 data->mem_latency_low = MEM_LATENCY_LOW;
4701                 if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4702                     (hwmgr->chip_id == CHIP_POLARIS11) ||
4703                     (hwmgr->chip_id == CHIP_POLARIS12))
4704                         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL);
4705         } else {
4706                 data->mem_latency_high = 330;
4707                 data->mem_latency_low = 330;
4708                 if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4709                     (hwmgr->chip_id == CHIP_POLARIS11) ||
4710                     (hwmgr->chip_id == CHIP_POLARIS12))
4711                         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL);
4712         }
4713
4714         return 0;
4715 }
4716
4717 static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
4718 {
4719         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4720
4721         data->clock_registers.vCG_SPLL_FUNC_CNTL         =
4722                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
4723         data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
4724                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
4725         data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
4726                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
4727         data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
4728                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
4729         data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
4730                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
4731         data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
4732                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
4733         data->clock_registers.vDLL_CNTL                  =
4734                 cgs_read_register(hwmgr->device, mmDLL_CNTL);
4735         data->clock_registers.vMCLK_PWRMGT_CNTL          =
4736                 cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
4737         data->clock_registers.vMPLL_AD_FUNC_CNTL         =
4738                 cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
4739         data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
4740                 cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
4741         data->clock_registers.vMPLL_FUNC_CNTL            =
4742                 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
4743         data->clock_registers.vMPLL_FUNC_CNTL_1          =
4744                 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
4745         data->clock_registers.vMPLL_FUNC_CNTL_2          =
4746                 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
4747         data->clock_registers.vMPLL_SS1                  =
4748                 cgs_read_register(hwmgr->device, mmMPLL_SS1);
4749         data->clock_registers.vMPLL_SS2                  =
4750                 cgs_read_register(hwmgr->device, mmMPLL_SS2);
4751         return 0;
4752
4753 }
4754
4755 /**
4756  * smu7_get_memory_type - Find out if memory is GDDR5.
4757  *
4758  * @hwmgr:  the address of the powerplay hardware manager.
4759  * Return:   always 0
4760  */
4761 static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
4762 {
4763         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4764         struct amdgpu_device *adev = hwmgr->adev;
4765
4766         data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);
4767
4768         return 0;
4769 }
4770
4771 /**
4772  * smu7_enable_acpi_power_management - Enables Dynamic Power Management by SMC
4773  *
4774  * @hwmgr:  the address of the powerplay hardware manager.
4775  * Return:   always 0
4776  */
4777 static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
4778 {
4779         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4780                         GENERAL_PWRMGT, STATIC_PM_EN, 1);
4781
4782         return 0;
4783 }
4784
4785 /**
4786  * smu7_init_power_gate_state - Initialize PowerGating States for different engines
4787  *
4788  * @hwmgr:  the address of the powerplay hardware manager.
4789  * Return:   always 0
4790  */
4791 static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
4792 {
4793         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4794
4795         data->uvd_power_gated = false;
4796         data->vce_power_gated = false;
4797
4798         return 0;
4799 }
4800
4801 static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
4802 {
4803         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4804
4805         data->low_sclk_interrupt_threshold = 0;
4806         return 0;
4807 }
4808
4809 static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
4810 {
4811         int tmp_result, result = 0;
4812
4813         smu7_check_mc_firmware(hwmgr);
4814
4815         tmp_result = smu7_read_clock_registers(hwmgr);
4816         PP_ASSERT_WITH_CODE((0 == tmp_result),
4817                         "Failed to read clock registers!", result = tmp_result);
4818
4819         tmp_result = smu7_get_memory_type(hwmgr);
4820         PP_ASSERT_WITH_CODE((0 == tmp_result),
4821                         "Failed to get memory type!", result = tmp_result);
4822
4823         tmp_result = smu7_enable_acpi_power_management(hwmgr);
4824         PP_ASSERT_WITH_CODE((0 == tmp_result),
4825                         "Failed to enable ACPI power management!", result = tmp_result);
4826
4827         tmp_result = smu7_init_power_gate_state(hwmgr);
4828         PP_ASSERT_WITH_CODE((0 == tmp_result),
4829                         "Failed to init power gate state!", result = tmp_result);
4830
4831         tmp_result = smu7_get_mc_microcode_version(hwmgr);
4832         PP_ASSERT_WITH_CODE((0 == tmp_result),
4833                         "Failed to get MC microcode version!", result = tmp_result);
4834
4835         tmp_result = smu7_init_sclk_threshold(hwmgr);
4836         PP_ASSERT_WITH_CODE((0 == tmp_result),
4837                         "Failed to init sclk threshold!", result = tmp_result);
4838
4839         return result;
4840 }
4841
4842 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4843                 enum pp_clock_type type, uint32_t mask)
4844 {
4845         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4846
4847         if (mask == 0)
4848                 return -EINVAL;
4849
4850         switch (type) {
4851         case PP_SCLK:
4852                 if (!data->sclk_dpm_key_disabled)
4853                         smum_send_msg_to_smc_with_parameter(hwmgr,
4854                                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
4855                                         data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask,
4856                                         NULL);
4857                 break;
4858         case PP_MCLK:
4859                 if (!data->mclk_dpm_key_disabled)
4860                         smum_send_msg_to_smc_with_parameter(hwmgr,
4861                                         PPSMC_MSG_MCLKDPM_SetEnabledMask,
4862                                         data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask,
4863                                         NULL);
4864                 break;
4865         case PP_PCIE:
4866         {
4867                 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4868
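                /*
                 * fls() equals ffs() only when exactly one bit is set
                 * in tmp: a single-level mask forces that PCIe level
                 * (the zero-based bit index, fls(tmp) - 1); any
                 * multi-level mask simply unforces PCIe DPM instead.
                 */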
4869                 if (!data->pcie_dpm_key_disabled) {
4870                         if (fls(tmp) != ffs(tmp))
4871                                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel,
4872                                                 NULL);
4873                         else
4874                                 smum_send_msg_to_smc_with_parameter(hwmgr,
4875                                         PPSMC_MSG_PCIeDPM_ForceLevel,
4876                                         fls(tmp) - 1,
4877                                         NULL);
4878                 }
4879                 break;
4880         }
4881         default:
4882                 break;
4883         }
4884
4885         return 0;
4886 }
4887
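/*
 * Backs the pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie and OD_SCLK/OD_MCLK/
 * OD_RANGE sysfs queries. Illustrative PP_SCLK output (the clock
 * values below are made up, not from real hardware):
 *
 *   0: 300Mhz
 *   1: 608Mhz *
 *   2: 1077Mhz
 *
 * The '*' marks the first DPM level whose clock is at or above the
 * current frequency read back from the SMC.
 */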
4888 static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
4889                 enum pp_clock_type type, char *buf)
4890 {
4891         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4892         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4893         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4894         struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
4895         struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4896         struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
4897         struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
4898         int i, now, size = 0;
4899         uint32_t clock, pcie_speed;
4900
4901         switch (type) {
4902         case PP_SCLK:
4903                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
4904
4905                 for (i = 0; i < sclk_table->count; i++) {
4906                         if (clock > sclk_table->dpm_levels[i].value)
4907                                 continue;
4908                         break;
4909                 }
4910                 now = i;
4911
4912                 for (i = 0; i < sclk_table->count; i++)
4913                         size += sprintf(buf + size, "%d: %uMhz %s\n",
4914                                         i, sclk_table->dpm_levels[i].value / 100,
4915                                         (i == now) ? "*" : "");
4916                 break;
4917         case PP_MCLK:
4918                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
4919
4920                 for (i = 0; i < mclk_table->count; i++) {
4921                         if (clock > mclk_table->dpm_levels[i].value)
4922                                 continue;
4923                         break;
4924                 }
4925                 now = i;
4926
4927                 for (i = 0; i < mclk_table->count; i++)
4928                         size += sprintf(buf + size, "%d: %uMhz %s\n",
4929                                         i, mclk_table->dpm_levels[i].value / 100,
4930                                         (i == now) ? "*" : "");
4931                 break;
4932         case PP_PCIE:
4933                 pcie_speed = smu7_get_current_pcie_speed(hwmgr);
4934                 for (i = 0; i < pcie_table->count; i++) {
4935                         if (pcie_speed != pcie_table->dpm_levels[i].value)
4936                                 continue;
4937                         break;
4938                 }
4939                 now = i;
4940
4941                 for (i = 0; i < pcie_table->count; i++)
4942                         size += sprintf(buf + size, "%d: %s %s\n", i,
4943                                         (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
4944                                         (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
4945                                         (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
4946                                         (i == now) ? "*" : "");
4947                 break;
4948         case OD_SCLK:
4949                 if (hwmgr->od_enabled) {
4950                         size = sprintf(buf, "%s:\n", "OD_SCLK");
4951                         for (i = 0; i < odn_sclk_table->num_of_pl; i++)
4952                                 size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
4953                                         i, odn_sclk_table->entries[i].clock/100,
4954                                         odn_sclk_table->entries[i].vddc);
4955                 }
4956                 break;
4957         case OD_MCLK:
4958                 if (hwmgr->od_enabled) {
4959                         size = sprintf(buf, "%s:\n", "OD_MCLK");
4960                         for (i = 0; i < odn_mclk_table->num_of_pl; i++)
4961                                 size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
4962                                         i, odn_mclk_table->entries[i].clock/100,
4963                                         odn_mclk_table->entries[i].vddc);
4964                 }
4965                 break;
4966         case OD_RANGE:
4967                 if (hwmgr->od_enabled) {
4968                         size = sprintf(buf, "%s:\n", "OD_RANGE");
4969                         size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
4970                                 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
4971                                 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4972                         size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
4973                                 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
4974                                 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4975                         size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
4976                                 data->odn_dpm_table.min_vddc,
4977                                 data->odn_dpm_table.max_vddc);
4978                 }
4979                 break;
4980         default:
4981                 break;
4982         }
4983         return size;
4984 }
4985
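/*
 * Map the generic AMD_FAN_CTRL_* modes onto the SMU7 fan controller:
 * NONE pins the fan at 100%, MANUAL stops the SMC fan control task
 * (when the microcode fan control cap is set), and AUTO re-enables
 * SMC-driven fan control after programming the static mode.
 */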
4986 static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4987 {
4988         switch (mode) {
4989         case AMD_FAN_CTRL_NONE:
4990                 smu7_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4991                 break;
4992         case AMD_FAN_CTRL_MANUAL:
4993                 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4994                         PHM_PlatformCaps_MicrocodeFanControl))
4995                         smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
4996                 break;
4997         case AMD_FAN_CTRL_AUTO:
4998                 if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
4999                         smu7_fan_ctrl_start_smc_fan_control(hwmgr);
5000                 break;
5001         default:
5002                 break;
5003         }
5004 }
5005
5006 static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
5007 {
5008         return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
5009 }
5010
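/*
 * Overdrive percentage helpers. The OD value is the percentage by
 * which the highest DPM level has been raised above the golden
 * (stock) top level, rounded up. Worked example with made-up values:
 * a golden top sclk of 100000 (1000 MHz in 10 kHz units) raised to
 * 105000 gives DIV_ROUND_UP(5000 * 100, 100000) = 5, i.e. +5%;
 * conversely smu7_set_sclk_od(hwmgr, 5) programs
 * golden * 5 / 100 + golden back into the requested state.
 */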
5011 static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
5012 {
5013         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5014         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5015         struct smu7_single_dpm_table *golden_sclk_table =
5016                         &(data->golden_dpm_table.sclk_table);
5017         int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
5018         int golden_value = golden_sclk_table->dpm_levels
5019                         [golden_sclk_table->count - 1].value;
5020
5021         value -= golden_value;
5022         value = DIV_ROUND_UP(value * 100, golden_value);
5023
5024         return value;
5025 }
5026
5027 static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5028 {
5029         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5030         struct smu7_single_dpm_table *golden_sclk_table =
5031                         &(data->golden_dpm_table.sclk_table);
5032         struct pp_power_state  *ps;
5033         struct smu7_power_state  *smu7_ps;
5034
5035         if (value > 20)
5036                 value = 20;
5037
5038         ps = hwmgr->request_ps;
5039
5040         if (ps == NULL)
5041                 return -EINVAL;
5042
5043         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
5044
5045         smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
5046                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
5047                         value / 100 +
5048                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5049
5050         return 0;
5051 }
5052
5053 static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
5054 {
5055         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5056         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5057         struct smu7_single_dpm_table *golden_mclk_table =
5058                         &(data->golden_dpm_table.mclk_table);
5059         int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
5060         int golden_value = golden_mclk_table->dpm_levels
5061                         [golden_mclk_table->count - 1].value;
5062
5063         value -= golden_value;
5064         value = DIV_ROUND_UP(value * 100, golden_value);
5065
5066         return value;
5067 }
5068
5069 static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5070 {
5071         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5072         struct smu7_single_dpm_table *golden_mclk_table =
5073                         &(data->golden_dpm_table.mclk_table);
5074         struct pp_power_state  *ps;
5075         struct smu7_power_state  *smu7_ps;
5076
5077         if (value > 20)
5078                 value = 20;
5079
5080         ps = hwmgr->request_ps;
5081
5082         if (ps == NULL)
5083                 return -EINVAL;
5084
5085         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
5086
5087         smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
5088                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
5089                         value / 100 +
5090                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5091
5092         return 0;
5093 }
5094
5095
5096 static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
5097 {
5098         struct phm_ppt_v1_information *table_info =
5099                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5100         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
5101         struct phm_clock_voltage_dependency_table *sclk_table;
5102         int i;
5103
5104         if (hwmgr->pp_table_version == PP_TABLE_V1) {
5105                 if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
5106                         return -EINVAL;
5107                 dep_sclk_table = table_info->vdd_dep_on_sclk;
5108                 for (i = 0; i < dep_sclk_table->count; i++)
5109                         clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
5110                 clocks->count = dep_sclk_table->count;
5111         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
5112                 sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
5113                 for (i = 0; i < sclk_table->count; i++)
5114                         clocks->clock[i] = sclk_table->entries[i].clk * 10;
5115                 clocks->count = sclk_table->count;
5116         }
5117
5118         return 0;
5119 }
5120
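/**
 * smu7_get_mem_latency - Look up the memory latency bucket for a clock
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * @clk:    memory clock to classify, in 10 kHz units.
 *
 * Clocks in [MEM_FREQ_LOW_LATENCY, MEM_FREQ_HIGH_LATENCY) use the
 * high-latency value, faster clocks the low-latency value; anything
 * slower reports MEM_LATENCY_ERR.
 * Return:   the latency in microseconds, or MEM_LATENCY_ERR
 */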
5121 static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
5122 {
5123         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5124
5125         if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
5126                 return data->mem_latency_high;
5127         else if (clk >= MEM_FREQ_HIGH_LATENCY)
5128                 return data->mem_latency_low;
5129         else
5130                 return MEM_LATENCY_ERR;
5131 }
5132
5133 static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
5134 {
5135         struct phm_ppt_v1_information *table_info =
5136                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5137         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
5138         int i;
5139         struct phm_clock_voltage_dependency_table *mclk_table;
5140
5141         if (hwmgr->pp_table_version == PP_TABLE_V1) {
5142                 if (table_info == NULL || table_info->vdd_dep_on_mclk == NULL)
5143                         return -EINVAL;
5144                 dep_mclk_table = table_info->vdd_dep_on_mclk;
5145                 for (i = 0; i < dep_mclk_table->count; i++) {
5146                         clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
5147                         clocks->latency[i] = smu7_get_mem_latency(hwmgr,
5148                                                 dep_mclk_table->entries[i].clk);
5149                 }
5150                 clocks->count = dep_mclk_table->count;
5151         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
5152                 mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
5153                 for (i = 0; i < mclk_table->count; i++)
5154                         clocks->clock[i] = mclk_table->entries[i].clk * 10;
5155                 clocks->count = mclk_table->count;
5156         }
5157         return 0;
5158 }
5159
5160 static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
5161                                                 struct amd_pp_clocks *clocks)
5162 {
5163         switch (type) {
5164         case amd_pp_sys_clock:
5165                 smu7_get_sclks(hwmgr, clocks);
5166                 break;
5167         case amd_pp_mem_clock:
5168                 smu7_get_mclks(hwmgr, clocks);
5169                 break;
5170         default:
5171                 return -EINVAL;
5172         }
5173
5174         return 0;
5175 }
5176
5177 static int smu7_get_sclks_with_latency(struct pp_hwmgr *hwmgr,
5178                                        struct pp_clock_levels_with_latency *clocks)
5179 {
5180         struct phm_ppt_v1_information *table_info =
5181                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5182         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
5183                         table_info->vdd_dep_on_sclk;
5184         int i;
5185
5186         clocks->num_levels = 0;
5187         for (i = 0; i < dep_sclk_table->count; i++) {
5188                 if (dep_sclk_table->entries[i].clk) {
5189                         clocks->data[clocks->num_levels].clocks_in_khz =
5190                                 dep_sclk_table->entries[i].clk * 10;
5191                         clocks->num_levels++;
5192                 }
5193         }
5194
5195         return 0;
5196 }
5197
5198 static int smu7_get_mclks_with_latency(struct pp_hwmgr *hwmgr,
5199                                        struct pp_clock_levels_with_latency *clocks)
5200 {
5201         struct phm_ppt_v1_information *table_info =
5202                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5203         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
5204                         table_info->vdd_dep_on_mclk;
5205         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5206         int i;
5207
5208         clocks->num_levels = 0;
5209         data->mclk_latency_table.count = 0;
5210         for (i = 0; i < dep_mclk_table->count; i++) {
5211                 if (dep_mclk_table->entries[i].clk) {
5212                         clocks->data[clocks->num_levels].clocks_in_khz =
5213                                         dep_mclk_table->entries[i].clk * 10;
5214                         data->mclk_latency_table.entries[data->mclk_latency_table.count].frequency =
5215                                         dep_mclk_table->entries[i].clk;
5216                         clocks->data[clocks->num_levels].latency_in_us =
5217                                 data->mclk_latency_table.entries[data->mclk_latency_table.count].latency =
5218                                         smu7_get_mem_latency(hwmgr, dep_mclk_table->entries[i].clk);
5219                         clocks->num_levels++;
5220                         data->mclk_latency_table.count++;
5221                 }
5222         }
5223
5224         return 0;
5225 }
5226
5227 static int smu7_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
5228                                                enum amd_pp_clock_type type,
5229                                                struct pp_clock_levels_with_latency *clocks)
5230 {
5231         if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
5232               hwmgr->chip_id <= CHIP_VEGAM))
5233                 return -EINVAL;
5234
5235         switch (type) {
5236         case amd_pp_sys_clock:
5237                 smu7_get_sclks_with_latency(hwmgr, clocks);
5238                 break;
5239         case amd_pp_mem_clock:
5240                 smu7_get_mclks_with_latency(hwmgr, clocks);
5241                 break;
5242         default:
5243                 return -EINVAL;
5244         }
5245
5246         return 0;
5247 }
5248
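/*
 * Fill the SMC DisplayWatermark[mclk][sclk] matrix from the watermark
 * clock ranges handed down by the display code: each (mclk, sclk) DPM
 * pair is tagged with the wm_set_id of the first range containing it,
 * falling back to the last set examined when no range matches, and
 * the whole table is then uploaded to SMC RAM.
 */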
5249 static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
5250                                                  void *clock_range)
5251 {
5252         struct phm_ppt_v1_information *table_info =
5253                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5254         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
5255                         table_info->vdd_dep_on_mclk;
5256         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
5257                         table_info->vdd_dep_on_sclk;
5258         struct polaris10_smumgr *smu_data =
5259                         (struct polaris10_smumgr *)(hwmgr->smu_backend);
5260         SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
5261         struct dm_pp_wm_sets_with_clock_ranges *watermarks =
5262                         (struct dm_pp_wm_sets_with_clock_ranges *)clock_range;
5263         uint32_t i, j, k;
5264         bool valid_entry;
5265
5266         if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
5267               hwmgr->chip_id <= CHIP_VEGAM))
5268                 return -EINVAL;
5269
5270         for (i = 0; i < dep_mclk_table->count; i++) {
5271                 for (j = 0; j < dep_sclk_table->count; j++) {
5272                         valid_entry = false;
5273                         for (k = 0; k < watermarks->num_wm_sets; k++) {
5274                                 if (dep_sclk_table->entries[j].clk >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz / 10 &&
5275                                     dep_sclk_table->entries[j].clk < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz / 10 &&
5276                                     dep_mclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz / 10 &&
5277                                     dep_mclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz / 10) {
5278                                         valid_entry = true;
5279                                         table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id;
5280                                         break;
5281                                 }
5282                         }
5283                         PP_ASSERT_WITH_CODE(valid_entry,
5284                                         "Clock is not in range of specified clock range for watermark from DAL!  Using highest water mark set.",
5285                                         table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k - 1].wm_set_id);
5286                 }
5287         }
5288
5289         return smu7_copy_bytes_to_smc(hwmgr,
5290                                       smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, DisplayWatermark),
5291                                       (uint8_t *)table->DisplayWatermark,
5292                                       sizeof(uint8_t) * SMU74_MAX_LEVELS_MEMORY * SMU74_MAX_LEVELS_GRAPHICS,
5293                                       SMC_RAM_END);
5294 }
5295
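/*
 * Tell the SMC where the CAC/DRAM logging buffer lives by writing its
 * MC and virtual addresses plus its size into the SMU soft-register
 * area (DRAM_LOG_ADDR_*, DRAM_LOG_PHY_ADDR_* and DRAM_LOG_BUFF_SIZE).
 */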
5296 static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
5297                                         uint32_t virtual_addr_low,
5298                                         uint32_t virtual_addr_hi,
5299                                         uint32_t mc_addr_low,
5300                                         uint32_t mc_addr_hi,
5301                                         uint32_t size)
5302 {
5303         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5304
5305         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5306                                         data->soft_regs_start +
5307                                         smum_get_offsetof(hwmgr,
5308                                         SMU_SoftRegisters, DRAM_LOG_ADDR_H),
5309                                         mc_addr_hi);
5310
5311         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5312                                         data->soft_regs_start +
5313                                         smum_get_offsetof(hwmgr,
5314                                         SMU_SoftRegisters, DRAM_LOG_ADDR_L),
5315                                         mc_addr_low);
5316
5317         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5318                                         data->soft_regs_start +
5319                                         smum_get_offsetof(hwmgr,
5320                                         SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
5321                                         virtual_addr_hi);
5322
5323         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5324                                         data->soft_regs_start +
5325                                         smum_get_offsetof(hwmgr,
5326                                         SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
5327                                         virtual_addr_low);
5328
5329         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5330                                         data->soft_regs_start +
5331                                         smum_get_offsetof(hwmgr,
5332                                         SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
5333                                         size);
5334         return 0;
5335 }
5336
5337 static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
5338                                         struct amd_pp_simple_clock_info *clocks)
5339 {
5340         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5341         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5342         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5343
5344         if (clocks == NULL)
5345                 return -EINVAL;
5346
5347         clocks->memory_max_clock = mclk_table->count > 1 ?
5348                                 mclk_table->dpm_levels[mclk_table->count-1].value :
5349                                 mclk_table->dpm_levels[0].value;
5350         clocks->engine_max_clock = sclk_table->count > 1 ?
5351                                 sclk_table->dpm_levels[sclk_table->count-1].value :
5352                                 sclk_table->dpm_levels[0].value;
5353         return 0;
5354 }
5355
5356 static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
5357                 struct PP_TemperatureRange *thermal_data)
5358 {
5359         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5360         struct phm_ppt_v1_information *table_info =
5361                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5362
5363         memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
5364
5365         if (hwmgr->pp_table_version == PP_TABLE_V1)
5366                 thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
5367                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
5368         else if (hwmgr->pp_table_version == PP_TABLE_V0)
5369                 thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
5370                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
5371
5372         return 0;
5373 }
5374
5375 static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
5376                                         enum PP_OD_DPM_TABLE_COMMAND type,
5377                                         uint32_t clk,
5378                                         uint32_t voltage)
5379 {
5380         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5381
5382         if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
5383                 pr_info("OD voltage is out of range [%d - %d] mV\n",
5384                                                 data->odn_dpm_table.min_vddc,
5385                                                 data->odn_dpm_table.max_vddc);
5386                 return false;
5387         }
5388
5389         if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
5390                 if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
5391                         hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
5392                         pr_info("OD engine clock is out of range [%d - %d] MHz\n",
5393                                 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
5394                                 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
5395                         return false;
5396                 }
5397         } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
5398                 if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
5399                         hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
5400                         pr_info("OD memory clock is out of range [%d - %d] MHz\n",
5401                                 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
5402                                 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
5403                         return false;
5404                 }
5405         } else {
5406                 return false;
5407         }
5408
5409         return true;
5410 }
5411
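/*
 * Overdrive table editing, driven by writes to pp_od_clk_voltage.
 * For the SCLK/MCLK edit commands, @input holds triplets of
 * {level index, clock in MHz, voltage in mV}; the clock is scaled by
 * 100 into the driver's internal 10 kHz units before validation.
 * RESTORE and COMMIT take no payload.
 */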
5412 static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
5413                                         enum PP_OD_DPM_TABLE_COMMAND type,
5414                                         long *input, uint32_t size)
5415 {
5416         uint32_t i;
5417         struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
5418         struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
5419         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5420
5421         uint32_t input_clk;
5422         uint32_t input_vol;
5423         uint32_t input_level;
5424
5425         PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
5426                                 return -EINVAL);
5427
5428         if (!hwmgr->od_enabled) {
5429                 pr_info("OverDrive feature not enabled\n");
5430                 return -EINVAL;
5431         }
5432
5433         if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
5434                 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
5435                 podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
5436                 PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
5437                                 "Failed to get ODN SCLK and Voltage tables",
5438                                 return -EINVAL);
5439         } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
5440                 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
5441                 podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;
5442
5443                 PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
5444                         "Failed to get ODN MCLK and Voltage tables",
5445                         return -EINVAL);
5446         } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
5447                 smu7_odn_initial_default_setting(hwmgr);
5448                 return 0;
5449         } else if (PP_OD_COMMIT_DPM_TABLE == type) {
5450                 smu7_check_dpm_table_updated(hwmgr);
5451                 return 0;
5452         } else {
5453                 return -EINVAL;
5454         }
5455
5456         for (i = 0; i < size; i += 3) {
5457                 if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
5458                         pr_info("invalid clock voltage input \n");
5459                         return 0;
5460                 }
5461                 input_level = input[i];
5462                 input_clk = input[i+1] * 100;
5463                 input_vol = input[i+2];
5464
5465                 if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
5466                         podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
5467                         podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
5468                         podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
5469                         podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
5470                         podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
5471                 } else {
5472                         return -EINVAL;
5473                 }
5474         }
5475
5476         return 0;
5477 }
5478
5479 static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
5480 {
5481         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5482         uint32_t i, size = 0;
5483         uint32_t len;
5484
5485         static const char *profile_name[7] = {"BOOTUP_DEFAULT",
5486                                         "3D_FULL_SCREEN",
5487                                         "POWER_SAVING",
5488                                         "VIDEO",
5489                                         "VR",
5490                                         "COMPUTE",
5491                                         "CUSTOM"};
5492
5493         static const char *title[8] = {"NUM",
5494                         "MODE_NAME",
5495                         "SCLK_UP_HYST",
5496                         "SCLK_DOWN_HYST",
5497                         "SCLK_ACTIVE_LEVEL",
5498                         "MCLK_UP_HYST",
5499                         "MCLK_DOWN_HYST",
5500                         "MCLK_ACTIVE_LEVEL"};
5501
5502         if (!buf)
5503                 return -EINVAL;
5504
5505         size += sprintf(buf + size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
5506                         title[0], title[1], title[2], title[3],
5507                         title[4], title[5], title[6], title[7]);
5508
5509         len = ARRAY_SIZE(smu7_profiling);
5510
5511         for (i = 0; i < len; i++) {
5512                 if (i == hwmgr->power_profile_mode) {
5513                         size += sprintf(buf + size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
5514                         i, profile_name[i], "*",
5515                         data->current_profile_setting.sclk_up_hyst,
5516                         data->current_profile_setting.sclk_down_hyst,
5517                         data->current_profile_setting.sclk_activity,
5518                         data->current_profile_setting.mclk_up_hyst,
5519                         data->current_profile_setting.mclk_down_hyst,
5520                         data->current_profile_setting.mclk_activity);
5521                         continue;
5522                 }
5523                 if (smu7_profiling[i].bupdate_sclk)
5524                         size += sprintf(buf + size, "%3d %16s: %8d %16d %16d ",
5525                         i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
5526                         smu7_profiling[i].sclk_down_hyst,
5527                         smu7_profiling[i].sclk_activity);
5528                 else
5529                         size += sprintf(buf + size, "%3d %16s: %8s %16s %16s ",
5530                         i, profile_name[i], "-", "-", "-");
5531
5532                 if (smu7_profiling[i].bupdate_mclk)
5533                         size += sprintf(buf + size, "%16d %16d %16d\n",
5534                         smu7_profiling[i].mclk_up_hyst,
5535                         smu7_profiling[i].mclk_down_hyst,
5536                         smu7_profiling[i].mclk_activity);
5537                 else
5538                         size += sprintf(buf + size, "%16s %16s %16s\n",
5539                         "-", "-", "-");
5540         }
5541
5542         return size;
5543 }
5544
5545 static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
5546                                         enum PP_SMC_POWER_PROFILE request)
5547 {
5548         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5549         uint32_t tmp, level;
5550
5551         if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
5552                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
5553                         level = 0;
5554                         tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
5555                         while (tmp >>= 1)
5556                                 level++;
5557                         if (level > 0)
5558                                 smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
5559                 }
5560         } else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
5561                 smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
5562         }
5563 }
5564
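/*
 * @input layout: the requested profile mode always sits at
 * input[size]. For PP_SMC_POWER_PROFILE_CUSTOM with size >= 8 the
 * first eight entries carry {bupdate_sclk, sclk_up_hyst,
 * sclk_down_hyst, sclk_activity, bupdate_mclk, mclk_up_hyst,
 * mclk_down_hyst, mclk_activity}; size == 0 reuses the previously
 * saved CUSTOM settings.
 */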
5565 static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
5566 {
5567         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5568         struct profile_mode_setting tmp;
5569         enum PP_SMC_POWER_PROFILE mode;
5570
5571         if (input == NULL)
5572                 return -EINVAL;
5573
5574         mode = input[size];
5575         switch (mode) {
5576         case PP_SMC_POWER_PROFILE_CUSTOM:
5577                 if (size < 8 && size != 0)
5578                         return -EINVAL;
5579                 /* If only CUSTOM is passed in, use the saved values. Check
5580                  * that we actually have a CUSTOM profile by ensuring that
5581                  * the "use sclk" or the "use mclk" bits are set
5582                  */
5583                 tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
5584                 if (size == 0) {
5585                         if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
5586                                 return -EINVAL;
5587                 } else {
5588                         tmp.bupdate_sclk = input[0];
5589                         tmp.sclk_up_hyst = input[1];
5590                         tmp.sclk_down_hyst = input[2];
5591                         tmp.sclk_activity = input[3];
5592                         tmp.bupdate_mclk = input[4];
5593                         tmp.mclk_up_hyst = input[5];
5594                         tmp.mclk_down_hyst = input[6];
5595                         tmp.mclk_activity = input[7];
5596                         smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
5597                 }
5598                 if (!smum_update_dpm_settings(hwmgr, &tmp)) {
5599                         memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
5600                         hwmgr->power_profile_mode = mode;
5601                 }
5602                 break;
5603         case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
5604         case PP_SMC_POWER_PROFILE_POWERSAVING:
5605         case PP_SMC_POWER_PROFILE_VIDEO:
5606         case PP_SMC_POWER_PROFILE_VR:
5607         case PP_SMC_POWER_PROFILE_COMPUTE:
5608                 if (mode == hwmgr->power_profile_mode)
5609                         return 0;
5610
5611                 memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
5612                 if (!smum_update_dpm_settings(hwmgr, &tmp)) {
5613                         if (tmp.bupdate_sclk) {
5614                                 data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
5615                                 data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
5616                                 data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
5617                                 data->current_profile_setting.sclk_activity = tmp.sclk_activity;
5618                         }
5619                         if (tmp.bupdate_mclk) {
5620                                 data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
5621                                 data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
5622                                 data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
5623                                 data->current_profile_setting.mclk_activity = tmp.mclk_activity;
5624                         }
5625                         smu7_patch_compute_profile_mode(hwmgr, mode);
5626                         hwmgr->power_profile_mode = mode;
5627                 }
5628                 break;
5629         default:
5630                 return -EINVAL;
5631         }
5632
5633         return 0;
5634 }
5635
5636 static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
5637                                 PHM_PerformanceLevelDesignation designation, uint32_t index,
5638                                 PHM_PerformanceLevel *level)
5639 {
5640         const struct smu7_power_state *ps;
5641         uint32_t i;
5642
5643         if (level == NULL || hwmgr == NULL || state == NULL)
5644                 return -EINVAL;
5645
5646         ps = cast_const_phw_smu7_power_state(state);
5647
5648         i = index > ps->performance_level_count - 1 ?
5649                         ps->performance_level_count - 1 : index;
5650
5651         level->coreClock = ps->performance_levels[i].engine_clock;
5652         level->memory_clock = ps->performance_levels[i].memory_clock;
5653
5654         return 0;
5655 }
5656
5657 static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
5658 {
5659         int result;
5660
5661         result = smu7_disable_dpm_tasks(hwmgr);
5662         PP_ASSERT_WITH_CODE((0 == result),
5663                         "[disable_dpm_tasks] Failed to disable DPM!",
5664                         );
5665
5666         return result;
5667 }
5668
5669 static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
5670         .backend_init = &smu7_hwmgr_backend_init,
5671         .backend_fini = &smu7_hwmgr_backend_fini,
5672         .asic_setup = &smu7_setup_asic_task,
5673         .dynamic_state_management_enable = &smu7_enable_dpm_tasks,
5674         .apply_state_adjust_rules = smu7_apply_state_adjust_rules,
5675         .force_dpm_level = &smu7_force_dpm_level,
5676         .power_state_set = smu7_set_power_state_tasks,
5677         .get_power_state_size = smu7_get_power_state_size,
5678         .get_mclk = smu7_dpm_get_mclk,
5679         .get_sclk = smu7_dpm_get_sclk,
5680         .patch_boot_state = smu7_dpm_patch_boot_state,
5681         .get_pp_table_entry = smu7_get_pp_table_entry,
5682         .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
5683         .powerdown_uvd = smu7_powerdown_uvd,
5684         .powergate_uvd = smu7_powergate_uvd,
5685         .powergate_vce = smu7_powergate_vce,
5686         .disable_clock_power_gating = smu7_disable_clock_power_gating,
5687         .update_clock_gatings = smu7_update_clock_gatings,
5688         .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
5689         .display_config_changed = smu7_display_configuration_changed_task,
5690         .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
5691         .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
5692         .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
5693         .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
5694         .get_fan_speed_percent = smu7_fan_ctrl_get_fan_speed_percent,
5695         .set_fan_speed_percent = smu7_fan_ctrl_set_fan_speed_percent,
5696         .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
5697         .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
5698         .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
5699         .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
5700         .register_irq_handlers = smu7_register_irq_handlers,
5701         .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
5702         .check_states_equal = smu7_check_states_equal,
5703         .set_fan_control_mode = smu7_set_fan_control_mode,
5704         .get_fan_control_mode = smu7_get_fan_control_mode,
5705         .force_clock_level = smu7_force_clock_level,
5706         .print_clock_levels = smu7_print_clock_levels,
5707         .powergate_gfx = smu7_powergate_gfx,
5708         .get_sclk_od = smu7_get_sclk_od,
5709         .set_sclk_od = smu7_set_sclk_od,
5710         .get_mclk_od = smu7_get_mclk_od,
5711         .set_mclk_od = smu7_set_mclk_od,
5712         .get_clock_by_type = smu7_get_clock_by_type,
5713         .get_clock_by_type_with_latency = smu7_get_clock_by_type_with_latency,
5714         .set_watermarks_for_clocks_ranges = smu7_set_watermarks_for_clocks_ranges,
5715         .read_sensor = smu7_read_sensor,
5716         .dynamic_state_management_disable = smu7_disable_dpm_tasks,
5717         .avfs_control = smu7_avfs_control,
5718         .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
5719         .start_thermal_controller = smu7_start_thermal_controller,
5720         .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
5721         .get_max_high_clocks = smu7_get_max_high_clocks,
5722         .get_thermal_temperature_range = smu7_get_thermal_temperature_range,
5723         .odn_edit_dpm_table = smu7_odn_edit_dpm_table,
5724         .set_power_limit = smu7_set_power_limit,
5725         .get_power_profile_mode = smu7_get_power_profile_mode,
5726         .set_power_profile_mode = smu7_set_power_profile_mode,
5727         .get_performance_level = smu7_get_performance_level,
5728         .get_asic_baco_capability = smu7_baco_get_capability,
5729         .get_asic_baco_state = smu7_baco_get_state,
5730         .set_asic_baco_state = smu7_baco_set_state,
5731         .power_off_asic = smu7_power_off_asic,
5732 };
5733
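/**
 * smu7_get_sleep_divider_id_from_clock - Pick the deepest sleep divider for a clock
 *
 * @clock:      engine clock, in 10 kHz units.
 * @clock_insr: minimum engine clock allowed in self refresh.
 *
 * Returns the largest divider id i such that (clock >> i) still meets
 * the stutter minimum. Worked example with made-up values, assuming
 * SMU7_MAX_DEEPSLEEP_DIVIDER_ID is 5 and SMU7_MINIMUM_ENGINE_CLOCK is
 * 2500: clock = 60000 (600 MHz) with clock_insr = 2500 gives
 * 60000 >> 5 = 1875 < 2500 but 60000 >> 4 = 3750 >= 2500, so the
 * divider id is 4.
 * Return:   the divider id, or 0 if @clock is below the minimum
 */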
5734 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
5735                 uint32_t clock_insr)
5736 {
5737         uint8_t i;
5738         uint32_t temp;
5739         uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
5740
5741         PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
5742         for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
5743                 temp = clock >> i;
5744
5745                 if (temp >= min || i == 0)
5746                         break;
5747         }
5748         return i;
5749 }
5750
5751 int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
5752 {
5753         hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
5754         if (hwmgr->pp_table_version == PP_TABLE_V0)
5755                 hwmgr->pptable_func = &pptable_funcs;
5756         else if (hwmgr->pp_table_version == PP_TABLE_V1)
5757                 hwmgr->pptable_func = &pptable_v1_0_funcs;
5758
5759         return 0;
5760 }