drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "pp_debug.h"
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include <drm/amdgpu_drm.h>
#include "ppatomctrl.h"
#include "atombios.h"
#include "pptable_v1_0.h"
#include "pppcielanes.h"
#include "amd_pcie_helpers.h"
#include "hardwaremanager.h"
#include "process_pptables_v1_0.h"
#include "cgs_common.h"

#include "smu7_common.h"

#include "hwmgr.h"
#include "smu7_hwmgr.h"
#include "smu_ucode_xfer_vi.h"
#include "smu7_powertune.h"
#include "smu7_dyn_defaults.h"
#include "smu7_thermal.h"
#include "smu7_clockpowergating.h"
#include "processpptables.h"
#include "pp_thermal.h"
#include "smu7_baco.h"
#include "smu7_smumgr.h"
#include "polaris10_smumgr.h"

#include "ivsrcid/ivsrcid_vislands30.h"

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define MC_CG_SEQ_DRAMCONF_S0       0x05
#define MC_CG_SEQ_DRAMCONF_S1       0x06
#define MC_CG_SEQ_YCLK_SUSPEND      0x04
#define MC_CG_SEQ_YCLK_RESUME       0x0a

#define SMC_CG_IND_START            0xc0030000
#define SMC_CG_IND_END              0xc0040000

#define MEM_FREQ_LOW_LATENCY        25000
#define MEM_FREQ_HIGH_LATENCY       80000

#define MEM_LATENCY_HIGH            45
#define MEM_LATENCY_LOW             35
#define MEM_LATENCY_ERR             0xFFFF

#define MC_SEQ_MISC0_GDDR5_SHIFT 28
#define MC_SEQ_MISC0_GDDR5_MASK  0xf0000000
#define MC_SEQ_MISC0_GDDR5_VALUE 5

#define PCIE_BUS_CLK                10000
#define TCLK                        (PCIE_BUS_CLK / 10)

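/*
 * Default power-profile settings, indexed by the PP_SMC_POWER_PROFILE_*
 * modes (bootup default, 3D fullscreen, power saving, video, VR, compute,
 * custom).  Each row follows the field order of struct profile_mode_setting:
 * {bupdate_sclk, sclk_up_hyst, sclk_down_hyst, sclk_activity,
 *  bupdate_mclk, mclk_up_hyst, mclk_down_hyst, mclk_activity}.
 */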
static struct profile_mode_setting smu7_profiling[7] =
                                        {{0, 0, 0, 0, 0, 0, 0, 0},
                                         {1, 0, 100, 30, 1, 0, 100, 10},
                                         {1, 10, 0, 30, 0, 0, 0, 0},
                                         {0, 0, 0, 0, 1, 10, 16, 31},
                                         {1, 0, 11, 50, 1, 0, 100, 10},
                                         {1, 0, 5, 30, 0, 0, 0, 0},
                                         {0, 0, 0, 0, 0, 0, 0, 0},
                                        };

#define PPSMC_MSG_SetVBITimeout_VEGAM    ((uint16_t) 0x310)

#define ixPWR_SVI2_PLANE1_LOAD                     0xC0200280
#define PWR_SVI2_PLANE1_LOAD__PSI1_MASK                    0x00000020L
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN_MASK                 0x00000040L
#define PWR_SVI2_PLANE1_LOAD__PSI1__SHIFT                  0x00000005
#define PWR_SVI2_PLANE1_LOAD__PSI0_EN__SHIFT               0x00000006

#define STRAP_EVV_REVISION_MSB          2211
#define STRAP_EVV_REVISION_LSB          2208

/** Values for the CG_THERMAL_CTRL::DPM_EVENT_SRC field. */
enum DPM_EVENT_SRC {
        DPM_EVENT_SRC_ANALOG = 0,
        DPM_EVENT_SRC_EXTERNAL = 1,
        DPM_EVENT_SRC_DIGITAL = 2,
        DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
        DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
};

#define ixDIDT_SQ_EDC_CTRL                         0x0013
#define ixDIDT_SQ_EDC_THRESHOLD                    0x0014
#define ixDIDT_SQ_EDC_STALL_PATTERN_1_2            0x0015
#define ixDIDT_SQ_EDC_STALL_PATTERN_3_4            0x0016
#define ixDIDT_SQ_EDC_STALL_PATTERN_5_6            0x0017
#define ixDIDT_SQ_EDC_STALL_PATTERN_7              0x0018

#define ixDIDT_TD_EDC_CTRL                         0x0053
#define ixDIDT_TD_EDC_THRESHOLD                    0x0054
#define ixDIDT_TD_EDC_STALL_PATTERN_1_2            0x0055
#define ixDIDT_TD_EDC_STALL_PATTERN_3_4            0x0056
#define ixDIDT_TD_EDC_STALL_PATTERN_5_6            0x0057
#define ixDIDT_TD_EDC_STALL_PATTERN_7              0x0058

#define ixDIDT_TCP_EDC_CTRL                        0x0073
#define ixDIDT_TCP_EDC_THRESHOLD                   0x0074
#define ixDIDT_TCP_EDC_STALL_PATTERN_1_2           0x0075
#define ixDIDT_TCP_EDC_STALL_PATTERN_3_4           0x0076
#define ixDIDT_TCP_EDC_STALL_PATTERN_5_6           0x0077
#define ixDIDT_TCP_EDC_STALL_PATTERN_7             0x0078

#define ixDIDT_DB_EDC_CTRL                         0x0033
#define ixDIDT_DB_EDC_THRESHOLD                    0x0034
#define ixDIDT_DB_EDC_STALL_PATTERN_1_2            0x0035
#define ixDIDT_DB_EDC_STALL_PATTERN_3_4            0x0036
#define ixDIDT_DB_EDC_STALL_PATTERN_5_6            0x0037
#define ixDIDT_DB_EDC_STALL_PATTERN_7              0x0038

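/*
 * Ordered list of DIDT EDC indirect register offsets (SQ, TD, TCP and DB
 * blocks) programmed when EDC is configured; the 0xFFFFFFFF sentinel
 * terminates the list.
 */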
uint32_t DIDTEDCConfig_P12[] = {
    ixDIDT_SQ_EDC_STALL_PATTERN_1_2,
    ixDIDT_SQ_EDC_STALL_PATTERN_3_4,
    ixDIDT_SQ_EDC_STALL_PATTERN_5_6,
    ixDIDT_SQ_EDC_STALL_PATTERN_7,
    ixDIDT_SQ_EDC_THRESHOLD,
    ixDIDT_SQ_EDC_CTRL,
    ixDIDT_TD_EDC_STALL_PATTERN_1_2,
    ixDIDT_TD_EDC_STALL_PATTERN_3_4,
    ixDIDT_TD_EDC_STALL_PATTERN_5_6,
    ixDIDT_TD_EDC_STALL_PATTERN_7,
    ixDIDT_TD_EDC_THRESHOLD,
    ixDIDT_TD_EDC_CTRL,
    ixDIDT_TCP_EDC_STALL_PATTERN_1_2,
    ixDIDT_TCP_EDC_STALL_PATTERN_3_4,
    ixDIDT_TCP_EDC_STALL_PATTERN_5_6,
    ixDIDT_TCP_EDC_STALL_PATTERN_7,
    ixDIDT_TCP_EDC_THRESHOLD,
    ixDIDT_TCP_EDC_CTRL,
    ixDIDT_DB_EDC_STALL_PATTERN_1_2,
    ixDIDT_DB_EDC_STALL_PATTERN_3_4,
    ixDIDT_DB_EDC_STALL_PATTERN_5_6,
    ixDIDT_DB_EDC_STALL_PATTERN_7,
    ixDIDT_DB_EDC_THRESHOLD,
    ixDIDT_DB_EDC_CTRL,
    0xFFFFFFFF /* end of list */
};

static const unsigned long PhwVIslands_Magic = (unsigned long)(PHM_VIslands_Magic);
static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
                enum pp_clock_type type, uint32_t mask);
static int smu7_notify_has_display(struct pp_hwmgr *hwmgr);

static struct smu7_power_state *cast_phw_smu7_power_state(
                                  struct pp_hw_power_state *hw_ps)
{
        PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
                                "Invalid Powerstate Type!",
                                 return NULL);

        return (struct smu7_power_state *)hw_ps;
}

static const struct smu7_power_state *cast_const_phw_smu7_power_state(
                                 const struct pp_hw_power_state *hw_ps)
{
        PP_ASSERT_WITH_CODE((PhwVIslands_Magic == hw_ps->magic),
                                "Invalid Powerstate Type!",
                                 return NULL);

        return (const struct smu7_power_state *)hw_ps;
}

/**
 * smu7_get_mc_microcode_version - Find the MC microcode version and store it in the HwMgr struct
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   always 0
 */
static int smu7_get_mc_microcode_version(struct pp_hwmgr *hwmgr)
{
        cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX, 0x9F);

        hwmgr->microcode_version_info.MC = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);

        return 0;
}

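/*
 * Read the current PCIe link speed (LC_CURRENT_DATA_RATE) from the
 * indirect PCIe register space.
 */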
static uint16_t smu7_get_current_pcie_speed(struct pp_hwmgr *hwmgr)
{
        uint32_t speedCntl = 0;

        /* mmPCIE_PORT_INDEX was renamed to mmPCIE_INDEX */
        speedCntl = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__PCIE,
                        ixPCIE_LC_SPEED_CNTL);
        return((uint16_t)PHM_GET_FIELD(speedCntl,
                        PCIE_LC_SPEED_CNTL, LC_CURRENT_DATA_RATE));
}

static int smu7_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
{
        uint32_t link_width;

        /* mmPCIE_PORT_INDEX was renamed to mmPCIE_INDEX */
        link_width = PHM_READ_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
                        PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);

        PP_ASSERT_WITH_CODE((7 >= link_width),
                        "Invalid PCIe lane width!", return 0);

        return decode_pcie_lane_width(link_width);
}

/**
 * smu7_enable_smc_voltage_controller - Enable voltage control
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   always 0
 */
static int smu7_enable_smc_voltage_controller(struct pp_hwmgr *hwmgr)
{
        if (hwmgr->chip_id >= CHIP_POLARIS10 &&
            hwmgr->chip_id <= CHIP_VEGAM) {
                PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
                                CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI1, 0);
                PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
                                CGS_IND_REG__SMC, PWR_SVI2_PLANE1_LOAD, PSI0_EN, 0);
        }

        if (hwmgr->feature_mask & PP_SMC_VOLTAGE_CONTROL_MASK)
                smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Enable, NULL);

        return 0;
}

/**
 * smu7_voltage_control - Check whether voltage control is supported
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   true if a voltage control method is configured
 */
static bool smu7_voltage_control(const struct pp_hwmgr *hwmgr)
{
        const struct smu7_hwmgr *data =
                        (const struct smu7_hwmgr *)(hwmgr->backend);

        return (SMU7_VOLTAGE_CONTROL_NONE != data->voltage_control);
}

/**
 * smu7_enable_voltage_control - Enable voltage control
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   always 0
 */
static int smu7_enable_voltage_control(struct pp_hwmgr *hwmgr)
{
        /* enable voltage control */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        GENERAL_PWRMGT, VOLT_PWRMGT_EN, 1);

        return 0;
}

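/*
 * Build an SVI2 voltage table from a v0 clock/voltage dependency table:
 * one entry per dependency entry, with the SMIO mask and phase delay
 * cleared.
 */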
static int phm_get_svi2_voltage_table_v0(pp_atomctrl_voltage_table *voltage_table,
                struct phm_clock_voltage_dependency_table *voltage_dependency_table
                )
{
        uint32_t i;

        PP_ASSERT_WITH_CODE((NULL != voltage_table),
                        "Voltage table is NULL.", return -EINVAL;);

        voltage_table->mask_low = 0;
        voltage_table->phase_delay = 0;
        voltage_table->count = voltage_dependency_table->count;

        for (i = 0; i < voltage_dependency_table->count; i++) {
                voltage_table->entries[i].value =
                        voltage_dependency_table->entries[i].v;
                voltage_table->entries[i].smio_low = 0;
        }

        return 0;
}

/**
 * smu7_construct_voltage_tables - Create voltage tables.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   0 on success; a propagated error code if a table cannot be
 *           retrieved
 */
static int smu7_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)hwmgr->pptable;
        int result = 0;
        uint32_t tmp;

        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
                result = atomctrl_get_voltage_table_v3(hwmgr,
                                VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT,
                                &(data->mvdd_voltage_table));
                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve MVDD table.",
                                return result);
        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
                if (hwmgr->pp_table_version == PP_TABLE_V1)
                        result = phm_get_svi2_mvdd_voltage_table(&(data->mvdd_voltage_table),
                                        table_info->vdd_dep_on_mclk);
                else if (hwmgr->pp_table_version == PP_TABLE_V0)
                        result = phm_get_svi2_voltage_table_v0(&(data->mvdd_voltage_table),
                                        hwmgr->dyn_state.mvdd_dependency_on_mclk);

                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve SVI2 MVDD table from dependency table.",
                                return result;);
        }

        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->vddci_control) {
                result = atomctrl_get_voltage_table_v3(hwmgr,
                                VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT,
                                &(data->vddci_voltage_table));
                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve VDDCI table.",
                                return result);
        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vddci_control) {
                if (hwmgr->pp_table_version == PP_TABLE_V1)
                        result = phm_get_svi2_vddci_voltage_table(&(data->vddci_voltage_table),
                                        table_info->vdd_dep_on_mclk);
                else if (hwmgr->pp_table_version == PP_TABLE_V0)
                        result = phm_get_svi2_voltage_table_v0(&(data->vddci_voltage_table),
                                        hwmgr->dyn_state.vddci_dependency_on_mclk);
                PP_ASSERT_WITH_CODE((0 == result),
                                "Failed to retrieve SVI2 VDDCI table from dependency table.",
                                return result);
        }

        if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->vdd_gfx_control) {
                /* VDDGFX has only SVI2 voltage control */
                result = phm_get_svi2_vdd_voltage_table(&(data->vddgfx_voltage_table),
                                        table_info->vddgfx_lookup_table);
                PP_ASSERT_WITH_CODE((0 == result),
                        "Failed to retrieve SVI2 VDDGFX table from lookup table.", return result;);
        }

        if (SMU7_VOLTAGE_CONTROL_BY_GPIO == data->voltage_control) {
                result = atomctrl_get_voltage_table_v3(hwmgr,
                                        VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT,
                                        &data->vddc_voltage_table);
                PP_ASSERT_WITH_CODE((0 == result),
                        "Failed to retrieve VDDC table.", return result;);
        } else if (SMU7_VOLTAGE_CONTROL_BY_SVID2 == data->voltage_control) {

                if (hwmgr->pp_table_version == PP_TABLE_V0)
                        result = phm_get_svi2_voltage_table_v0(&data->vddc_voltage_table,
                                        hwmgr->dyn_state.vddc_dependency_on_mclk);
                else if (hwmgr->pp_table_version == PP_TABLE_V1)
                        result = phm_get_svi2_vdd_voltage_table(&(data->vddc_voltage_table),
                                table_info->vddc_lookup_table);

                PP_ASSERT_WITH_CODE((0 == result),
                        "Failed to retrieve SVI2 VDDC table from dependency table.", return result;);
        }

        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDC);
        PP_ASSERT_WITH_CODE(
                        (data->vddc_voltage_table.count <= tmp),
                "Too many voltage values for VDDC. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                                &(data->vddc_voltage_table)));

        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
        PP_ASSERT_WITH_CODE(
                        (data->vddgfx_voltage_table.count <= tmp),
                "Too many voltage values for VDDGFX. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                                &(data->vddgfx_voltage_table)));

        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDCI);
        PP_ASSERT_WITH_CODE(
                        (data->vddci_voltage_table.count <= tmp),
                "Too many voltage values for VDDCI. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                        &(data->vddci_voltage_table)));

        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_MVDD);
        PP_ASSERT_WITH_CODE(
                        (data->mvdd_voltage_table.count <= tmp),
                "Too many voltage values for MVDD. Trimming to fit state table.",
                        phm_trim_voltage_table_to_fit_state_table(tmp,
                                                &(data->mvdd_voltage_table)));

        return 0;
}

/**
 * smu7_program_static_screen_threshold_parameters - Programs static screen detection parameters
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   always 0
 */
static int smu7_program_static_screen_threshold_parameters(
                                                        struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        /* Set static screen threshold unit */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD_UNIT,
                        data->static_screen_threshold_unit);
        /* Set static screen threshold */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        CG_STATIC_SCREEN_PARAMETER, STATIC_SCREEN_THRESHOLD,
                        data->static_screen_threshold);

        return 0;
}

/**
 * smu7_enable_display_gap - Setup display gap for glitch free memory clock switching.
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   always 0
 */
static int smu7_enable_display_gap(struct pp_hwmgr *hwmgr)
{
        uint32_t display_gap =
                        cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                                        ixCG_DISPLAY_GAP_CNTL);

        display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
                        DISP_GAP, DISPLAY_GAP_IGNORE);

        display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL,
                        DISP_GAP_MCHG, DISPLAY_GAP_VBLANK);

        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        ixCG_DISPLAY_GAP_CNTL, display_gap);

        return 0;
}

/**
 * smu7_program_voting_clients - Programs activity state transition voting clients
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   always 0
 */
static int smu7_program_voting_clients(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        int i;

        /* Clear reset for voting clients before enabling DPM */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 0);
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 0);

        for (i = 0; i < 8; i++)
                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                                        ixCG_FREQ_TRAN_VOTING_0 + i * 4,
                                        data->voting_rights_clients[i]);
        return 0;
}

static int smu7_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
        int i;

        /* Reset voting clients before disabling DPM */
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
                        SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);

        for (i = 0; i < 8; i++)
                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                                ixCG_FREQ_TRAN_VOTING_0 + i * 4, 0);

        return 0;
}

/* Copy one arb setting to another and then switch the active set.
 * arb_src and arb_dest are each one of the MC_CG_ARB_FREQ_Fx constants.
 */
static int smu7_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
                uint32_t arb_src, uint32_t arb_dest)
{
        uint32_t mc_arb_dram_timing;
        uint32_t mc_arb_dram_timing2;
        uint32_t burst_time;
        uint32_t mc_cg_config;

        switch (arb_src) {
        case MC_CG_ARB_FREQ_F0:
                mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING);
                mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2);
                burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0);
                break;
        case MC_CG_ARB_FREQ_F1:
                mc_arb_dram_timing  = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1);
                mc_arb_dram_timing2 = cgs_read_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1);
                burst_time = PHM_READ_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1);
                break;
        default:
                return -EINVAL;
        }

        switch (arb_dest) {
        case MC_CG_ARB_FREQ_F0:
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
                PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE0, burst_time);
                break;
        case MC_CG_ARB_FREQ_F1:
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
                cgs_write_register(hwmgr->device, mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
                PHM_WRITE_FIELD(hwmgr->device, MC_ARB_BURST_TIME, STATE1, burst_time);
                break;
        default:
                return -EINVAL;
        }

        mc_cg_config = cgs_read_register(hwmgr->device, mmMC_CG_CONFIG);
        mc_cg_config |= 0x0000000F;
        cgs_write_register(hwmgr->device, mmMC_CG_CONFIG, mc_cg_config);
        PHM_WRITE_FIELD(hwmgr->device, MC_ARB_CG, CG_ARB_REQ, arb_dest);

        return 0;
}

static int smu7_reset_to_default(struct pp_hwmgr *hwmgr)
{
        return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ResetToDefaults, NULL);
}

/**
 * smu7_initial_switch_from_arbf0_to_f1 - Initial switch from ARB F0->F1
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 * Return:   0 on success; -EINVAL on failure
 *
 * This function is to be called from the SetPowerState table.
 */
static int smu7_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
{
        return smu7_copy_and_switch_arb_sets(hwmgr,
                        MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}

static int smu7_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
{
        uint32_t tmp;

        tmp = (cgs_read_ind_register(hwmgr->device,
                        CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
                        0x0000ff00) >> 8;

        if (tmp == MC_CG_ARB_FREQ_F0)
                return 0;

        return smu7_copy_and_switch_arb_sets(hwmgr,
                        tmp, MC_CG_ARB_FREQ_F0);
}

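/*
 * Map the CAIL PCIe link-speed support masks to the SMU's PCIe gen
 * encoding (0 = Gen1 ... 3 = Gen4), picking the fastest speed that both
 * the platform and the ASIC support.
 */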
static uint16_t smu7_override_pcie_speed(struct pp_hwmgr *hwmgr)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
        uint16_t pcie_gen = 0;

        if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 &&
            adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4)
                pcie_gen = 3;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 &&
                adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
                pcie_gen = 2;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 &&
                adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2)
                pcie_gen = 1;
        else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 &&
                adev->pm.pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1)
                pcie_gen = 0;

        return pcie_gen;
}

static uint16_t smu7_override_pcie_width(struct pp_hwmgr *hwmgr)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
        uint16_t pcie_width = 0;

        if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
                pcie_width = 16;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
                pcie_width = 12;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
                pcie_width = 8;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
                pcie_width = 4;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
                pcie_width = 2;
        else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
                pcie_width = 1;

        return pcie_width;
}

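/*
 * Build the default PCIe speed/lane DPM table, either from the PPTable's
 * PCIe table or from hardcoded min/max capabilities, reserving one extra
 * level at the end for the boot PCIe settings.
 */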
static int smu7_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        struct phm_ppt_v1_pcie_table *pcie_table = NULL;

        uint32_t i, max_entry;
        uint32_t tmp;

        PP_ASSERT_WITH_CODE((data->use_pcie_performance_levels ||
                        data->use_pcie_power_saving_levels), "No pcie performance levels!",
                        return -EINVAL);

        if (table_info != NULL)
                pcie_table = table_info->pcie_table;

        if (data->use_pcie_performance_levels &&
                        !data->use_pcie_power_saving_levels) {
                data->pcie_gen_power_saving = data->pcie_gen_performance;
                data->pcie_lane_power_saving = data->pcie_lane_performance;
        } else if (!data->use_pcie_performance_levels &&
                        data->use_pcie_power_saving_levels) {
                data->pcie_gen_performance = data->pcie_gen_power_saving;
                data->pcie_lane_performance = data->pcie_lane_power_saving;
        }
        tmp = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_LINK);
        phm_reset_single_dpm_table(&data->dpm_table.pcie_speed_table,
                                        tmp,
                                        MAX_REGULAR_DPM_NUMBER);

        if (pcie_table != NULL) {
                /* max_entry is used to make sure we reserve one PCIe level
                 * for the boot level (fix for A+A PSPP issue).
                 * If the PCIe table from the PPTable has a ULV entry plus
                 * 8 entries, ignore the last entry.
                 */
                max_entry = (tmp < pcie_table->count) ? tmp : pcie_table->count;
                for (i = 1; i < max_entry; i++) {
                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i - 1,
                                        get_pcie_gen_support(data->pcie_gen_cap,
                                                        pcie_table->entries[i].gen_speed),
                                        get_pcie_lane_support(data->pcie_lane_cap,
                                                        pcie_table->entries[i].lane_width));
                }
                data->dpm_table.pcie_speed_table.count = max_entry - 1;
                smum_update_smc_table(hwmgr, SMU_BIF_TABLE);
        } else {
                /* Hardcoded PCIe table */
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 0,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Min_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 1,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Min_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 2,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 3,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 4,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, 5,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                get_pcie_lane_support(data->pcie_lane_cap,
                                                PP_Max_PCIELane));

                data->dpm_table.pcie_speed_table.count = 6;
        }
        /* Populate the last level with the boot PCIe level, but do not increment count. */
        if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
                for (i = 0; i <= data->dpm_table.pcie_speed_table.count; i++)
                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table, i,
                                get_pcie_gen_support(data->pcie_gen_cap,
                                                PP_Max_PCIEGen),
                                data->vbios_boot_state.pcie_lane_bootup_value);
        } else {
                phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
                        data->dpm_table.pcie_speed_table.count,
                        get_pcie_gen_support(data->pcie_gen_cap,
                                        PP_Min_PCIEGen),
                        get_pcie_lane_support(data->pcie_lane_cap,
                                        PP_Max_PCIELane));

                if (data->pcie_dpm_key_disabled)
                        phm_setup_pcie_table_entry(&data->dpm_table.pcie_speed_table,
                                data->dpm_table.pcie_speed_table.count,
                                smu7_override_pcie_speed(hwmgr), smu7_override_pcie_width(hwmgr));
        }
        return 0;
}

static int smu7_reset_dpm_tables(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        memset(&(data->dpm_table), 0x00, sizeof(data->dpm_table));

        phm_reset_single_dpm_table(
                        &data->dpm_table.sclk_table,
                                smum_get_mac_definition(hwmgr,
                                        SMU_MAX_LEVELS_GRAPHICS),
                                        MAX_REGULAR_DPM_NUMBER);
        phm_reset_single_dpm_table(
                        &data->dpm_table.mclk_table,
                        smum_get_mac_definition(hwmgr,
                                SMU_MAX_LEVELS_MEMORY), MAX_REGULAR_DPM_NUMBER);

        phm_reset_single_dpm_table(
                        &data->dpm_table.vddc_table,
                                smum_get_mac_definition(hwmgr,
                                        SMU_MAX_LEVELS_VDDC),
                                        MAX_REGULAR_DPM_NUMBER);
        phm_reset_single_dpm_table(
                        &data->dpm_table.vddci_table,
                        smum_get_mac_definition(hwmgr,
                                SMU_MAX_LEVELS_VDDCI), MAX_REGULAR_DPM_NUMBER);

        phm_reset_single_dpm_table(
                        &data->dpm_table.mvdd_table,
                                smum_get_mac_definition(hwmgr,
                                        SMU_MAX_LEVELS_MVDD),
                                        MAX_REGULAR_DPM_NUMBER);
        return 0;
}

/*
 * Initialize all DPM state tables for SMU7 based on the dependency
 * tables.  The dynamic state patching function will then trim these
 * state tables to the allowed range based on the power policy or
 * external client requests, such as UVD requests, etc.
 */

static int smu7_setup_dpm_tables_v0(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_clock_voltage_dependency_table *allowed_vdd_sclk_table =
                hwmgr->dyn_state.vddc_dependency_on_sclk;
        struct phm_clock_voltage_dependency_table *allowed_vdd_mclk_table =
                hwmgr->dyn_state.vddc_dependency_on_mclk;
        struct phm_cac_leakage_table *std_voltage_table =
                hwmgr->dyn_state.cac_leakage_table;
        uint32_t i;

        PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table != NULL,
                "SCLK dependency table is missing. This table is mandatory", return -EINVAL);
        PP_ASSERT_WITH_CODE(allowed_vdd_sclk_table->count >= 1,
                "SCLK dependency table is empty. This table is mandatory", return -EINVAL);

        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
                "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table->count >= 1,
                "MCLK dependency table is empty. This table is mandatory", return -EINVAL);

        /* Initialize Sclk DPM table based on allowed Sclk values */
        data->dpm_table.sclk_table.count = 0;

        for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
                if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count-1].value !=
                                allowed_vdd_sclk_table->entries[i].clk) {
                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
                                allowed_vdd_sclk_table->entries[i].clk;
                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = (i == 0) ? 1 : 0;
                        data->dpm_table.sclk_table.count++;
                }
        }

        PP_ASSERT_WITH_CODE(allowed_vdd_mclk_table != NULL,
                "MCLK dependency table is missing. This table is mandatory", return -EINVAL);
        /* Initialize Mclk DPM table based on allowed Mclk values */
        data->dpm_table.mclk_table.count = 0;
        for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
                if (i == 0 || data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count-1].value !=
                        allowed_vdd_mclk_table->entries[i].clk) {
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
                                allowed_vdd_mclk_table->entries[i].clk;
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = (i == 0) ? 1 : 0;
                        data->dpm_table.mclk_table.count++;
                }
        }

        /* Initialize Vddc DPM table based on allowed Vddc values, and populate corresponding std values. */
        for (i = 0; i < allowed_vdd_sclk_table->count; i++) {
                data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_sclk_table->entries[i].v;
                data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage;
                /* param1 is for corresponding std voltage */
                data->dpm_table.vddc_table.dpm_levels[i].enabled = true;
        }

        data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count;
        allowed_vdd_mclk_table = hwmgr->dyn_state.vddci_dependency_on_mclk;

        if (NULL != allowed_vdd_mclk_table) {
                /* Initialize Vddci DPM table based on allowed Mclk values */
                for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
                        data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
                        data->dpm_table.vddci_table.dpm_levels[i].enabled = true;
                }
                data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count;
        }

        allowed_vdd_mclk_table = hwmgr->dyn_state.mvdd_dependency_on_mclk;

        if (NULL != allowed_vdd_mclk_table) {
                /*
                 * Initialize MVDD DPM table based on allowed Mclk
                 * values
                 */
                for (i = 0; i < allowed_vdd_mclk_table->count; i++) {
                        data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v;
                        data->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
                }
                data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count;
        }

        return 0;
}

static int smu7_setup_dpm_tables_v1(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        uint32_t i;

        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
        struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;

        if (table_info == NULL)
                return -EINVAL;

        dep_sclk_table = table_info->vdd_dep_on_sclk;
        dep_mclk_table = table_info->vdd_dep_on_mclk;

        PP_ASSERT_WITH_CODE(dep_sclk_table != NULL,
                        "SCLK dependency table is missing.",
                        return -EINVAL);
        PP_ASSERT_WITH_CODE(dep_sclk_table->count >= 1,
                        "SCLK dependency table count is 0.",
                        return -EINVAL);

        PP_ASSERT_WITH_CODE(dep_mclk_table != NULL,
                        "MCLK dependency table is missing.",
                        return -EINVAL);
        PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
                        "MCLK dependency table count is 0.",
                        return -EINVAL);

        /* Initialize Sclk DPM table based on allowed Sclk values */
        data->dpm_table.sclk_table.count = 0;
        for (i = 0; i < dep_sclk_table->count; i++) {
                if (i == 0 || data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count - 1].value !=
                                                dep_sclk_table->entries[i].clk) {

                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
                                        dep_sclk_table->entries[i].clk;

                        data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled =
                                        (i == 0) ? true : false;
                        data->dpm_table.sclk_table.count++;
                }
        }
        if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
                hwmgr->platform_descriptor.overdriveLimit.engineClock = dep_sclk_table->entries[i-1].clk;
        /* Initialize Mclk DPM table based on allowed Mclk values */
        data->dpm_table.mclk_table.count = 0;
        for (i = 0; i < dep_mclk_table->count; i++) {
                if (i == 0 || data->dpm_table.mclk_table.dpm_levels
                                [data->dpm_table.mclk_table.count - 1].value !=
                                                dep_mclk_table->entries[i].clk) {
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
                                                        dep_mclk_table->entries[i].clk;
                        data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled =
                                                        (i == 0) ? true : false;
                        data->dpm_table.mclk_table.count++;
                }
        }

        if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
                hwmgr->platform_descriptor.overdriveLimit.memoryClock = dep_mclk_table->entries[i-1].clk;
        return 0;
}

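/*
 * Seed the OverDrive (ODN) tables from the golden (default) SCLK/MCLK
 * DPM tables and their voltage dependency tables.
 */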
static int smu7_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        uint32_t i;

        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
        struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
        struct phm_odn_performance_level *entries;

        if (table_info == NULL)
                return -EINVAL;

        dep_sclk_table = table_info->vdd_dep_on_sclk;
        dep_mclk_table = table_info->vdd_dep_on_mclk;

        odn_table->odn_core_clock_dpm_levels.num_of_pl =
                                                data->golden_dpm_table.sclk_table.count;
        entries = odn_table->odn_core_clock_dpm_levels.entries;
        for (i = 0; i < data->golden_dpm_table.sclk_table.count; i++) {
                entries[i].clock = data->golden_dpm_table.sclk_table.dpm_levels[i].value;
                entries[i].enabled = true;
                entries[i].vddc = dep_sclk_table->entries[i].vddc;
        }

        smu_get_voltage_dependency_table_ppt_v1(dep_sclk_table,
                (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk));

        odn_table->odn_memory_clock_dpm_levels.num_of_pl =
                                                data->golden_dpm_table.mclk_table.count;
        entries = odn_table->odn_memory_clock_dpm_levels.entries;
        for (i = 0; i < data->golden_dpm_table.mclk_table.count; i++) {
                entries[i].clock = data->golden_dpm_table.mclk_table.dpm_levels[i].value;
                entries[i].enabled = true;
                entries[i].vddc = dep_mclk_table->entries[i].vddc;
        }

        smu_get_voltage_dependency_table_ppt_v1(dep_mclk_table,
                (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk));

        return 0;
}

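/*
 * Derive the OverDrive VDDC range from the VBIOS, falling back to the
 * SCLK dependency table's first/last entries when the VBIOS values are
 * missing or implausible (0, or above 2000, i.e. outside a plausible
 * mV range).
 */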
static void smu7_setup_voltage_range_from_vbios(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        uint32_t min_vddc = 0;
        uint32_t max_vddc = 0;

        if (!table_info)
                return;

        dep_sclk_table = table_info->vdd_dep_on_sclk;

        atomctrl_get_voltage_range(hwmgr, &max_vddc, &min_vddc);

        if (min_vddc == 0 || min_vddc > 2000
                || min_vddc > dep_sclk_table->entries[0].vddc)
                min_vddc = dep_sclk_table->entries[0].vddc;

        if (max_vddc == 0 || max_vddc > 2000
                || max_vddc < dep_sclk_table->entries[dep_sclk_table->count-1].vddc)
                max_vddc = dep_sclk_table->entries[dep_sclk_table->count-1].vddc;

        data->odn_dpm_table.min_vddc = min_vddc;
        data->odn_dpm_table.max_vddc = max_vddc;
}

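/*
 * Compare the current DPM tables against the ODN copies and set the
 * DPMTABLE_OD_UPDATE_* flags for whatever the user has overridden, so
 * that the SMU tables get repopulated accordingly.
 */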
static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        uint32_t i;

        struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
        struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;

        if (table_info == NULL)
                return;

        for (i = 0; i < data->dpm_table.sclk_table.count; i++) {
                if (odn_table->odn_core_clock_dpm_levels.entries[i].clock !=
                                        data->dpm_table.sclk_table.dpm_levels[i].value) {
                        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
                        break;
                }
        }

        for (i = 0; i < data->dpm_table.mclk_table.count; i++) {
                if (odn_table->odn_memory_clock_dpm_levels.entries[i].clock !=
                                        data->dpm_table.mclk_table.dpm_levels[i].value) {
                        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
                        break;
                }
        }

        dep_table = table_info->vdd_dep_on_mclk;
        odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_mclk);

        for (i = 0; i < dep_table->count; i++) {
                if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
                        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
                        return;
                }
        }

        dep_table = table_info->vdd_dep_on_sclk;
        odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk);
        for (i = 0; i < dep_table->count; i++) {
                if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
                        data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
                        return;
                }
        }
        if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
                data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC;
                data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_OD_UPDATE_MCLK;
        }
}

static int smu7_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        smu7_reset_dpm_tables(hwmgr);

        if (hwmgr->pp_table_version == PP_TABLE_V1)
                smu7_setup_dpm_tables_v1(hwmgr);
        else if (hwmgr->pp_table_version == PP_TABLE_V0)
                smu7_setup_dpm_tables_v0(hwmgr);

        smu7_setup_default_pcie_table(hwmgr);

        /* save a copy of the default DPM table */
        memcpy(&(data->golden_dpm_table), &(data->dpm_table),
                        sizeof(struct smu7_dpm_table));

        /* initialize ODN table */
        if (hwmgr->od_enabled) {
                if (data->odn_dpm_table.max_vddc) {
                        smu7_check_dpm_table_updated(hwmgr);
                } else {
                        smu7_setup_voltage_range_from_vbios(hwmgr);
                        smu7_odn_initial_default_setting(hwmgr);
                }
        }
        return 0;
}

static int smu7_enable_vrhot_gpio_interrupt(struct pp_hwmgr *hwmgr)
{
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_RegulatorHot))
                return smum_send_msg_to_smc(hwmgr,
                                PPSMC_MSG_EnableVRHotGPIOInterrupt,
                                NULL);

        return 0;
}

static int smu7_enable_sclk_control(struct pp_hwmgr *hwmgr)
{
        PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
                        SCLK_PWRMGT_OFF, 0);
        return 0;
}

static int smu7_enable_ulv(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        if (data->ulv_supported)
                return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableULV, NULL);

        return 0;
}

static int smu7_disable_ulv(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);

        if (data->ulv_supported)
                return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableULV, NULL);

        return 0;
}

static int smu7_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_SclkDeepSleep)) {
                if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MASTER_DeepSleep_ON, NULL))
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to enable Master Deep Sleep switch failed!",
                                        return -EINVAL);
        } else {
                if (smum_send_msg_to_smc(hwmgr,
                                PPSMC_MSG_MASTER_DeepSleep_OFF,
                                NULL)) {
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to disable Master Deep Sleep switch failed!",
                                        return -EINVAL);
                }
        }

        return 0;
}

static int smu7_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
{
        if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
                        PHM_PlatformCaps_SclkDeepSleep)) {
                if (smum_send_msg_to_smc(hwmgr,
                                PPSMC_MSG_MASTER_DeepSleep_OFF,
                                NULL)) {
                        PP_ASSERT_WITH_CODE(false,
                                        "Attempt to disable Master Deep Sleep switch failed!",
                                        return -EINVAL);
                }
        }

        return 0;
}

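/*
 * Set SMU7_VCE_SCLK_HANDSHAKE_DISABLE in the SMU's HandshakeDisables
 * soft register, which should stop the SMC from tying SCLK DPM to VCE
 * activity.
 */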
static int smu7_disable_sclk_vce_handshake(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        uint32_t soft_register_value = 0;
        uint32_t handshake_disables_offset = data->soft_regs_start
                                + smum_get_offsetof(hwmgr,
                                        SMU_SoftRegisters, HandshakeDisables);

        soft_register_value = cgs_read_ind_register(hwmgr->device,
                                CGS_IND_REG__SMC, handshake_disables_offset);
        soft_register_value |= SMU7_VCE_SCLK_HANDSHAKE_DISABLE;
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        handshake_disables_offset, soft_register_value);
        return 0;
}

static int smu7_disable_handshake_uvd(struct pp_hwmgr *hwmgr)
{
        struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
        uint32_t soft_register_value = 0;
        uint32_t handshake_disables_offset = data->soft_regs_start
                                + smum_get_offsetof(hwmgr,
                                        SMU_SoftRegisters, HandshakeDisables);

        soft_register_value = cgs_read_ind_register(hwmgr->device,
                                CGS_IND_REG__SMC, handshake_disables_offset);
        soft_register_value |= smum_get_mac_definition(hwmgr,
                                        SMU_UVD_MCLK_HANDSHAKE_DISABLE);
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
                        handshake_disables_offset, soft_register_value);
        return 0;
}

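/*
 * Enable SCLK and MCLK DPM in the SMC, disabling the VCE/UVD handshakes
 * where required, and program the LCAC MC/CPL control registers with the
 * per-ASIC magic values below.
 */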
1195 static int smu7_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1196 {
1197         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1198
1199         /* enable SCLK dpm */
1200         if (!data->sclk_dpm_key_disabled) {
1201                 if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1202                     hwmgr->chip_id <= CHIP_VEGAM)
1203                         smu7_disable_sclk_vce_handshake(hwmgr);
1204
1205                 PP_ASSERT_WITH_CODE(
1206                 (0 == smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Enable, NULL)),
1207                 "Failed to enable SCLK DPM during DPM Start Function!",
1208                 return -EINVAL);
1209         }
1210
1211         /* enable MCLK dpm */
1212         if (!data->mclk_dpm_key_disabled) {
1213                 if (!(hwmgr->feature_mask & PP_UVD_HANDSHAKE_MASK))
1214                         smu7_disable_handshake_uvd(hwmgr);
1215
1216                 PP_ASSERT_WITH_CODE(
1217                                 (0 == smum_send_msg_to_smc(hwmgr,
1218                                                 PPSMC_MSG_MCLKDPM_Enable,
1219                                                 NULL)),
1220                                 "Failed to enable MCLK DPM during DPM Start Function!",
1221                                 return -EINVAL);
1222
1223                 if ((hwmgr->chip_family == AMDGPU_FAMILY_CI) ||
1224                     (hwmgr->chip_id == CHIP_POLARIS10) ||
1225                     (hwmgr->chip_id == CHIP_POLARIS11) ||
1226                     (hwmgr->chip_id == CHIP_POLARIS12) ||
1227                     (hwmgr->chip_id == CHIP_TONGA) ||
1228                     (hwmgr->chip_id == CHIP_TOPAZ))
1229                         PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
1230
1231
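                /*
                 * Program the per-block LCAC (local CAC) controllers before
                 * MCLK DPM starts sampling. On VI parts the ixLCAC_* defines
                 * are available; on CI the raw SMC indirect offsets
                 * (0xc0400dxx) are presumed to address the equivalent
                 * MC0/MC1/CPL control registers. VEGAM uses a different
                 * enable value for the MC blocks (0x400009 vs 0x400005).
                 */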
1232                 if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1233                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x5);
1234                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x5);
1235                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x100005);
1236                         udelay(10);
1237                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d30, 0x400005);
1238                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d3c, 0x400005);
1239                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 0xc0400d80, 0x500005);
1240                 } else {
1241                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
1242                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x5);
1243                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x100005);
1244                         udelay(10);
1245                         if (hwmgr->chip_id == CHIP_VEGAM) {
1246                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400009);
1247                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400009);
1248                         } else {
1249                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x400005);
1250                                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC1_CNTL, 0x400005);
1251                         }
1252                         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_CPL_CNTL, 0x500005);
1253                 }
1254         }
1255
1256         return 0;
1257 }
1258
1259 static int smu7_start_dpm(struct pp_hwmgr *hwmgr)
1260 {
1261         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1262
1263         /*enable general power management */
1264
1265         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1266                         GLOBAL_PWRMGT_EN, 1);
1267
1268         /* enable sclk deep sleep */
1269
1270         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1271                         DYNAMIC_PM_EN, 1);
1272
1273         /* prepare for PCIE DPM */
1274
1275         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1276                         data->soft_regs_start +
1277                         smum_get_offsetof(hwmgr, SMU_SoftRegisters,
1278                                                 VoltageChangeTimeout), 0x1000);
1279         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__PCIE,
1280                         SWRST_COMMAND_1, RESETLC, 0x0);
1281
1282         if (hwmgr->chip_family == AMDGPU_FAMILY_CI)
1283                 cgs_write_register(hwmgr->device, 0x1488,
1284                         (cgs_read_register(hwmgr->device, 0x1488) & ~0x1));
1285
1286         if (smu7_enable_sclk_mclk_dpm(hwmgr)) {
1287                 pr_err("Failed to enable SCLK DPM and MCLK DPM!\n");
1288                 return -EINVAL;
1289         }
1290
1291         /* enable PCIE dpm */
1292         if (!data->pcie_dpm_key_disabled) {
1293                 PP_ASSERT_WITH_CODE(
1294                                 (0 == smum_send_msg_to_smc(hwmgr,
1295                                                 PPSMC_MSG_PCIeDPM_Enable,
1296                                                 NULL)),
1297                                 "Failed to enable pcie DPM during DPM Start Function!",
1298                                 return -EINVAL);
1299         } else {
1300                 PP_ASSERT_WITH_CODE(
1301                                 (0 == smum_send_msg_to_smc(hwmgr,
1302                                                 PPSMC_MSG_PCIeDPM_Disable,
1303                                                 NULL)),
1304                                 "Failed to disable pcie DPM during DPM Start Function!",
1305                                 return -EINVAL);
1306         }
1307
1308         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1309                                 PHM_PlatformCaps_Falcon_QuickTransition)) {
1310                 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(hwmgr,
1311                                 PPSMC_MSG_EnableACDCGPIOInterrupt,
1312                                 NULL)),
1313                                 "Failed to enable AC DC GPIO Interrupt!",
1314                                 );
1315         }
1316
1317         return 0;
1318 }
1319
1320 static int smu7_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
1321 {
1322         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1323
1324         /* disable SCLK dpm */
1325         if (!data->sclk_dpm_key_disabled) {
1326                 PP_ASSERT_WITH_CODE(smum_is_dpm_running(hwmgr),
1327                                 "Trying to disable SCLK DPM when DPM is disabled",
1328                                 return 0);
1329                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DPM_Disable, NULL);
1330         }
1331
1332         /* disable MCLK dpm */
1333         if (!data->mclk_dpm_key_disabled) {
1334                 PP_ASSERT_WITH_CODE(smum_is_dpm_running(hwmgr),
1335                                 "Trying to disable MCLK DPM when DPM is disabled",
1336                                 return 0);
1337                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_MCLKDPM_Disable, NULL);
1338         }
1339
1340         return 0;
1341 }
1342
1343 static int smu7_stop_dpm(struct pp_hwmgr *hwmgr)
1344 {
1345         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1346
1347         /* disable general power management */
1348         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1349                         GLOBAL_PWRMGT_EN, 0);
1350         /* disable sclk deep sleep */
1351         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
1352                         DYNAMIC_PM_EN, 0);
1353
1354         /* disable PCIE dpm */
1355         if (!data->pcie_dpm_key_disabled) {
1356                 PP_ASSERT_WITH_CODE(
1357                                 (smum_send_msg_to_smc(hwmgr,
1358                                                 PPSMC_MSG_PCIeDPM_Disable,
1359                                                 NULL) == 0),
1360                                 "Failed to disable pcie DPM during DPM Stop Function!",
1361                                 return -EINVAL);
1362         }
1363
1364         smu7_disable_sclk_mclk_dpm(hwmgr);
1365
1366         PP_ASSERT_WITH_CODE(smum_is_dpm_running(hwmgr),
1367                         "Trying to disable voltage DPM when DPM is disabled",
1368                         return 0);
1369
1370         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_Voltage_Cntl_Disable, NULL);
1371
1372         return 0;
1373 }
1374
1375 static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
1376 {
1377         bool protection;
1378         enum DPM_EVENT_SRC src;
1379
1380         switch (sources) {
1381         default:
1382                 pr_err("Unknown throttling event sources.\n");
1383                 fallthrough;
1384         case 0:
1385                 protection = false;
1386                 /* src is unused */
1387                 break;
1388         case (1 << PHM_AutoThrottleSource_Thermal):
1389                 protection = true;
1390                 src = DPM_EVENT_SRC_DIGITAL;
1391                 break;
1392         case (1 << PHM_AutoThrottleSource_External):
1393                 protection = true;
1394                 src = DPM_EVENT_SRC_EXTERNAL;
1395                 break;
1396         case (1 << PHM_AutoThrottleSource_External) |
1397                         (1 << PHM_AutoThrottleSource_Thermal):
1398                 protection = true;
1399                 src = DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
1400                 break;
1401         }
1402         /* Order matters - don't enable thermal protection for the wrong source. */
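        /*
         * THERMAL_PROTECTION_DIS is a disable bit: it is written as 0
         * (protection enabled) only when the platform actually exposes a
         * thermal controller, hence the negated phm_cap_enabled() below.
         */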
1403         if (protection) {
1404                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, CG_THERMAL_CTRL,
1405                                 DPM_EVENT_SRC, src);
1406                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1407                                 THERMAL_PROTECTION_DIS,
1408                                 !phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1409                                                 PHM_PlatformCaps_ThermalController));
1410         } else
1411                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
1412                                 THERMAL_PROTECTION_DIS, 1);
1413 }
1414
1415 static int smu7_enable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1416                 PHM_AutoThrottleSource source)
1417 {
1418         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1419
1420         if (!(data->active_auto_throttle_sources & (1 << source))) {
1421                 data->active_auto_throttle_sources |= 1 << source;
1422                 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1423         }
1424         return 0;
1425 }
1426
1427 static int smu7_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1428 {
1429         return smu7_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1430 }
1431
1432 static int smu7_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
1433                 PHM_AutoThrottleSource source)
1434 {
1435         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1436
1437         if (data->active_auto_throttle_sources & (1 << source)) {
1438                 data->active_auto_throttle_sources &= ~(1 << source);
1439                 smu7_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
1440         }
1441         return 0;
1442 }
1443
1444 static int smu7_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
1445 {
1446         return smu7_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
1447 }
1448
1449 static int smu7_pcie_performance_request(struct pp_hwmgr *hwmgr)
1450 {
1451         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1452         data->pcie_performance_request = true;
1453
1454         return 0;
1455 }
1456
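/*
 * cac_config_regs is a 0xFFFFFFFF-terminated list of DIDT register offsets;
 * each offset is written with the value at the same index in the EDC leakage
 * table pulled from the VBIOS, so the two arrays must stay in lockstep.
 */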
1457 static int smu7_program_edc_didt_registers(struct pp_hwmgr *hwmgr,
1458                                            uint32_t *cac_config_regs,
1459                                            AtomCtrl_EDCLeakgeTable *edc_leakage_table)
1460 {
1461         uint32_t value, i = 0;
1462
1463         while (cac_config_regs[i] != 0xFFFFFFFF) {
1464                 value = edc_leakage_table->DIDT_REG[i];
1465                 cgs_write_ind_register(hwmgr->device,
1466                                        CGS_IND_REG__DIDT,
1467                                        cac_config_regs[i],
1468                                        value);
1469                 i++;
1470         }
1471
1472         return 0;
1473 }
1474
1475 static int smu7_populate_edc_leakage_registers(struct pp_hwmgr *hwmgr)
1476 {
1477         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1478         int ret = 0;
1479
1480         if (!data->disable_edc_leakage_controller &&
1481             data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
1482             data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
1483                 ret = smu7_program_edc_didt_registers(hwmgr,
1484                                                       DIDTEDCConfig_P12,
1485                                                       &data->edc_leakage_table);
1486                 if (ret)
1487                         return ret;
1488
1489                 ret = smum_send_msg_to_smc(hwmgr,
1490                                            (PPSMC_Msg)PPSMC_MSG_EnableEDCController,
1491                                            NULL);
1492         } else {
1493                 ret = smum_send_msg_to_smc(hwmgr,
1494                                            (PPSMC_Msg)PPSMC_MSG_DisableEDCController,
1495                                            NULL);
1496         }
1497
1498         return ret;
1499 }
1500
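/*
 * PP_ASSERT_WITH_CODE(cond, msg, action) logs msg and runs action when cond
 * is false. The enable sequence below deliberately keeps going on most
 * failures: each step folds its error into result via "result = tmp_result"
 * instead of returning, so one failed feature does not abort DPM bring-up.
 */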
1501 static int smu7_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1502 {
1503         int tmp_result = 0;
1504         int result = 0;
1505
1506         if (smu7_voltage_control(hwmgr)) {
1507                 tmp_result = smu7_enable_voltage_control(hwmgr);
1508                 PP_ASSERT_WITH_CODE(tmp_result == 0,
1509                                 "Failed to enable voltage control!",
1510                                 result = tmp_result);
1511
1512                 tmp_result = smu7_construct_voltage_tables(hwmgr);
1513                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1514                                 "Failed to construct voltage tables!",
1515                                 result = tmp_result);
1516         }
1517         smum_initialize_mc_reg_table(hwmgr);
1518
1519         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1520                         PHM_PlatformCaps_EngineSpreadSpectrumSupport))
1521                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1522                                 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 1);
1523
1524         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1525                         PHM_PlatformCaps_ThermalController))
1526                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1527                                 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 0);
1528
1529         tmp_result = smu7_program_static_screen_threshold_parameters(hwmgr);
1530         PP_ASSERT_WITH_CODE((0 == tmp_result),
1531                         "Failed to program static screen threshold parameters!",
1532                         result = tmp_result);
1533
1534         tmp_result = smu7_enable_display_gap(hwmgr);
1535         PP_ASSERT_WITH_CODE((0 == tmp_result),
1536                         "Failed to enable display gap!", result = tmp_result);
1537
1538         tmp_result = smu7_program_voting_clients(hwmgr);
1539         PP_ASSERT_WITH_CODE((0 == tmp_result),
1540                         "Failed to program voting clients!", result = tmp_result);
1541
1542         tmp_result = smum_process_firmware_header(hwmgr);
1543         PP_ASSERT_WITH_CODE((0 == tmp_result),
1544                         "Failed to process firmware header!", result = tmp_result);
1545
1546         if (hwmgr->chip_id != CHIP_VEGAM) {
1547                 tmp_result = smu7_initial_switch_from_arbf0_to_f1(hwmgr);
1548                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1549                                 "Failed to initialize switch from ArbF0 to F1!",
1550                                 result = tmp_result);
1551         }
1552
1553         result = smu7_setup_default_dpm_tables(hwmgr);
1554         PP_ASSERT_WITH_CODE(0 == result,
1555                         "Failed to setup default DPM tables!", return result);
1556
1557         tmp_result = smum_init_smc_table(hwmgr);
1558         PP_ASSERT_WITH_CODE((0 == tmp_result),
1559                         "Failed to initialize SMC table!", result = tmp_result);
1560
1561         tmp_result = smu7_enable_vrhot_gpio_interrupt(hwmgr);
1562         PP_ASSERT_WITH_CODE((0 == tmp_result),
1563                         "Failed to enable VR hot GPIO interrupt!", result = tmp_result);
1564
1565         if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1566             hwmgr->chip_id <= CHIP_VEGAM) {
1567                 tmp_result = smu7_notify_has_display(hwmgr);
1568                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1569                                 "Failed to enable display setting!", result = tmp_result);
1570         } else {
1571                 smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL);
1572         }
1573
1574         if (hwmgr->chip_id >= CHIP_POLARIS10 &&
1575             hwmgr->chip_id <= CHIP_VEGAM) {
1576                 tmp_result = smu7_populate_edc_leakage_registers(hwmgr);
1577                 PP_ASSERT_WITH_CODE((0 == tmp_result),
1578                                 "Failed to populate edc leakage registers!", result = tmp_result);
1579         }
1580
1581         tmp_result = smu7_enable_sclk_control(hwmgr);
1582         PP_ASSERT_WITH_CODE((0 == tmp_result),
1583                         "Failed to enable SCLK control!", result = tmp_result);
1584
1585         tmp_result = smu7_enable_smc_voltage_controller(hwmgr);
1586         PP_ASSERT_WITH_CODE((0 == tmp_result),
1587                         "Failed to enable voltage control!", result = tmp_result);
1588
1589         tmp_result = smu7_enable_ulv(hwmgr);
1590         PP_ASSERT_WITH_CODE((0 == tmp_result),
1591                         "Failed to enable ULV!", result = tmp_result);
1592
1593         tmp_result = smu7_enable_deep_sleep_master_switch(hwmgr);
1594         PP_ASSERT_WITH_CODE((0 == tmp_result),
1595                         "Failed to enable deep sleep master switch!", result = tmp_result);
1596
1597         tmp_result = smu7_enable_didt_config(hwmgr);
1598         PP_ASSERT_WITH_CODE((tmp_result == 0),
1599                         "Failed to enable DIDT!", result = tmp_result);
1600
1601         tmp_result = smu7_start_dpm(hwmgr);
1602         PP_ASSERT_WITH_CODE((0 == tmp_result),
1603                         "Failed to start DPM!", result = tmp_result);
1604
1605         tmp_result = smu7_enable_smc_cac(hwmgr);
1606         PP_ASSERT_WITH_CODE((0 == tmp_result),
1607                         "Failed to enable SMC CAC!", result = tmp_result);
1608
1609         tmp_result = smu7_enable_power_containment(hwmgr);
1610         PP_ASSERT_WITH_CODE((0 == tmp_result),
1611                         "Failed to enable power containment!", result = tmp_result);
1612
1613         tmp_result = smu7_power_control_set_level(hwmgr);
1614         PP_ASSERT_WITH_CODE((0 == tmp_result),
1615                         "Failed to power control set level!", result = tmp_result);
1616
1617         tmp_result = smu7_enable_thermal_auto_throttle(hwmgr);
1618         PP_ASSERT_WITH_CODE((0 == tmp_result),
1619                         "Failed to enable thermal auto throttle!", result = tmp_result);
1620
1621         tmp_result = smu7_pcie_performance_request(hwmgr);
1622         PP_ASSERT_WITH_CODE((0 == tmp_result),
1623                         "pcie performance request failed!", result = tmp_result);
1624
1625         return 0;
1626 }
1627
1628 static int smu7_avfs_control(struct pp_hwmgr *hwmgr, bool enable)
1629 {
1630         if (!hwmgr->avfs_supported)
1631                 return 0;
1632
1633         if (enable) {
1634                 if (!PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1635                                 CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1636                         PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1637                                         hwmgr, PPSMC_MSG_EnableAvfs, NULL),
1638                                         "Failed to enable AVFS!",
1639                                         return -EINVAL);
1640                 }
1641         } else if (PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device,
1642                         CGS_IND_REG__SMC, FEATURE_STATUS, AVS_ON)) {
1643                 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(
1644                                 hwmgr, PPSMC_MSG_DisableAvfs, NULL),
1645                                 "Failed to disable AVFS!",
1646                                 return -EINVAL);
1647         }
1648
1649         return 0;
1650 }
1651
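/*
 * Overdrive updates interact with AVFS as follows: a user VDDC override
 * makes the fused voltage/frequency curve stale, so AVFS is simply turned
 * off; an SCLK-only change keeps the curve valid but AVFS is bounced
 * (off, then on) so the firmware re-evaluates; otherwise make sure it is
 * running. (Rationale as read from the code below, not from documentation.)
 */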
1652 static int smu7_update_avfs(struct pp_hwmgr *hwmgr)
1653 {
1654         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1655
1656         if (!hwmgr->avfs_supported)
1657                 return 0;
1658
1659         if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
1660                 smu7_avfs_control(hwmgr, false);
1661         } else if (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
1662                 smu7_avfs_control(hwmgr, false);
1663                 smu7_avfs_control(hwmgr, true);
1664         } else {
1665                 smu7_avfs_control(hwmgr, true);
1666         }
1667
1668         return 0;
1669 }
1670
1671 static int smu7_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
1672 {
1673         int tmp_result, result = 0;
1674
1675         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1676                         PHM_PlatformCaps_ThermalController))
1677                 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1678                                 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
1679
1680         tmp_result = smu7_disable_power_containment(hwmgr);
1681         PP_ASSERT_WITH_CODE((tmp_result == 0),
1682                         "Failed to disable power containment!", result = tmp_result);
1683
1684         tmp_result = smu7_disable_smc_cac(hwmgr);
1685         PP_ASSERT_WITH_CODE((tmp_result == 0),
1686                         "Failed to disable SMC CAC!", result = tmp_result);
1687
1688         tmp_result = smu7_disable_didt_config(hwmgr);
1689         PP_ASSERT_WITH_CODE((tmp_result == 0),
1690                         "Failed to disable DIDT!", result = tmp_result);
1691
1692         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1693                         CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
1694         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1695                         GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
1696
1697         tmp_result = smu7_disable_thermal_auto_throttle(hwmgr);
1698         PP_ASSERT_WITH_CODE((tmp_result == 0),
1699                         "Failed to disable thermal auto throttle!", result = tmp_result);
1700
1701         tmp_result = smu7_avfs_control(hwmgr, false);
1702         PP_ASSERT_WITH_CODE((tmp_result == 0),
1703                         "Failed to disable AVFS!", result = tmp_result);
1704
1705         tmp_result = smu7_stop_dpm(hwmgr);
1706         PP_ASSERT_WITH_CODE((tmp_result == 0),
1707                         "Failed to stop DPM!", result = tmp_result);
1708
1709         tmp_result = smu7_disable_deep_sleep_master_switch(hwmgr);
1710         PP_ASSERT_WITH_CODE((tmp_result == 0),
1711                         "Failed to disable deep sleep master switch!", result = tmp_result);
1712
1713         tmp_result = smu7_disable_ulv(hwmgr);
1714         PP_ASSERT_WITH_CODE((tmp_result == 0),
1715                         "Failed to disable ULV!", result = tmp_result);
1716
1717         tmp_result = smu7_clear_voting_clients(hwmgr);
1718         PP_ASSERT_WITH_CODE((tmp_result == 0),
1719                         "Failed to clear voting clients!", result = tmp_result);
1720
1721         tmp_result = smu7_reset_to_default(hwmgr);
1722         PP_ASSERT_WITH_CODE((tmp_result == 0),
1723                         "Failed to reset to default!", result = tmp_result);
1724
1725         tmp_result = smum_stop_smc(hwmgr);
1726         PP_ASSERT_WITH_CODE((tmp_result == 0),
1727                         "Failed to stop smc!", result = tmp_result);
1728
1729         tmp_result = smu7_force_switch_to_arbf0(hwmgr);
1730         PP_ASSERT_WITH_CODE((tmp_result == 0),
1731                         "Failed to force to switch arbf0!", result = tmp_result);
1732
1733         return result;
1734 }
1735
1736 static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
1737 {
1738         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1739         struct phm_ppt_v1_information *table_info =
1740                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
1741         struct amdgpu_device *adev = hwmgr->adev;
1742         uint8_t tmp1, tmp2;
1743         uint16_t tmp3 = 0;
1744
1745         data->dll_default_on = false;
1746         data->mclk_dpm0_activity_target = 0xa;
1747         data->vddc_vddgfx_delta = 300;
1748         data->static_screen_threshold = SMU7_STATICSCREENTHRESHOLD_DFLT;
1749         data->static_screen_threshold_unit = SMU7_STATICSCREENTHRESHOLDUNIT_DFLT;
1750         data->voting_rights_clients[0] = SMU7_VOTINGRIGHTSCLIENTS_DFLT0;
1751         data->voting_rights_clients[1] = SMU7_VOTINGRIGHTSCLIENTS_DFLT1;
1752         data->voting_rights_clients[2] = SMU7_VOTINGRIGHTSCLIENTS_DFLT2;
1753         data->voting_rights_clients[3] = SMU7_VOTINGRIGHTSCLIENTS_DFLT3;
1754         data->voting_rights_clients[4] = SMU7_VOTINGRIGHTSCLIENTS_DFLT4;
1755         data->voting_rights_clients[5] = SMU7_VOTINGRIGHTSCLIENTS_DFLT5;
1756         data->voting_rights_clients[6] = SMU7_VOTINGRIGHTSCLIENTS_DFLT6;
1757         data->voting_rights_clients[7] = SMU7_VOTINGRIGHTSCLIENTS_DFLT7;
1758
1759         data->mclk_dpm_key_disabled = !(hwmgr->feature_mask & PP_MCLK_DPM_MASK);
1760         data->sclk_dpm_key_disabled = !(hwmgr->feature_mask & PP_SCLK_DPM_MASK);
1761         data->pcie_dpm_key_disabled = !(hwmgr->feature_mask & PP_PCIE_DPM_MASK);
1762         /* need to set voltage control types before EVV patching */
1763         data->voltage_control = SMU7_VOLTAGE_CONTROL_NONE;
1764         data->vddci_control = SMU7_VOLTAGE_CONTROL_NONE;
1765         data->mvdd_control = SMU7_VOLTAGE_CONTROL_NONE;
1766         data->enable_tdc_limit_feature = true;
1767         data->enable_pkg_pwr_tracking_feature = true;
1768         data->force_pcie_gen = PP_PCIEGenInvalid;
1769         data->ulv_supported = !!(hwmgr->feature_mask & PP_ULV_MASK);
1770         data->current_profile_setting.bupdate_sclk = 1;
1771         data->current_profile_setting.sclk_up_hyst = 0;
1772         data->current_profile_setting.sclk_down_hyst = 100;
1773         data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
1774         data->current_profile_setting.bupdate_mclk = 1;
1775         if (hwmgr->chip_id >= CHIP_POLARIS10) {
1776                 if (adev->gmc.vram_width == 256) {
1777                         data->current_profile_setting.mclk_up_hyst = 10;
1778                         data->current_profile_setting.mclk_down_hyst = 60;
1779                         data->current_profile_setting.mclk_activity = 25;
1780                 } else if (adev->gmc.vram_width == 128) {
1781                         data->current_profile_setting.mclk_up_hyst = 5;
1782                         data->current_profile_setting.mclk_down_hyst = 16;
1783                         data->current_profile_setting.mclk_activity = 20;
1784                 } else if (adev->gmc.vram_width == 64) {
1785                         data->current_profile_setting.mclk_up_hyst = 3;
1786                         data->current_profile_setting.mclk_down_hyst = 16;
1787                         data->current_profile_setting.mclk_activity = 20;
1788                 }
1789         } else {
1790                 data->current_profile_setting.mclk_up_hyst = 0;
1791                 data->current_profile_setting.mclk_down_hyst = 100;
1792                 data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
1793         }
1794         hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
1795         hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1796         hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1797
1798         if (hwmgr->chip_id  == CHIP_HAWAII) {
1799                 data->thermal_temp_setting.temperature_low = 94500;
1800                 data->thermal_temp_setting.temperature_high = 95000;
1801                 data->thermal_temp_setting.temperature_shutdown = 104000;
1802         } else {
1803                 data->thermal_temp_setting.temperature_low = 99500;
1804                 data->thermal_temp_setting.temperature_high = 100000;
1805                 data->thermal_temp_setting.temperature_shutdown = 104000;
1806         }
1807
1808         data->fast_watermark_threshold = 100;
1809         if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1810                         VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
1811                 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1812         else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1813                         VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
1814                 data->voltage_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1815
1816         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1817                         PHM_PlatformCaps_ControlVDDGFX)) {
1818                 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1819                         VOLTAGE_TYPE_VDDGFX, VOLTAGE_OBJ_SVID2)) {
1820                         data->vdd_gfx_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1821                 }
1822         }
1823
1824         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1825                         PHM_PlatformCaps_EnableMVDDControl)) {
1826                 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1827                                 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
1828                         data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1829                 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1830                                 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
1831                         data->mvdd_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1832         }
1833
1834         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_NONE)
1835                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1836                         PHM_PlatformCaps_ControlVDDGFX);
1837
1838         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1839                         PHM_PlatformCaps_ControlVDDCI)) {
1840                 if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1841                                 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
1842                         data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_GPIO;
1843                 else if (atomctrl_is_voltage_controlled_by_gpio_v3(hwmgr,
1844                                 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
1845                         data->vddci_control = SMU7_VOLTAGE_CONTROL_BY_SVID2;
1846         }
1847
1848         if (data->mvdd_control == SMU7_VOLTAGE_CONTROL_NONE)
1849                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1850                                 PHM_PlatformCaps_EnableMVDDControl);
1851
1852         if (data->vddci_control == SMU7_VOLTAGE_CONTROL_NONE)
1853                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1854                                 PHM_PlatformCaps_ControlVDDCI);
1855
1856         data->vddc_phase_shed_control = 1;
1857         if ((hwmgr->chip_id == CHIP_POLARIS12) ||
1858             ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) ||
1859             ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
1860             ASICID_IS_P30(adev->pdev->device, adev->pdev->revision) ||
1861             ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
1862                 if (data->voltage_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
1863                         atomctrl_get_svi2_info(hwmgr, VOLTAGE_TYPE_VDDC, &tmp1, &tmp2,
1864                                                         &tmp3);
1865                         tmp3 = (tmp3 >> 5) & 0x3;
1866                         data->vddc_phase_shed_control = ((tmp3 << 1) | (tmp3 >> 1)) & 0x3;
1867                 }
1868         } else if (hwmgr->chip_family == AMDGPU_FAMILY_CI) {
1869                 data->vddc_phase_shed_control = 1;
1870         }
1871
1872         if ((hwmgr->pp_table_version != PP_TABLE_V0) && (hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK) &&
1873                 (table_info->cac_dtp_table->usClockStretchAmount != 0))
1874                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1875                                         PHM_PlatformCaps_ClockStretcher);
1876
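        /*
         * Note the deliberate inversion: each .max starts at the lowest
         * capability and each .min at the highest, presumably so that later
         * bounds tracking can only widen them from actual level data.
         */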
1877         data->pcie_gen_performance.max = PP_PCIEGen1;
1878         data->pcie_gen_performance.min = PP_PCIEGen3;
1879         data->pcie_gen_power_saving.max = PP_PCIEGen1;
1880         data->pcie_gen_power_saving.min = PP_PCIEGen3;
1881         data->pcie_lane_performance.max = 0;
1882         data->pcie_lane_performance.min = 16;
1883         data->pcie_lane_power_saving.max = 0;
1884         data->pcie_lane_power_saving.min = 16;
1885
1886
1887         if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
1888                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1889                               PHM_PlatformCaps_UVDPowerGating);
1890         if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
1891                 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1892                               PHM_PlatformCaps_VCEPowerGating);
1893
1894         data->disable_edc_leakage_controller = true;
1895         if (((adev->asic_type == CHIP_POLARIS10) && hwmgr->is_kicker) ||
1896             ((adev->asic_type == CHIP_POLARIS11) && hwmgr->is_kicker) ||
1897             (adev->asic_type == CHIP_POLARIS12) ||
1898             (adev->asic_type == CHIP_VEGAM))
1899                 data->disable_edc_leakage_controller = false;
1900
1901         if (!atomctrl_is_asic_internal_ss_supported(hwmgr)) {
1902                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1903                         PHM_PlatformCaps_MemorySpreadSpectrumSupport);
1904                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1905                         PHM_PlatformCaps_EngineSpreadSpectrumSupport);
1906         }
1907
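        /*
         * One specific Polaris12 SKU (device 0x699F, revision 0xCF) gets
         * power containment, TDC limiting, package power tracking, the EDC
         * leakage controller and clock stretching all switched off.
         */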
1908         if ((adev->pdev->device == 0x699F) &&
1909             (adev->pdev->revision == 0xCF)) {
1910                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1911                                 PHM_PlatformCaps_PowerContainment);
1912                 data->enable_tdc_limit_feature = false;
1913                 data->enable_pkg_pwr_tracking_feature = false;
1914                 data->disable_edc_leakage_controller = true;
1915                 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
1916                                         PHM_PlatformCaps_ClockStretcher);
1917         }
1918 }
1919
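/*
 * Derive the allowed ring-oscillator (RO) fuse range for this SKU from the
 * EVV revision and ASIC revision efuses; the range is consumed later when
 * computing EVV voltages (a reading of the code, the fuse layout itself is
 * not documented here).
 */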
1920 static int smu7_calculate_ro_range(struct pp_hwmgr *hwmgr)
1921 {
1922         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1923         struct amdgpu_device *adev = hwmgr->adev;
1924         uint32_t asicrev1, evv_revision, max = 0, min = 0;
1925
1926         atomctrl_read_efuse(hwmgr, STRAP_EVV_REVISION_LSB, STRAP_EVV_REVISION_MSB,
1927                         &evv_revision);
1928
1929         atomctrl_read_efuse(hwmgr, 568, 579, &asicrev1);
1930
1931         if (ASICID_IS_P20(adev->pdev->device, adev->pdev->revision) ||
1932             ASICID_IS_P30(adev->pdev->device, adev->pdev->revision)) {
1933                 min = 1200;
1934                 max = 2500;
1935         } else if (ASICID_IS_P21(adev->pdev->device, adev->pdev->revision) ||
1936                    ASICID_IS_P31(adev->pdev->device, adev->pdev->revision)) {
1937                 min = 900;
1938                 max = 2100;
1939         } else if (hwmgr->chip_id == CHIP_POLARIS10) {
1940                 if (adev->pdev->subsystem_vendor == 0x106B) {
1941                         min = 1000;
1942                         max = 2300;
1943                 } else {
1944                         if (evv_revision == 0) {
1945                                 min = 1000;
1946                                 max = 2300;
1947                         } else if (evv_revision == 1) {
1948                                 if (asicrev1 == 326) {
1949                                         min = 1200;
1950                                         max = 2500;
1951                                         /* TODO: PATCH RO in VBIOS */
1952                                 } else {
1953                                         min = 1200;
1954                                         max = 2000;
1955                                 }
1956                         } else if (evv_revision == 2) {
1957                                 min = 1200;
1958                                 max = 2500;
1959                         }
1960                 }
1961         } else {
1962                 min = 1100;
1963                 max = 2100;
1964         }
1965
1966         data->ro_range_minimum = min;
1967         data->ro_range_maximum = max;
1968
1969         /* TODO: PATCH RO in VBIOS here */
1970
1971         return 0;
1972 }
1973
1974 /**
1975  * smu7_get_evv_voltages - Get Leakage VDDC based on leakage ID.
1976  *
1977  * @hwmgr:  the address of the powerplay hardware manager.
1978  * Return: 0 on success, -EINVAL on an invalid EVV voltage or missing table info.
1979  */
1980 static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
1981 {
1982         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
1983         uint16_t vv_id;
1984         uint16_t vddc = 0;
1985         uint16_t vddgfx = 0;
1986         uint16_t i, j;
1987         uint32_t sclk = 0;
1988         struct phm_ppt_v1_information *table_info =
1989                         (struct phm_ppt_v1_information *)hwmgr->pptable;
1990         struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
1991
1992         if (hwmgr->chip_id == CHIP_POLARIS10 ||
1993             hwmgr->chip_id == CHIP_POLARIS11 ||
1994             hwmgr->chip_id == CHIP_POLARIS12)
1995                 smu7_calculate_ro_range(hwmgr);
1996
1997         for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
1998                 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1999
2000                 if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2001                         if ((hwmgr->pp_table_version == PP_TABLE_V1)
2002                             && !phm_get_sclk_for_voltage_evv(hwmgr,
2003                                                 table_info->vddgfx_lookup_table, vv_id, &sclk)) {
2004                                 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2005                                                         PHM_PlatformCaps_ClockStretcher)) {
2006                                         sclk_table = table_info->vdd_dep_on_sclk;
2007
2008                                         for (j = 1; j < sclk_table->count; j++) {
2009                                                 if (sclk_table->entries[j].clk == sclk &&
2010                                                                 sclk_table->entries[j].cks_enable == 0) {
2011                                                         sclk += 5000;
2012                                                         break;
2013                                                 }
2014                                         }
2015                                 }
2016                                 if (atomctrl_get_voltage_evv_on_sclk(hwmgr,
2017                                                 VOLTAGE_TYPE_VDDGFX, sclk,
2018                                                 vv_id, &vddgfx) == 0) {
2019                                         /* Make sure vddgfx is less than 2V, or else it could burn the ASIC. */
2020                                         PP_ASSERT_WITH_CODE((vddgfx < 2000 && vddgfx != 0), "Invalid VDDGFX value!", return -EINVAL);
2021
2022                                         /* the voltage should not be zero nor equal to leakage ID */
2023                                         if (vddgfx != 0 && vddgfx != vv_id) {
2024                                                 data->vddcgfx_leakage.actual_voltage[data->vddcgfx_leakage.count] = vddgfx;
2025                                                 data->vddcgfx_leakage.leakage_id[data->vddcgfx_leakage.count] = vv_id;
2026                                                 data->vddcgfx_leakage.count++;
2027                                         }
2028                                 } else {
2029                                         pr_info("Error retrieving EVV voltage value!\n");
2030                                 }
2031                         }
2032                 } else {
2033                         if ((hwmgr->pp_table_version == PP_TABLE_V0)
2034                                 || !phm_get_sclk_for_voltage_evv(hwmgr,
2035                                         table_info->vddc_lookup_table, vv_id, &sclk)) {
2036                                 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2037                                                 PHM_PlatformCaps_ClockStretcher)) {
2038                                         if (table_info == NULL)
2039                                                 return -EINVAL;
2040                                         sclk_table = table_info->vdd_dep_on_sclk;
2041
2042                                         for (j = 1; j < sclk_table->count; j++) {
2043                                                 if (sclk_table->entries[j].clk == sclk &&
2044                                                                 sclk_table->entries[j].cks_enable == 0) {
2045                                                         sclk += 5000;
2046                                                         break;
2047                                                 }
2048                                         }
2049                                 }
2050
2051                                 if (phm_get_voltage_evv_on_sclk(hwmgr,
2052                                                         VOLTAGE_TYPE_VDDC,
2053                                                         sclk, vv_id, &vddc) == 0) {
2054                                         if (vddc >= 2000 || vddc == 0)
2055                                                 return -EINVAL;
2056                                 } else {
2057                                         pr_debug("failed to retrieve EVV voltage!\n");
2058                                         continue;
2059                                 }
2060
2061                                 /* the voltage should not be zero nor equal to leakage ID */
2062                                 if (vddc != 0 && vddc != vv_id) {
2063                                         data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc);
2064                                         data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
2065                                         data->vddc_leakage.count++;
2066                                 }
2067                         }
2068                 }
2069         }
2070
2071         return 0;
2072 }
2073
2074 /**
2075  * smu7_patch_ppt_v1_with_vdd_leakage - Change virtual leakage voltage to actual value.
2076  *
2077  * @hwmgr:  the address of the powerplay hardware manager.
2078  * @voltage: pointer to changing voltage
2079  * @leakage_table: pointer to leakage table
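 *
 * Example (values for illustration): virtual IDs run from
 * ATOM_VIRTUAL_VOLTAGE_ID0 (0xff01) to 0xff08; if *voltage holds 0xff03
 * and the leakage table maps that ID to 1050, *voltage becomes 1050.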
2080  */
2081 static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2082                 uint16_t *voltage, struct smu7_leakage_voltage *leakage_table)
2083 {
2084         uint32_t index;
2085
2086         /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2087         for (index = 0; index < leakage_table->count; index++) {
2088                 /* if this voltage matches a leakage voltage ID */
2089                 /* patch with actual leakage voltage */
2090                 if (leakage_table->leakage_id[index] == *voltage) {
2091                         *voltage = leakage_table->actual_voltage[index];
2092                         break;
2093                 }
2094         }
2095
2096         if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2097                 pr_err("Voltage value looks like a Leakage ID but it's not patched\n");
2098 }
2099
2100 /**
2101  * smu7_patch_lookup_table_with_leakage - Patch voltage lookup table by EVV leakages.
2102  *
2103  * @hwmgr:  the address of the powerplay hardware manager.
2104  * @lookup_table: pointer to voltage lookup table
2105  * @leakage_table: pointer to leakage table
2106  * Return:     always 0
2107  */
2108 static int smu7_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
2109                 phm_ppt_v1_voltage_lookup_table *lookup_table,
2110                 struct smu7_leakage_voltage *leakage_table)
2111 {
2112         uint32_t i;
2113
2114         for (i = 0; i < lookup_table->count; i++)
2115                 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2116                                 &lookup_table->entries[i].us_vdd, leakage_table);
2117
2118         return 0;
2119 }
2120
2121 static int smu7_patch_clock_voltage_limits_with_vddc_leakage(
2122                 struct pp_hwmgr *hwmgr, struct smu7_leakage_voltage *leakage_table,
2123                 uint16_t *vddc)
2124 {
2125         struct phm_ppt_v1_information *table_info =
2126                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2127         smu7_patch_ppt_v1_with_vdd_leakage(hwmgr, vddc, leakage_table);
2128         hwmgr->dyn_state.max_clock_voltage_on_dc.vddc =
2129                         table_info->max_clock_voltage_on_dc.vddc;
2130         return 0;
2131 }
2132
2133 static int smu7_patch_voltage_dependency_tables_with_lookup_table(
2134                 struct pp_hwmgr *hwmgr)
2135 {
2136         uint8_t entry_id;
2137         uint8_t voltage_id;
2138         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2139         struct phm_ppt_v1_information *table_info =
2140                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2141
2142         struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
2143                         table_info->vdd_dep_on_sclk;
2144         struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
2145                         table_info->vdd_dep_on_mclk;
2146         struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
2147                         table_info->mm_dep_table;
2148
2149         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2150                 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2151                         voltage_id = sclk_table->entries[entry_id].vddInd;
2152                         sclk_table->entries[entry_id].vddgfx =
2153                                 table_info->vddgfx_lookup_table->entries[voltage_id].us_vdd;
2154                 }
2155         } else {
2156                 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2157                         voltage_id = sclk_table->entries[entry_id].vddInd;
2158                         sclk_table->entries[entry_id].vddc =
2159                                 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2160                 }
2161         }
2162
2163         for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
2164                 voltage_id = mclk_table->entries[entry_id].vddInd;
2165                 mclk_table->entries[entry_id].vddc =
2166                         table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2167         }
2168
2169         for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
2170                 voltage_id = mm_table->entries[entry_id].vddcInd;
2171                 mm_table->entries[entry_id].vddc =
2172                         table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
2173         }
2174
2175         return 0;
2176
2177 }
2178
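/*
 * Insert a calculated voltage record: an existing entry with the same us_vdd
 * that is already marked calculated is left alone; a matching non-calculated
 * entry is overwritten in place; otherwise the record is appended. The
 * capacity check borrows SMU_MAX_LEVELS_VDDGFX as the upper bound on the
 * table size.
 */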
2179 static int phm_add_voltage(struct pp_hwmgr *hwmgr,
2180                         phm_ppt_v1_voltage_lookup_table *look_up_table,
2181                         phm_ppt_v1_voltage_lookup_record *record)
2182 {
2183         uint32_t i;
2184
2185         PP_ASSERT_WITH_CODE((NULL != look_up_table),
2186                 "Lookup Table is NULL.", return -EINVAL);
2187         PP_ASSERT_WITH_CODE((0 != look_up_table->count),
2188                 "Lookup Table empty.", return -EINVAL);
2189
2190         i = smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_VDDGFX);
2191         PP_ASSERT_WITH_CODE((i >= look_up_table->count),
2192                 "Lookup Table is full.", return -EINVAL);
2193
2194         /* This is to avoid entering duplicate calculated records. */
2195         for (i = 0; i < look_up_table->count; i++) {
2196                 if (look_up_table->entries[i].us_vdd == record->us_vdd) {
2197                         if (look_up_table->entries[i].us_calculated == 1)
2198                                 return 0;
2199                         break;
2200                 }
2201         }
2202
2203         look_up_table->entries[i].us_calculated = 1;
2204         look_up_table->entries[i].us_vdd = record->us_vdd;
2205         look_up_table->entries[i].us_cac_low = record->us_cac_low;
2206         look_up_table->entries[i].us_cac_mid = record->us_cac_mid;
2207         look_up_table->entries[i].us_cac_high = record->us_cac_high;
2208         /* Only increment the count when we're appending, not replacing duplicate entry. */
2209         if (i == look_up_table->count)
2210                 look_up_table->count++;
2211
2212         return 0;
2213 }
2214
2215
2216 static int smu7_calc_voltage_dependency_tables(struct pp_hwmgr *hwmgr)
2217 {
2218         uint8_t entry_id;
2219         struct phm_ppt_v1_voltage_lookup_record v_record;
2220         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2221         struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2222
2223         phm_ppt_v1_clock_voltage_dependency_table *sclk_table = pptable_info->vdd_dep_on_sclk;
2224         phm_ppt_v1_clock_voltage_dependency_table *mclk_table = pptable_info->vdd_dep_on_mclk;
2225
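        /*
         * vdd_offset is a 16-bit offset relative to vddgfx/vddc; bit 15
         * flags it as negative, folded in by subtracting 0xFFFF. A worked
         * example under that convention: vddgfx = 1000, vdd_offset = 0xFFF6
         * gives us_vdd = 1000 + 0xFFF6 - 0xFFFF = 991.
         */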
2226         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2227                 for (entry_id = 0; entry_id < sclk_table->count; ++entry_id) {
2228                         if (sclk_table->entries[entry_id].vdd_offset & (1 << 15))
2229                                 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
2230                                         sclk_table->entries[entry_id].vdd_offset - 0xFFFF;
2231                         else
2232                                 v_record.us_vdd = sclk_table->entries[entry_id].vddgfx +
2233                                         sclk_table->entries[entry_id].vdd_offset;
2234
2235                         sclk_table->entries[entry_id].vddc =
2236                                 v_record.us_cac_low = v_record.us_cac_mid =
2237                                 v_record.us_cac_high = v_record.us_vdd;
2238
2239                         phm_add_voltage(hwmgr, pptable_info->vddc_lookup_table, &v_record);
2240                 }
2241
2242                 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
2243                         if (mclk_table->entries[entry_id].vdd_offset & (1 << 15))
2244                                 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
2245                                         mclk_table->entries[entry_id].vdd_offset - 0xFFFF;
2246                         else
2247                                 v_record.us_vdd = mclk_table->entries[entry_id].vddc +
2248                                         mclk_table->entries[entry_id].vdd_offset;
2249
2250                         mclk_table->entries[entry_id].vddgfx = v_record.us_cac_low =
2251                                 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
2252                         phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
2253                 }
2254         }
2255         return 0;
2256 }
2257
2258 static int smu7_calc_mm_voltage_dependency_table(struct pp_hwmgr *hwmgr)
2259 {
2260         uint8_t entry_id;
2261         struct phm_ppt_v1_voltage_lookup_record v_record;
2262         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2263         struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
2264         phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table = pptable_info->mm_dep_table;
2265
2266         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2267                 for (entry_id = 0; entry_id < mm_table->count; entry_id++) {
2268                         if (mm_table->entries[entry_id].vddgfx_offset & (1 << 15))
2269                                 v_record.us_vdd = mm_table->entries[entry_id].vddc +
2270                                         mm_table->entries[entry_id].vddgfx_offset - 0xFFFF;
2271                         else
2272                                 v_record.us_vdd = mm_table->entries[entry_id].vddc +
2273                                         mm_table->entries[entry_id].vddgfx_offset;
2274
2275                         /* Add the calculated VDDGFX to the VDDGFX lookup table */
2276                         mm_table->entries[entry_id].vddgfx = v_record.us_cac_low =
2277                                 v_record.us_cac_mid = v_record.us_cac_high = v_record.us_vdd;
2278                         phm_add_voltage(hwmgr, pptable_info->vddgfx_lookup_table, &v_record);
2279                 }
2280         }
2281         return 0;
2282 }
2283
2284 static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr,
2285                 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
2286 {
2287         uint32_t table_size, i, j;
2288         table_size = lookup_table->count;
2289
2290         PP_ASSERT_WITH_CODE(0 != lookup_table->count,
2291                 "Lookup table is empty", return -EINVAL);
2292
2293         /* Insertion sort: order entries by ascending us_vdd */
2294         for (i = 0; i < table_size - 1; i++) {
2295                 for (j = i + 1; j > 0; j--) {
2296                         if (lookup_table->entries[j].us_vdd <
2297                                         lookup_table->entries[j - 1].us_vdd) {
2298                                 swap(lookup_table->entries[j - 1],
2299                                      lookup_table->entries[j]);
2300                         }
2301                 }
2302         }
2303
2304         return 0;
2305 }
2306
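/*
 * Dependency-table fixup pipeline: patch leakage IDs in the lookup tables,
 * propagate the patched lookup voltages into the SCLK/MCLK/MM dependency
 * tables, derive the offset-based VDDC/VDDGFX entries, then re-sort both
 * lookup tables by ascending voltage.
 */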
2307 static int smu7_complete_dependency_tables(struct pp_hwmgr *hwmgr)
2308 {
2309         int result = 0;
2310         int tmp_result;
2311         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2312         struct phm_ppt_v1_information *table_info =
2313                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2314
2315         if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
2316                 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2317                         table_info->vddgfx_lookup_table, &(data->vddcgfx_leakage));
2318                 if (tmp_result != 0)
2319                         result = tmp_result;
2320
2321                 smu7_patch_ppt_v1_with_vdd_leakage(hwmgr,
2322                         &table_info->max_clock_voltage_on_dc.vddgfx, &(data->vddcgfx_leakage));
2323         } else {
2325                 tmp_result = smu7_patch_lookup_table_with_leakage(hwmgr,
2326                                 table_info->vddc_lookup_table, &(data->vddc_leakage));
2327                 if (tmp_result)
2328                         result = tmp_result;
2329
2330                 tmp_result = smu7_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
2331                                 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
2332                 if (tmp_result)
2333                         result = tmp_result;
2334         }
2335
2336         tmp_result = smu7_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
2337         if (tmp_result)
2338                 result = tmp_result;
2339
2340         tmp_result = smu7_calc_voltage_dependency_tables(hwmgr);
2341         if (tmp_result)
2342                 result = tmp_result;
2343
2344         tmp_result = smu7_calc_mm_voltage_dependency_table(hwmgr);
2345         if (tmp_result)
2346                 result = tmp_result;
2347
2348         tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddgfx_lookup_table);
2349         if (tmp_result)
2350                 result = tmp_result;
2351
2352         tmp_result = smu7_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
2353         if (tmp_result)
2354                 result = tmp_result;
2355
2356         return result;
2357 }
2358
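/**
 * smu7_find_highest_vddc - Find the highest real VDDC the board supports.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Starts from the top entry of the SCLK dependency table and scans the VDDC
 * lookup table for any higher value, skipping virtual leakage IDs
 * (>= ATOM_VIRTUAL_VOLTAGE_ID0).
 */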
2359 static int smu7_find_highest_vddc(struct pp_hwmgr *hwmgr)
2360 {
2361         struct phm_ppt_v1_information *table_info =
2362                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2363         struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2364                                                 table_info->vdd_dep_on_sclk;
2365         struct phm_ppt_v1_voltage_lookup_table *lookup_table =
2366                                                 table_info->vddc_lookup_table;
2367         uint16_t highest_voltage;
2368         uint32_t i;
2369
2370         highest_voltage = allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2371
2372         for (i = 0; i < lookup_table->count; i++) {
2373                 if (lookup_table->entries[i].us_vdd < ATOM_VIRTUAL_VOLTAGE_ID0 &&
2374                     lookup_table->entries[i].us_vdd > highest_voltage)
2375                         highest_voltage = lookup_table->entries[i].us_vdd;
2376         }
2377
2378         return highest_voltage;
2379 }
2380
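/**
 * smu7_set_private_data_based_on_pptable_v1 - Cache the v1 max-on-AC limits.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Records the top SCLK/MCLK/VDDC/VDDCI of the dependency tables as the
 * max-clock-voltage-on-AC limits; Polaris10 and later take VDDC from the
 * lookup table instead of the last SCLK dependency entry.
 */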
2381 static int smu7_set_private_data_based_on_pptable_v1(struct pp_hwmgr *hwmgr)
2382 {
2383         struct phm_ppt_v1_information *table_info =
2384                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2385
2386         struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
2387                                                 table_info->vdd_dep_on_sclk;
2388         struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
2389                                                 table_info->vdd_dep_on_mclk;
2390
2391         PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table != NULL,
2392                 "VDD dependency on SCLK table is missing.",
2393                 return -EINVAL);
2394         PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
2395                 "VDD dependency on SCLK table has to have at least one entry.",
2396                 return -EINVAL);
2397
2398         PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table != NULL,
2399                 "VDD dependency on MCLK table is missing.",
2400                 return -EINVAL);
2401         PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
2402                 "VDD dependency on MCLK table has to have at least one entry.",
2403                 return -EINVAL);
2404
2405         table_info->max_clock_voltage_on_ac.sclk =
2406                 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
2407         table_info->max_clock_voltage_on_ac.mclk =
2408                 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
2409         if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM)
2410                 table_info->max_clock_voltage_on_ac.vddc =
2411                         smu7_find_highest_vddc(hwmgr);
2412         else
2413                 table_info->max_clock_voltage_on_ac.vddc =
2414                         allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
2415         table_info->max_clock_voltage_on_ac.vddci =
2416                 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
2417
2418         hwmgr->dyn_state.max_clock_voltage_on_ac.sclk = table_info->max_clock_voltage_on_ac.sclk;
2419         hwmgr->dyn_state.max_clock_voltage_on_ac.mclk = table_info->max_clock_voltage_on_ac.mclk;
2420         hwmgr->dyn_state.max_clock_voltage_on_ac.vddc = table_info->max_clock_voltage_on_ac.vddc;
2421         hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = table_info->max_clock_voltage_on_ac.vddci;
2422
2423         return 0;
2424 }
2425
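/**
 * smu7_patch_voltage_workaround - Board-specific MCLK voltage workaround.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * On a few Polaris10 boards (device 0x67DF, rev 0xC7, selected subsystem
 * IDs), raise the clock-stretch amount and make sure the top MCLK DPM
 * level references a VDDC of at least 1000 mV.
 */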
2426 static int smu7_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
2427 {
2428         struct phm_ppt_v1_information *table_info =
2429                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
2430         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
2431         struct phm_ppt_v1_voltage_lookup_table *lookup_table;
2432         uint32_t i;
2433         uint32_t hw_revision, sub_vendor_id, sub_sys_id;
2434         struct amdgpu_device *adev = hwmgr->adev;
2435
2436         if (table_info != NULL) {
2437                 dep_mclk_table = table_info->vdd_dep_on_mclk;
2438                 lookup_table = table_info->vddc_lookup_table;
2439         } else
2440                 return 0;
2441
2442         hw_revision = adev->pdev->revision;
2443         sub_sys_id = adev->pdev->subsystem_device;
2444         sub_vendor_id = adev->pdev->subsystem_vendor;
2445
2446         if (adev->pdev->device == 0x67DF && hw_revision == 0xC7 &&
2447             ((sub_sys_id == 0xb37 && sub_vendor_id == 0x1002) ||
2448              (sub_sys_id == 0x4a8 && sub_vendor_id == 0x1043) ||
2449              (sub_sys_id == 0x9480 && sub_vendor_id == 0x1682))) {
2450
2451                 PHM_WRITE_VFPF_INDIRECT_FIELD(hwmgr->device,
2452                                               CGS_IND_REG__SMC,
2453                                               PWR_CKS_CNTL,
2454                                               CKS_STRETCH_AMOUNT,
2455                                               0x3);
2456
2457                 if (lookup_table->entries[dep_mclk_table->entries[dep_mclk_table->count-1].vddInd].us_vdd >= 1000)
2458                         return 0;
2459
2460                 for (i = 0; i < lookup_table->count; i++) {
2461                         if (lookup_table->entries[i].us_vdd < 0xff01 && lookup_table->entries[i].us_vdd >= 1000) {
2462                                 dep_mclk_table->entries[dep_mclk_table->count-1].vddInd = (uint8_t) i;
2463                                 return 0;
2464                         }
2465                 }
2466         }
2467         return 0;
2468 }
2469
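/**
 * smu7_thermal_parameter_init - Seed fan and thermal limits from the pptable.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Routes the VDDC PCC GPIO pin into CNB_PWRMGT_CNTL, then copies the fan
 * PWM/RPM limits and target operating temperatures from the powerplay
 * table into the thermal controller parameters.
 */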
2470 static int smu7_thermal_parameter_init(struct pp_hwmgr *hwmgr)
2471 {
2472         struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2473         uint32_t temp_reg;
2474         struct phm_ppt_v1_information *table_info =
2475                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
2476
2478         if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
2479                 temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
2480                 switch (gpio_pin_assignment.uc_gpio_pin_bit_shift) {
2481                 case 0:
2482                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x1);
2483                         break;
2484                 case 1:
2485                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW_MODE, 0x2);
2486                         break;
2487                 case 2:
2488                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, GNB_SLOW, 0x1);
2489                         break;
2490                 case 3:
2491                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, FORCE_NB_PS1, 0x1);
2492                         break;
2493                 case 4:
2494                         temp_reg = PHM_SET_FIELD(temp_reg, CNB_PWRMGT_CNTL, DPM_ENABLED, 0x1);
2495                         break;
2496                 default:
2497                         break;
2498                 }
2499                 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL, temp_reg);
2500         }
2501
2502         if (table_info == NULL)
2503                 return 0;
2504
2505         if (table_info->cac_dtp_table->usDefaultTargetOperatingTemp != 0 &&
2506                 hwmgr->thermal_controller.advanceFanControlParameters.ucFanControlMode) {
2507                 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit =
2508                         (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2509
2510                 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMaxLimit =
2511                         (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2512
2513                 hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMStep = 1;
2514
2515                 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMaxLimit = 100;
2516
2517                 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMMinLimit =
2518                         (uint16_t)hwmgr->thermal_controller.advanceFanControlParameters.ucMinimumPWMLimit;
2519
2520                 hwmgr->thermal_controller.advanceFanControlParameters.usFanRPMStep = 1;
2521
2522                 table_info->cac_dtp_table->usDefaultTargetOperatingTemp = (table_info->cac_dtp_table->usDefaultTargetOperatingTemp >= 50) ?
2523                                                                 (table_info->cac_dtp_table->usDefaultTargetOperatingTemp - 50) : 0;
2524
2525                 table_info->cac_dtp_table->usOperatingTempMaxLimit = table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2526                 table_info->cac_dtp_table->usOperatingTempStep = 1;
2527                 table_info->cac_dtp_table->usOperatingTempHyst = 1;
2528
2529                 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanPWM =
2530                                hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanPWM;
2531
2532                 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM =
2533                                hwmgr->thermal_controller.advanceFanControlParameters.usDefaultMaxFanRPM;
2534
2535                 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMinLimit =
2536                                table_info->cac_dtp_table->usOperatingTempMinLimit;
2537
2538                 hwmgr->dyn_state.cac_dtp_table->usOperatingTempMaxLimit =
2539                                table_info->cac_dtp_table->usOperatingTempMaxLimit;
2540
2541                 hwmgr->dyn_state.cac_dtp_table->usDefaultTargetOperatingTemp =
2542                                table_info->cac_dtp_table->usDefaultTargetOperatingTemp;
2543
2544                 hwmgr->dyn_state.cac_dtp_table->usOperatingTempStep =
2545                                table_info->cac_dtp_table->usOperatingTempStep;
2546
2547                 hwmgr->dyn_state.cac_dtp_table->usTargetOperatingTemp =
2548                                table_info->cac_dtp_table->usTargetOperatingTemp;
2549                 if (hwmgr->feature_mask & PP_OD_FUZZY_FAN_CONTROL_MASK)
2550                         phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2551                                         PHM_PlatformCaps_ODFuzzyFanControlSupport);
2552         }
2553
2554         return 0;
2555 }
2556
2557 /**
2558  * smu7_patch_ppt_v0_with_vdd_leakage - Change virtual leakage voltage to actual value.
2559  *
2560  * @hwmgr:  the address of the powerplay hardware manager.
2561  * @voltage: pointer to changing voltage
2562  * @leakage_table: pointer to leakage table
2563  */
2564 static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
2565                 uint32_t *voltage, struct smu7_leakage_voltage *leakage_table)
2566 {
2567         uint32_t index;
2568
2569         /* search for leakage voltage ID 0xff01 ~ 0xff08 */
2570         for (index = 0; index < leakage_table->count; index++) {
2571                 /* if this voltage matches a leakage voltage ID */
2572                 /* patch with actual leakage voltage */
2573                 if (leakage_table->leakage_id[index] == *voltage) {
2574                         *voltage = leakage_table->actual_voltage[index];
2575                         break;
2576                 }
2577         }
2578
2579         if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
2580                 pr_err("Voltage value looks like a Leakage ID but it's not patched\n");
2581 }
2582
2583
2584 static int smu7_patch_vddc(struct pp_hwmgr *hwmgr,
2585                               struct phm_clock_voltage_dependency_table *tab)
2586 {
2587         uint16_t i;
2588         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2589
2590         if (tab)
2591                 for (i = 0; i < tab->count; i++)
2592                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2593                                                 &data->vddc_leakage);
2594
2595         return 0;
2596 }
2597
2598 static int smu7_patch_vddci(struct pp_hwmgr *hwmgr,
2599                                struct phm_clock_voltage_dependency_table *tab)
2600 {
2601         uint16_t i;
2602         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2603
2604         if (tab)
2605                 for (i = 0; i < tab->count; i++)
2606                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2607                                                         &data->vddci_leakage);
2608
2609         return 0;
2610 }
2611
2612 static int smu7_patch_vce_vddc(struct pp_hwmgr *hwmgr,
2613                                   struct phm_vce_clock_voltage_dependency_table *tab)
2614 {
2615         uint16_t i;
2616         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2617
2618         if (tab)
2619                 for (i = 0; i < tab->count; i++)
2620                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2621                                                         &data->vddc_leakage);
2622
2623         return 0;
2624 }
2625
2626
2627 static int smu7_patch_uvd_vddc(struct pp_hwmgr *hwmgr,
2628                                   struct phm_uvd_clock_voltage_dependency_table *tab)
2629 {
2630         uint16_t i;
2631         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2632
2633         if (tab)
2634                 for (i = 0; i < tab->count; i++)
2635                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2636                                                         &data->vddc_leakage);
2637
2638         return 0;
2639 }
2640
2641 static int smu7_patch_vddc_shed_limit(struct pp_hwmgr *hwmgr,
2642                                          struct phm_phase_shedding_limits_table *tab)
2643 {
2644         uint16_t i;
2645         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2646
2647         if (tab)
2648                 for (i = 0; i < tab->count; i++)
2649                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].Voltage,
2650                                                         &data->vddc_leakage);
2651
2652         return 0;
2653 }
2654
2655 static int smu7_patch_samu_vddc(struct pp_hwmgr *hwmgr,
2656                                    struct phm_samu_clock_voltage_dependency_table *tab)
2657 {
2658         uint16_t i;
2659         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2660
2661         if (tab)
2662                 for (i = 0; i < tab->count; i++)
2663                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2664                                                         &data->vddc_leakage);
2665
2666         return 0;
2667 }
2668
2669 static int smu7_patch_acp_vddc(struct pp_hwmgr *hwmgr,
2670                                   struct phm_acp_clock_voltage_dependency_table *tab)
2671 {
2672         uint16_t i;
2673         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2674
2675         if (tab)
2676                 for (i = 0; i < tab->count; i++)
2677                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &tab->entries[i].v,
2678                                         &data->vddc_leakage);
2679
2680         return 0;
2681 }
2682
2683 static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
2684                                   struct phm_clock_and_voltage_limits *tab)
2685 {
2686         uint32_t vddc, vddci;
2687         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2688
2689         if (tab) {
2690                 vddc = tab->vddc;
2691                 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
2692                                                    &data->vddc_leakage);
2693                 tab->vddc = vddc;
2694                 vddci = tab->vddci;
2695                 smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
2696                                                    &data->vddci_leakage);
2697                 tab->vddci = vddci;
2698         }
2699
2700         return 0;
2701 }
2702
2703 static int smu7_patch_cac_vddc(struct pp_hwmgr *hwmgr, struct phm_cac_leakage_table *tab)
2704 {
2705         uint32_t i;
2706         uint32_t vddc;
2707         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2708
2709         if (tab) {
2710                 for (i = 0; i < tab->count; i++) {
2711                         vddc = (uint32_t)(tab->entries[i].Vddc);
2712                         smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc, &data->vddc_leakage);
2713                         tab->entries[i].Vddc = (uint16_t)vddc;
2714                 }
2715         }
2716
2717         return 0;
2718 }
2719
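/**
 * smu7_patch_dependency_tables_with_leakage - Patch all v0 tables with real voltages.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Runs the leakage patch over every clock/voltage dependency table a v0
 * powerplay table may carry. Any failure aborts with -EINVAL.
 */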
2720 static int smu7_patch_dependency_tables_with_leakage(struct pp_hwmgr *hwmgr)
2721 {
2722         int tmp;
2723
2724         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_sclk);
2725         if (tmp)
2726                 return -EINVAL;
2727
2728         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dependency_on_mclk);
2729         if (tmp)
2730                 return -EINVAL;
2731
2732         tmp = smu7_patch_vddc(hwmgr, hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2733         if (tmp)
2734                 return -EINVAL;
2735
2736         tmp = smu7_patch_vddci(hwmgr, hwmgr->dyn_state.vddci_dependency_on_mclk);
2737         if (tmp)
2738                 return -EINVAL;
2739
2740         tmp = smu7_patch_vce_vddc(hwmgr, hwmgr->dyn_state.vce_clock_voltage_dependency_table);
2741         if (tmp)
2742                 return -EINVAL;
2743
2744         tmp = smu7_patch_uvd_vddc(hwmgr, hwmgr->dyn_state.uvd_clock_voltage_dependency_table);
2745         if (tmp)
2746                 return -EINVAL;
2747
2748         tmp = smu7_patch_samu_vddc(hwmgr, hwmgr->dyn_state.samu_clock_voltage_dependency_table);
2749         if (tmp)
2750                 return -EINVAL;
2751
2752         tmp = smu7_patch_acp_vddc(hwmgr, hwmgr->dyn_state.acp_clock_voltage_dependency_table);
2753         if (tmp)
2754                 return -EINVAL;
2755
2756         tmp = smu7_patch_vddc_shed_limit(hwmgr, hwmgr->dyn_state.vddc_phase_shed_limits_table);
2757         if (tmp)
2758                 return -EINVAL;
2759
2760         tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_ac);
2761         if (tmp)
2762                 return -EINVAL;
2763
2764         tmp = smu7_patch_limits_vddc(hwmgr, &hwmgr->dyn_state.max_clock_voltage_on_dc);
2765         if (tmp)
2766                 return -EINVAL;
2767
2768         tmp = smu7_patch_cac_vddc(hwmgr, hwmgr->dyn_state.cac_leakage_table);
2769         if (tmp)
2770                 return -EINVAL;
2771
2772         return 0;
2773 }
2774
2775
2776 static int smu7_set_private_data_based_on_pptable_v0(struct pp_hwmgr *hwmgr)
2777 {
2778         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2779
2780         struct phm_clock_voltage_dependency_table *allowed_sclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
2781         struct phm_clock_voltage_dependency_table *allowed_mclk_vddc_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
2782         struct phm_clock_voltage_dependency_table *allowed_mclk_vddci_table = hwmgr->dyn_state.vddci_dependency_on_mclk;
2783
2784         PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table != NULL,
2785                 "VDDC dependency on SCLK table is missing. This table is mandatory",
2786                 return -EINVAL);
2787         PP_ASSERT_WITH_CODE(allowed_sclk_vddc_table->count >= 1,
2788                 "VDDC dependency on SCLK table has to have at least one entry. This table is mandatory",
2789                 return -EINVAL);
2790
2791         PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table != NULL,
2792                 "VDDC dependency on MCLK table is missing. This table is mandatory",
2793                 return -EINVAL);
2794         PP_ASSERT_WITH_CODE(allowed_mclk_vddc_table->count >= 1,
2795                 "VDDC dependency on MCLK table has to have at least one entry. This table is mandatory",
2796                 return -EINVAL);
2797
2798         data->min_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[0].v;
2799         data->max_vddc_in_pptable = (uint16_t)allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2800
2801         hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
2802                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
2803         hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
2804                 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
2805         hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
2806                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
2807
2808         if (allowed_mclk_vddci_table != NULL && allowed_mclk_vddci_table->count >= 1) {
2809                 data->min_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[0].v;
2810                 data->max_vddci_in_pptable = (uint16_t)allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
2811         }
2812
2813         if (hwmgr->dyn_state.vddci_dependency_on_mclk != NULL && hwmgr->dyn_state.vddci_dependency_on_mclk->count >= 1)
2814                 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci = hwmgr->dyn_state.vddci_dependency_on_mclk->entries[hwmgr->dyn_state.vddci_dependency_on_mclk->count - 1].v;
2815
2816         return 0;
2817 }
2818
2819 static int smu7_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
2820 {
2821         kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
2822         hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
2823         kfree(hwmgr->backend);
2824         hwmgr->backend = NULL;
2825
2826         return 0;
2827 }
2828
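/**
 * smu7_get_elb_voltages - Resolve leakage voltages from the fused leakage ID.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * For each virtual voltage ID, look up the real VDDC/VDDCI the efuse
 * leakage ID maps to and record it in the leakage tables.
 */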
2829 static int smu7_get_elb_voltages(struct pp_hwmgr *hwmgr)
2830 {
2831         uint16_t virtual_voltage_id, vddc, vddci, efuse_voltage_id;
2832         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2833         int i;
2834
2835         if (atomctrl_get_leakage_id_from_efuse(hwmgr, &efuse_voltage_id) == 0) {
2836                 for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
2837                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
2838                         if (atomctrl_get_leakage_vddc_base_on_leakage(hwmgr, &vddc, &vddci,
2839                                                                 virtual_voltage_id,
2840                                                                 efuse_voltage_id) == 0) {
2841                                 if (vddc != 0 && vddc != virtual_voltage_id) {
2842                                         data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = vddc;
2843                                         data->vddc_leakage.leakage_id[data->vddc_leakage.count] = virtual_voltage_id;
2844                                         data->vddc_leakage.count++;
2845                                 }
2846                                 if (vddci != 0 && vddci != virtual_voltage_id) {
2847                                         data->vddci_leakage.actual_voltage[data->vddci_leakage.count] = vddci;
2848                                         data->vddci_leakage.leakage_id[data->vddci_leakage.count] = virtual_voltage_id;
2849                                         data->vddci_leakage.count++;
2850                                 }
2851                         }
2852                 }
2853         }
2854         return 0;
2855 }
2856
2857 #define LEAKAGE_ID_MSB                  463
2858 #define LEAKAGE_ID_LSB                  454
2859
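/**
 * smu7_update_edc_leakage_table - Pick the EDC/DIDT table matching the fused leakage.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Reads the leakage ID from efuse bits 454..463 and loads either the low-
 * or high-leakage EDC DIDT table from the VBIOS, unless the EDC leakage
 * controller is disabled.
 */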
2860 static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
2861 {
2862         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2863         uint32_t efuse;
2864         uint16_t offset;
2865         int ret = 0;
2866
2867         if (data->disable_edc_leakage_controller)
2868                 return 0;
2869
2870         ret = atomctrl_get_edc_hilo_leakage_offset_table(hwmgr,
2871                                                          &data->edc_hilo_leakage_offset_from_vbios);
2872         if (ret)
2873                 return ret;
2874
2875         if (data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset &&
2876             data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset) {
2877                 atomctrl_read_efuse(hwmgr, LEAKAGE_ID_LSB, LEAKAGE_ID_MSB, &efuse);
2878                 if (efuse < data->edc_hilo_leakage_offset_from_vbios.usHiLoLeakageThreshold)
2879                         offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtLoDpm7TableOffset;
2880                 else
2881                         offset = data->edc_hilo_leakage_offset_from_vbios.usEdcDidtHiDpm7TableOffset;
2882
2883                 ret = atomctrl_get_edc_leakage_table(hwmgr,
2884                                                      &data->edc_leakage_table,
2885                                                      offset);
2886                 if (ret)
2887                         return ret;
2888         }
2889
2890         return ret;
2891 }
2892
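/**
 * smu7_hwmgr_backend_init - Allocate and initialize the SMU7 hwmgr backend.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * Allocates the backend state, resolves leakage voltages (EVV or ELB),
 * completes the version-specific powerplay tables and seeds the platform
 * descriptor defaults.
 */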
2893 static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2894 {
2895         struct smu7_hwmgr *data;
2896         int result = 0;
2897
2898         data = kzalloc(sizeof(struct smu7_hwmgr), GFP_KERNEL);
2899         if (data == NULL)
2900                 return -ENOMEM;
2901
2902         hwmgr->backend = data;
2903         smu7_patch_voltage_workaround(hwmgr);
2904         smu7_init_dpm_defaults(hwmgr);
2905
2906         /* Get leakage voltage based on leakage ID. */
2907         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2908                         PHM_PlatformCaps_EVV)) {
2909                 result = smu7_get_evv_voltages(hwmgr);
2910                 if (result) {
2911                         pr_info("Get EVV Voltage Failed. Abort Driver loading!\n");
2912                         return -EINVAL;
2913                 }
2914         } else {
2915                 smu7_get_elb_voltages(hwmgr);
2916         }
2917
2918         if (hwmgr->pp_table_version == PP_TABLE_V1) {
2919                 smu7_complete_dependency_tables(hwmgr);
2920                 smu7_set_private_data_based_on_pptable_v1(hwmgr);
2921         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
2922                 smu7_patch_dependency_tables_with_leakage(hwmgr);
2923                 smu7_set_private_data_based_on_pptable_v0(hwmgr);
2924         }
2925
2926         /* Initialize Dynamic State Adjustment Rule Settings */
2927         result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
2928
2929         if (0 == result) {
2930                 struct amdgpu_device *adev = hwmgr->adev;
2931
2932                 data->is_tlu_enabled = false;
2933
2934                 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
2935                                                         SMU7_MAX_HARDWARE_POWERLEVELS;
2936                 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
2937                 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
2938
2939                 data->pcie_gen_cap = adev->pm.pcie_gen_mask;
2940                 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
2941                         data->pcie_spc_cap = 20;
2942                 else
2943                         data->pcie_spc_cap = 16;
2944                 data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
2945
2946                 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
2947                 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5 MHz. */
2948                 hwmgr->platform_descriptor.clockStep.engineClock = 500;
2949                 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
2950                 smu7_thermal_parameter_init(hwmgr);
2951         } else {
2952                 /* Ignore return value in here, we are cleaning up a mess. */
2953                 smu7_hwmgr_backend_fini(hwmgr);
2954         }
2955
2956         result = smu7_update_edc_leakage_table(hwmgr);
2957         if (result)
2958                 return result;
2959
2960         return 0;
2961 }
2962
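/**
 * smu7_force_dpm_highest - Pin SCLK/MCLK/PCIe DPM to their highest enabled levels.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 *
 * For each domain the highest set bit of the enable mask picks the level,
 * e.g. a mask of 0x1f forces level 4.
 */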
2963 static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
2964 {
2965         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
2966         uint32_t level, tmp;
2967
2968         if (!data->pcie_dpm_key_disabled) {
2969                 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
2970                         level = 0;
2971                         tmp = data->dpm_level_enable_mask.pcie_dpm_enable_mask;
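                        /* Find the index of the highest set bit in the enable
                         * mask, i.e. the highest enabled DPM level.
                         */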
2972                         while (tmp >>= 1)
2973                                 level++;
2974
2975                         if (level)
2976                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2977                                                 PPSMC_MSG_PCIeDPM_ForceLevel, level,
2978                                                 NULL);
2979                 }
2980         }
2981
2982         if (!data->sclk_dpm_key_disabled) {
2983                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
2984                         level = 0;
2985                         tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
2986                         while (tmp >>= 1)
2987                                 level++;
2988
2989                         if (level)
2990                                 smum_send_msg_to_smc_with_parameter(hwmgr,
2991                                                 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2992                                                 (1 << level),
2993                                                 NULL);
2994                 }
2995         }
2996
2997         if (!data->mclk_dpm_key_disabled) {
2998                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
2999                         level = 0;
3000                         tmp = data->dpm_level_enable_mask.mclk_dpm_enable_mask;
3001                         while (tmp >>= 1)
3002                                 level++;
3003
3004                         if (level)
3005                                 smum_send_msg_to_smc_with_parameter(hwmgr,
3006                                                 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3007                                                 (1 << level),
3008                                                 NULL);
3009                 }
3010         }
3011
3012         return 0;
3013 }
3014
3015 static int smu7_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
3016 {
3017         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3018
3019         if (hwmgr->pp_table_version == PP_TABLE_V1)
3020                 phm_apply_dal_min_voltage_request(hwmgr);
3021         /* TODO: for v0 Iceland and CI */
3022
3023         if (!data->sclk_dpm_key_disabled) {
3024                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
3025                         smum_send_msg_to_smc_with_parameter(hwmgr,
3026                                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
3027                                         data->dpm_level_enable_mask.sclk_dpm_enable_mask,
3028                                         NULL);
3029         }
3030
3031         if (!data->mclk_dpm_key_disabled) {
3032                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask)
3033                         smum_send_msg_to_smc_with_parameter(hwmgr,
3034                                         PPSMC_MSG_MCLKDPM_SetEnabledMask,
3035                                         data->dpm_level_enable_mask.mclk_dpm_enable_mask,
3036                                         NULL);
3037         }
3038
3039         return 0;
3040 }
3041
3042 static int smu7_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3043 {
3044         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3045
3046         if (!smum_is_dpm_running(hwmgr))
3047                 return -EINVAL;
3048
3049         if (!data->pcie_dpm_key_disabled) {
3050                 smum_send_msg_to_smc(hwmgr,
3051                                 PPSMC_MSG_PCIeDPM_UnForceLevel,
3052                                 NULL);
3053         }
3054
3055         return smu7_upload_dpm_level_enable_mask(hwmgr);
3056 }
3057
3058 static int smu7_force_dpm_lowest(struct pp_hwmgr *hwmgr)
3059 {
3060         struct smu7_hwmgr *data =
3061                         (struct smu7_hwmgr *)(hwmgr->backend);
3062         uint32_t level;
3063
3064         if (!data->sclk_dpm_key_disabled) {
3065                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3066                         level = phm_get_lowest_enabled_level(hwmgr,
3067                                                               data->dpm_level_enable_mask.sclk_dpm_enable_mask);
3068                         smum_send_msg_to_smc_with_parameter(hwmgr,
3069                                                             PPSMC_MSG_SCLKDPM_SetEnabledMask,
3070                                                             (1 << level),
3071                                                             NULL);
3072                 }
3073         }
3074
3075         if (!data->mclk_dpm_key_disabled) {
3076                 if (data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3077                         level = phm_get_lowest_enabled_level(hwmgr,
3078                                                               data->dpm_level_enable_mask.mclk_dpm_enable_mask);
3079                         smum_send_msg_to_smc_with_parameter(hwmgr,
3080                                                             PPSMC_MSG_MCLKDPM_SetEnabledMask,
3081                                                             (1 << level),
3082                                                             NULL);
3083                 }
3084         }
3085
3086         if (!data->pcie_dpm_key_disabled) {
3087                 if (data->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3088                         level = phm_get_lowest_enabled_level(hwmgr,
3089                                                               data->dpm_level_enable_mask.pcie_dpm_enable_mask);
3090                         smum_send_msg_to_smc_with_parameter(hwmgr,
3091                                                             PPSMC_MSG_PCIeDPM_ForceLevel,
3092                                                             (level),
3093                                                             NULL);
3094                 }
3095         }
3096
3097         return 0;
3098 }
3099
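/**
 * smu7_get_profiling_clk - Pick profiling (pstate) clock masks for a forced level.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @level: the requested forced DPM level.
 * @sclk_mask: returns the SCLK level index to force.
 * @mclk_mask: returns the MCLK level index to force.
 * @pcie_mask: returns the PCIe level index to force.
 *
 * The standard profile targets the second-highest MCLK and an SCLK scaled
 * by the golden SCLK/MCLK ratio (70% when only one MCLK level exists).
 */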
3100 static int smu7_get_profiling_clk(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
3101                                 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *pcie_mask)
3102 {
3103         uint32_t percentage;
3104         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3105         struct smu7_dpm_table *golden_dpm_table = &data->golden_dpm_table;
3106         int32_t tmp_mclk;
3107         int32_t tmp_sclk;
3108         int32_t count;
3109
3110         if (golden_dpm_table->mclk_table.count < 1)
3111                 return -EINVAL;
3112
3113         percentage = 100 * golden_dpm_table->sclk_table.dpm_levels[golden_dpm_table->sclk_table.count - 1].value /
3114                         golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
3115
3116         if (golden_dpm_table->mclk_table.count == 1) {
3117                 percentage = 70;
3118                 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 1].value;
3119                 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
3120         } else {
3121                 tmp_mclk = golden_dpm_table->mclk_table.dpm_levels[golden_dpm_table->mclk_table.count - 2].value;
3122                 *mclk_mask = golden_dpm_table->mclk_table.count - 2;
3123         }
3124
3125         tmp_sclk = tmp_mclk * percentage / 100;
3126
3127         if (hwmgr->pp_table_version == PP_TABLE_V0) {
3128                 for (count = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
3129                         count >= 0; count--) {
3130                         if (tmp_sclk >= hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk) {
3131                                 tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[count].clk;
3132                                 *sclk_mask = count;
3133                                 break;
3134                         }
3135                 }
3136                 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3137                         *sclk_mask = 0;
3138                         tmp_sclk = hwmgr->dyn_state.vddc_dependency_on_sclk->entries[0].clk;
3139                 }
3140
3141                 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3142                         *sclk_mask = hwmgr->dyn_state.vddc_dependency_on_sclk->count-1;
3143         } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3144                 struct phm_ppt_v1_information *table_info =
3145                                 (struct phm_ppt_v1_information *)(hwmgr->pptable);
3146
3147                 for (count = table_info->vdd_dep_on_sclk->count-1; count >= 0; count--) {
3148                         if (tmp_sclk >= table_info->vdd_dep_on_sclk->entries[count].clk) {
3149                                 tmp_sclk = table_info->vdd_dep_on_sclk->entries[count].clk;
3150                                 *sclk_mask = count;
3151                                 break;
3152                         }
3153                 }
3154                 if (count < 0 || level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
3155                         *sclk_mask = 0;
3156                         tmp_sclk =  table_info->vdd_dep_on_sclk->entries[0].clk;
3157                 }
3158
3159                 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3160                         *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
3161         }
3162
3163         if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
3164                 *mclk_mask = 0;
3165         else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3166                 *mclk_mask = golden_dpm_table->mclk_table.count - 1;
3167
3168         *pcie_mask = data->dpm_table.pcie_speed_table.count - 1;
3169         hwmgr->pstate_sclk = tmp_sclk;
3170         hwmgr->pstate_mclk = tmp_mclk;
3171
3172         return 0;
3173 }
3174
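/**
 * smu7_force_dpm_level - Apply a forced DPM level request.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @level: the forced level to apply.
 *
 * HIGH/LOW/AUTO map straight onto the force/unforce helpers; the profiling
 * levels force single SCLK/MCLK/PCIe levels from smu7_get_profiling_clk().
 * Entering PROFILE_PEAK also pins the fan at full PWM; leaving it restores
 * the default fan speed.
 */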
3175 static int smu7_force_dpm_level(struct pp_hwmgr *hwmgr,
3176                                 enum amd_dpm_forced_level level)
3177 {
3178         int ret = 0;
3179         uint32_t sclk_mask = 0;
3180         uint32_t mclk_mask = 0;
3181         uint32_t pcie_mask = 0;
3182
3183         if (hwmgr->pstate_sclk == 0)
3184                 smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
3185
3186         switch (level) {
3187         case AMD_DPM_FORCED_LEVEL_HIGH:
3188                 ret = smu7_force_dpm_highest(hwmgr);
3189                 break;
3190         case AMD_DPM_FORCED_LEVEL_LOW:
3191                 ret = smu7_force_dpm_lowest(hwmgr);
3192                 break;
3193         case AMD_DPM_FORCED_LEVEL_AUTO:
3194                 ret = smu7_unforce_dpm_levels(hwmgr);
3195                 break;
3196         case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
3197         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
3198         case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
3199         case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
3200                 ret = smu7_get_profiling_clk(hwmgr, level, &sclk_mask, &mclk_mask, &pcie_mask);
3201                 if (ret)
3202                         return ret;
3203                 smu7_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
3204                 smu7_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
3205                 smu7_force_clock_level(hwmgr, PP_PCIE, 1<<pcie_mask);
3206                 break;
3207         case AMD_DPM_FORCED_LEVEL_MANUAL:
3208         case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
3209         default:
3210                 break;
3211         }
3212
3213         if (!ret) {
3214                 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3215                         smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
3216                 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
3217                         smu7_fan_ctrl_reset_fan_speed_to_default(hwmgr);
3218         }
3219         return ret;
3220 }
3221
3222 static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
3223 {
3224         return sizeof(struct smu7_power_state);
3225 }
3226
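/**
 * smu7_vblank_too_short - Check whether vblank leaves room for an MCLK switch.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @vblank_time_us: the vblank duration in microseconds.
 *
 * Compares the vblank time against the per-ASIC memory switch latency
 * limit and returns true when a switch would not fit.
 */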
3227 static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
3228                                  uint32_t vblank_time_us)
3229 {
3230         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3231         uint32_t switch_limit_us;
3232
3233         switch (hwmgr->chip_id) {
3234         case CHIP_POLARIS10:
3235         case CHIP_POLARIS11:
3236         case CHIP_POLARIS12:
3237                 if (hwmgr->is_kicker || (hwmgr->chip_id == CHIP_POLARIS12))
3238                         switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
3239                 else
3240                         switch_limit_us = data->is_memory_gddr5 ? 200 : 150;
3241                 break;
3242         case CHIP_VEGAM:
3243                 switch_limit_us = 30;
3244                 break;
3245         default:
3246                 switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
3247                 break;
3248         }
3249
3250         return vblank_time_us < switch_limit_us;
3254 }
3255
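/**
 * smu7_apply_state_adjust_rules - Clamp a requested power state to current limits.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @request_ps: the power state to adjust in place.
 * @current_ps: the currently active power state (unused here).
 *
 * Caps clocks at the DC limits when not on AC power, honors the
 * stable-pstate caps, raises minimum clocks to the display requirements,
 * and disables MCLK switching when multiple unsynchronized displays are
 * active or vblank is too short.
 */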
3256 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3257                                 struct pp_power_state *request_ps,
3258                         const struct pp_power_state *current_ps)
3259 {
3260         struct amdgpu_device *adev = hwmgr->adev;
3261         struct smu7_power_state *smu7_ps =
3262                                 cast_phw_smu7_power_state(&request_ps->hardware);
3263         uint32_t sclk;
3264         uint32_t mclk;
3265         struct PP_Clocks minimum_clocks = {0};
3266         bool disable_mclk_switching;
3267         bool disable_mclk_switching_for_frame_lock;
3268         bool disable_mclk_switching_for_display;
3269         const struct phm_clock_and_voltage_limits *max_limits;
3270         uint32_t i;
3271         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3272         struct phm_ppt_v1_information *table_info =
3273                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
3274         int32_t count;
3275         int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3276         uint32_t latency;
3277         bool latency_allowed = false;
3278
3279         data->battery_state = (PP_StateUILabel_Battery ==
3280                         request_ps->classification.ui_label);
3281         data->mclk_ignore_signal = false;
3282
3283         PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
3284                                  "VI should always have 2 performance levels",
3285                                 );
3286
3287         max_limits = adev->pm.ac_power ?
3288                         &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3289                         &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3290
3291         /* Cap clock DPM tables at DC MAX if it is in DC. */
3292         if (!adev->pm.ac_power) {
3293                 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3294                         if (smu7_ps->performance_levels[i].memory_clock > max_limits->mclk)
3295                                 smu7_ps->performance_levels[i].memory_clock = max_limits->mclk;
3296                         if (smu7_ps->performance_levels[i].engine_clock > max_limits->sclk)
3297                                 smu7_ps->performance_levels[i].engine_clock = max_limits->sclk;
3298                 }
3299         }
3300
3301         minimum_clocks.engineClock = hwmgr->display_config->min_core_set_clock;
3302         minimum_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
3303
3304         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3305                         PHM_PlatformCaps_StablePState)) {
3306                 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3307                 stable_pstate_sclk = (max_limits->sclk * 75) / 100;
3308
3309                 for (count = table_info->vdd_dep_on_sclk->count - 1;
3310                                 count >= 0; count--) {
3311                         if (stable_pstate_sclk >=
3312                                         table_info->vdd_dep_on_sclk->entries[count].clk) {
3313                                 stable_pstate_sclk =
3314                                                 table_info->vdd_dep_on_sclk->entries[count].clk;
3315                                 break;
3316                         }
3317                 }
3318
3319                 if (count < 0)
3320                         stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3321
3322                 stable_pstate_mclk = max_limits->mclk;
3323
3324                 minimum_clocks.engineClock = stable_pstate_sclk;
3325                 minimum_clocks.memoryClock = stable_pstate_mclk;
3326         }
3327
3328         disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3329                                     hwmgr->platform_descriptor.platformCaps,
3330                                     PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3331
3332         disable_mclk_switching_for_display = ((1 < hwmgr->display_config->num_display) &&
3333                                                 !hwmgr->display_config->multi_monitor_in_sync) ||
3334                                                 (hwmgr->display_config->num_display &&
3335                                                 smu7_vblank_too_short(hwmgr, hwmgr->display_config->min_vblank_time));
3336
3337         disable_mclk_switching = disable_mclk_switching_for_frame_lock ||
3338                                          disable_mclk_switching_for_display;
3339
3340         if (hwmgr->display_config->num_display == 0) {
3341                 if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM)
3342                         data->mclk_ignore_signal = true;
3343                 else
3344                         disable_mclk_switching = false;
3345         }
3346
3347         sclk = smu7_ps->performance_levels[0].engine_clock;
3348         mclk = smu7_ps->performance_levels[0].memory_clock;
3349
3350         if (disable_mclk_switching &&
3351             (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
3352             hwmgr->chip_id <= CHIP_VEGAM)))
3353                 mclk = smu7_ps->performance_levels
3354                 [smu7_ps->performance_level_count - 1].memory_clock;
3355
3356         if (sclk < minimum_clocks.engineClock)
3357                 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3358                                 max_limits->sclk : minimum_clocks.engineClock;
3359
3360         if (mclk < minimum_clocks.memoryClock)
3361                 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3362                                 max_limits->mclk : minimum_clocks.memoryClock;
3363
3364         smu7_ps->performance_levels[0].engine_clock = sclk;
3365         smu7_ps->performance_levels[0].memory_clock = mclk;
3366
3367         smu7_ps->performance_levels[1].engine_clock =
3368                 (smu7_ps->performance_levels[1].engine_clock >=
3369                                 smu7_ps->performance_levels[0].engine_clock) ?
3370                                                 smu7_ps->performance_levels[1].engine_clock :
3371                                                 smu7_ps->performance_levels[0].engine_clock;
3372
3373         if (disable_mclk_switching) {
3374                 if (mclk < smu7_ps->performance_levels[1].memory_clock)
3375                         mclk = smu7_ps->performance_levels[1].memory_clock;
3376
3377                 if (hwmgr->chip_id >= CHIP_POLARIS10 && hwmgr->chip_id <= CHIP_VEGAM) {
3378                         if (disable_mclk_switching_for_display) {
3379                                 /* Find the lowest MCLK frequency that is within
3380                                  * the tolerable latency defined in DAL
3381                                  */
3382                                 latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;
3383                                 for (i = 0; i < data->mclk_latency_table.count; i++) {
3384                                         if (data->mclk_latency_table.entries[i].latency <= latency) {
3385                                                 latency_allowed = true;
3386
3387                                                 if ((data->mclk_latency_table.entries[i].frequency >=
3388                                                                 smu7_ps->performance_levels[0].memory_clock) &&
3389                                                     (data->mclk_latency_table.entries[i].frequency <=
3390                                                                 smu7_ps->performance_levels[1].memory_clock)) {
3391                                                         mclk = data->mclk_latency_table.entries[i].frequency;
3392                                                         break;
3393                                                 }
3394                                         }
3395                                 }
3396                                 if ((i >= data->mclk_latency_table.count - 1) && !latency_allowed) {
3397                                         data->mclk_ignore_signal = true;
3398                                 } else {
3399                                         data->mclk_ignore_signal = false;
3400                                 }
3401                         }
3402
3403                         if (disable_mclk_switching_for_frame_lock)
3404                                 mclk = smu7_ps->performance_levels[1].memory_clock;
3405                 }
3406
3407                 smu7_ps->performance_levels[0].memory_clock = mclk;
3408
3409                 if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
3410                       hwmgr->chip_id <= CHIP_VEGAM))
3411                         smu7_ps->performance_levels[1].memory_clock = mclk;
3412         } else {
3413                 if (smu7_ps->performance_levels[1].memory_clock <
3414                                 smu7_ps->performance_levels[0].memory_clock)
3415                         smu7_ps->performance_levels[1].memory_clock =
3416                                         smu7_ps->performance_levels[0].memory_clock;
3417         }
3418
3419         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3420                         PHM_PlatformCaps_StablePState)) {
3421                 for (i = 0; i < smu7_ps->performance_level_count; i++) {
3422                         smu7_ps->performance_levels[i].engine_clock = stable_pstate_sclk;
3423                         smu7_ps->performance_levels[i].memory_clock = stable_pstate_mclk;
3424                         smu7_ps->performance_levels[i].pcie_gen = data->pcie_gen_performance.max;
3425                         smu7_ps->performance_levels[i].pcie_lane = data->pcie_lane_performance.max;
3426                 }
3427         }
3428         return 0;
3429 }
3430
3431
3432 static uint32_t smu7_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3433 {
3434         struct pp_power_state  *ps;
3435         struct smu7_power_state  *smu7_ps;
3436
3437         if (hwmgr == NULL)
3438                 return -EINVAL;
3439
3440         ps = hwmgr->request_ps;
3441
3442         if (ps == NULL)
3443                 return -EINVAL;
3444
3445         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3446
3447         if (low)
3448                 return smu7_ps->performance_levels[0].memory_clock;
3449         else
3450                 return smu7_ps->performance_levels
3451                                 [smu7_ps->performance_level_count-1].memory_clock;
3452 }
3453
3454 static uint32_t smu7_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3455 {
3456         struct pp_power_state  *ps;
3457         struct smu7_power_state  *smu7_ps;
3458
3459         if (hwmgr == NULL)
3460                 return -EINVAL;
3461
3462         ps = hwmgr->request_ps;
3463
3464         if (ps == NULL)
3465                 return -EINVAL;
3466
3467         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
3468
3469         if (low)
3470                 return smu7_ps->performance_levels[0].engine_clock;
3471         else
3472                 return smu7_ps->performance_levels
3473                                 [smu7_ps->performance_level_count-1].engine_clock;
3474 }
3475
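/**
 * smu7_dpm_patch_boot_state - Fill the boot state from the VBIOS firmware info.
 *
 * @hwmgr: the address of the powerplay hardware manager.
 * @hw_ps: the hardware power state to patch.
 *
 * Pulls the boot-up SCLK/MCLK/VDDC/VDDCI/MVDD from the ATOM firmware info
 * table and the current PCIe gen/lane settings, and writes them into
 * performance level 0.
 */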
3476 static int smu7_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
3477                                         struct pp_hw_power_state *hw_ps)
3478 {
3479         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3480         struct smu7_power_state *ps = (struct smu7_power_state *)hw_ps;
3481         ATOM_FIRMWARE_INFO_V2_2 *fw_info;
3482         uint16_t size;
3483         uint8_t frev, crev;
3484         int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
3485
3486         /* First retrieve the Boot clocks and VDDC from the firmware info table.
3487          * We assume here that fw_info is unchanged if this call fails.
3488          */
3489         fw_info = (ATOM_FIRMWARE_INFO_V2_2 *)smu_atom_get_data_table(hwmgr->adev, index,
3490                         &size, &frev, &crev);
3491         if (!fw_info)
3492                 /* During a test, there is no firmware info table. */
3493                 return 0;
3494
3495         /* Patch the state. */
3496         data->vbios_boot_state.sclk_bootup_value =
3497                         le32_to_cpu(fw_info->ulDefaultEngineClock);
3498         data->vbios_boot_state.mclk_bootup_value =
3499                         le32_to_cpu(fw_info->ulDefaultMemoryClock);
3500         data->vbios_boot_state.mvdd_bootup_value =
3501                         le16_to_cpu(fw_info->usBootUpMVDDCVoltage);
3502         data->vbios_boot_state.vddc_bootup_value =
3503                         le16_to_cpu(fw_info->usBootUpVDDCVoltage);
3504         data->vbios_boot_state.vddci_bootup_value =
3505                         le16_to_cpu(fw_info->usBootUpVDDCIVoltage);
3506         data->vbios_boot_state.pcie_gen_bootup_value =
3507                         smu7_get_current_pcie_speed(hwmgr);
3508
3509         data->vbios_boot_state.pcie_lane_bootup_value =
3510                         (uint16_t)smu7_get_current_pcie_lane_number(hwmgr);
3511
3512         /* set boot power state */
3513         ps->performance_levels[0].memory_clock = data->vbios_boot_state.mclk_bootup_value;
3514         ps->performance_levels[0].engine_clock = data->vbios_boot_state.sclk_bootup_value;
3515         ps->performance_levels[0].pcie_gen = data->vbios_boot_state.pcie_gen_bootup_value;
3516         ps->performance_levels[0].pcie_lane = data->vbios_boot_state.pcie_lane_bootup_value;
3517
3518         return 0;
3519 }
3520
3521 static int smu7_get_number_of_powerplay_table_entries(struct pp_hwmgr *hwmgr)
3522 {
3523         int result;
3524         unsigned long ret = 0;
3525
3526         if (hwmgr->pp_table_version == PP_TABLE_V0) {
3527                 result = pp_tables_get_num_of_entries(hwmgr, &ret);
3528                 return result ? 0 : ret;
3529         } else if (hwmgr->pp_table_version == PP_TABLE_V1) {
3530                 result = get_number_of_powerplay_table_entries_v1_0(hwmgr);
3531                 return result;
3532         }
3533         return 0;
3534 }
3535
3536 static int smu7_get_pp_table_entry_callback_func_v1(struct pp_hwmgr *hwmgr,
3537                 void *state, struct pp_power_state *power_state,
3538                 void *pp_table, uint32_t classification_flag)
3539 {
3540         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3541         struct smu7_power_state  *smu7_power_state =
3542                         (struct smu7_power_state *)(&(power_state->hardware));
3543         struct smu7_performance_level *performance_level;
3544         ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
3545         ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
3546                         (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
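        /*
         * The SCLK/MCLK dependency tables are located at byte offsets from the
         * start of the powerplay table. The SCLK table is revision-typed, hence
         * the generic sub-table header that is cast per ucRevId below.
         */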
3547         PPTable_Generic_SubTable_Header *sclk_dep_table =
3548                         (PPTable_Generic_SubTable_Header *)
3549                         (((unsigned long)powerplay_table) +
3550                                 le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
3551
3552         ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
3553                         (ATOM_Tonga_MCLK_Dependency_Table *)
3554                         (((unsigned long)powerplay_table) +
3555                                 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3556
3557         /* The following fields are not initialized here: id, orderedList, allStatesList. */
3558         power_state->classification.ui_label =
3559                         (le16_to_cpu(state_entry->usClassification) &
3560                         ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3561                         ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3562         power_state->classification.flags = classification_flag;
3563         /* NOTE: There is a classification2 flag in BIOS that is not being used right now */
3564
3565         power_state->classification.temporary_state = false;
3566         power_state->classification.to_be_deleted = false;
3567
3568         power_state->validation.disallowOnDC =
3569                         (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3570                                         ATOM_Tonga_DISALLOW_ON_DC));
3571
3572         power_state->pcie.lanes = 0;
3573
3574         power_state->display.disableFrameModulation = false;
3575         power_state->display.limitRefreshrate = false;
3576         power_state->display.enableVariBright =
3577                         (0 != (le32_to_cpu(state_entry->ulCapsAndSettings) &
3578                                         ATOM_Tonga_ENABLE_VARIBRIGHT));
3579
3580         power_state->validation.supportedPowerLevels = 0;
3581         power_state->uvd_clocks.VCLK = 0;
3582         power_state->uvd_clocks.DCLK = 0;
3583         power_state->temperatures.min = 0;
3584         power_state->temperatures.max = 0;
3585
3586         performance_level = &(smu7_power_state->performance_levels
3587                         [smu7_power_state->performance_level_count++]);
3588
3589         PP_ASSERT_WITH_CODE(
3590                         (smu7_power_state->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3591                         "Performance level count exceeds the SMC limit!",
3592                         return -EINVAL);
3593
3594         PP_ASSERT_WITH_CODE(
3595                         (smu7_power_state->performance_level_count <=
3596                                         hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3597                         "Performance level count exceeds the driver limit!",
3598                         return -EINVAL);
3599
3600         /* Performance levels are arranged from low to high. */
3601         performance_level->memory_clock = mclk_dep_table->entries
3602                         [state_entry->ucMemoryClockIndexLow].ulMclk;
3603         if (sclk_dep_table->ucRevId == 0)
3604                 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3605                         [state_entry->ucEngineClockIndexLow].ulSclk;
3606         else if (sclk_dep_table->ucRevId == 1)
3607                 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3608                         [state_entry->ucEngineClockIndexLow].ulSclk;
3609         performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3610                         state_entry->ucPCIEGenLow);
3611         performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3612                         state_entry->ucPCIELaneLow);
3613
3614         performance_level = &(smu7_power_state->performance_levels
3615                         [smu7_power_state->performance_level_count++]);
3616         performance_level->memory_clock = mclk_dep_table->entries
3617                         [state_entry->ucMemoryClockIndexHigh].ulMclk;
3618
3619         if (sclk_dep_table->ucRevId == 0)
3620                 performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
3621                         [state_entry->ucEngineClockIndexHigh].ulSclk;
3622         else if (sclk_dep_table->ucRevId == 1)
3623                 performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
3624                         [state_entry->ucEngineClockIndexHigh].ulSclk;
3625
3626         performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
3627                         state_entry->ucPCIEGenHigh);
3628         performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
3629                         state_entry->ucPCIELaneHigh);
3630
3631         return 0;
3632 }
3633
3634 static int smu7_get_pp_table_entry_v1(struct pp_hwmgr *hwmgr,
3635                 unsigned long entry_index, struct pp_power_state *state)
3636 {
3637         int result;
3638         struct smu7_power_state *ps;
3639         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3640         struct phm_ppt_v1_information *table_info =
3641                         (struct phm_ppt_v1_information *)(hwmgr->pptable);
3642         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
3643                         table_info->vdd_dep_on_mclk;
3644
3645         state->hardware.magic = PHM_VIslands_Magic;
3646
3647         ps = (struct smu7_power_state *)(&state->hardware);
3648
3649         result = get_powerplay_table_entry_v1_0(hwmgr, entry_index, state,
3650                         smu7_get_pp_table_entry_callback_func_v1);
3651
3652         /* This is the earliest point at which both the dependency table and the
3653          * VBIOS boot state are available (get_powerplay_table_entry_v1_0 retrieves
3654          * the boot state). If there is only one VDDCI/MCLK level, check that it
3655          * matches the VBIOS boot values. */
3656         if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3657                 if (dep_mclk_table->entries[0].clk !=
3658                                 data->vbios_boot_state.mclk_bootup_value)
3659                         pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3660                                         "does not match VBIOS boot MCLK level\n");
3661                 if (dep_mclk_table->entries[0].vddci !=
3662                                 data->vbios_boot_state.vddci_bootup_value)
3663                         pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3664                                         "does not match VBIOS boot VDDCI level\n");
3665         }
3666
3667         /* set DC compatible flag if this state supports DC */
3668         if (!state->validation.disallowOnDC)
3669                 ps->dc_compatible = true;
3670
3671         if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3672                 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3673
3674         ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3675         ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3676
3677         if (!result) {
3678                 uint32_t i;
3679
3680                 switch (state->classification.ui_label) {
3681                 case PP_StateUILabel_Performance:
3682                         data->use_pcie_performance_levels = true;
3683                         for (i = 0; i < ps->performance_level_count; i++) {
3684                                 if (data->pcie_gen_performance.max <
3685                                                 ps->performance_levels[i].pcie_gen)
3686                                         data->pcie_gen_performance.max =
3687                                                         ps->performance_levels[i].pcie_gen;
3688
3689                                 if (data->pcie_gen_performance.min >
3690                                                 ps->performance_levels[i].pcie_gen)
3691                                         data->pcie_gen_performance.min =
3692                                                         ps->performance_levels[i].pcie_gen;
3693
3694                                 if (data->pcie_lane_performance.max <
3695                                                 ps->performance_levels[i].pcie_lane)
3696                                         data->pcie_lane_performance.max =
3697                                                         ps->performance_levels[i].pcie_lane;
3698                                 if (data->pcie_lane_performance.min >
3699                                                 ps->performance_levels[i].pcie_lane)
3700                                         data->pcie_lane_performance.min =
3701                                                         ps->performance_levels[i].pcie_lane;
3702                         }
3703                         break;
3704                 case PP_StateUILabel_Battery:
3705                         data->use_pcie_power_saving_levels = true;
3706
3707                         for (i = 0; i < ps->performance_level_count; i++) {
3708                                 if (data->pcie_gen_power_saving.max <
3709                                                 ps->performance_levels[i].pcie_gen)
3710                                         data->pcie_gen_power_saving.max =
3711                                                         ps->performance_levels[i].pcie_gen;
3712
3713                                 if (data->pcie_gen_power_saving.min >
3714                                                 ps->performance_levels[i].pcie_gen)
3715                                         data->pcie_gen_power_saving.min =
3716                                                         ps->performance_levels[i].pcie_gen;
3717
3718                                 if (data->pcie_lane_power_saving.max <
3719                                                 ps->performance_levels[i].pcie_lane)
3720                                         data->pcie_lane_power_saving.max =
3721                                                         ps->performance_levels[i].pcie_lane;
3722
3723                                 if (data->pcie_lane_power_saving.min >
3724                                                 ps->performance_levels[i].pcie_lane)
3725                                         data->pcie_lane_power_saving.min =
3726                                                         ps->performance_levels[i].pcie_lane;
3727                         }
3728                         break;
3729                 default:
3730                         break;
3731                 }
3732         }
3733         return 0;
3734 }
3735
3736 static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr,
3737                                         struct pp_hw_power_state *power_state,
3738                                         unsigned int index, const void *clock_info)
3739 {
3740         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3741         struct smu7_power_state  *ps = cast_phw_smu7_power_state(power_state);
3742         const ATOM_PPLIB_CI_CLOCK_INFO *visland_clk_info = clock_info;
3743         struct smu7_performance_level *performance_level;
3744         uint32_t engine_clock, memory_clock;
3745         uint16_t pcie_gen_from_bios;
3746
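        /*
         * BIOS stores each clock split as a low 16-bit word plus a high byte,
         * in 10 kHz units; e.g. high 0x01, low 0x86A0 yields 0x186A0 = 100000,
         * i.e. a 1 GHz clock.
         */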
3747         engine_clock = visland_clk_info->ucEngineClockHigh << 16 | visland_clk_info->usEngineClockLow;
3748         memory_clock = visland_clk_info->ucMemoryClockHigh << 16 | visland_clk_info->usMemoryClockLow;
3749
3750         if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
3751                 data->highest_mclk = memory_clock;
3752
3753         PP_ASSERT_WITH_CODE(
3754                         (ps->performance_level_count < smum_get_mac_definition(hwmgr, SMU_MAX_LEVELS_GRAPHICS)),
3755                         "Performance level count exceeds the SMC limit!",
3756                         return -EINVAL);
3757
3758         PP_ASSERT_WITH_CODE(
3759                         (ps->performance_level_count <
3760                                         hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
3761                         "Performance level count exceeds the driver limit, skipping!",
3762                         return 0);
3763
3764         performance_level = &(ps->performance_levels
3765                         [ps->performance_level_count++]);
3766
3767         /* Performance levels are arranged from low to high. */
3768         performance_level->memory_clock = memory_clock;
3769         performance_level->engine_clock = engine_clock;
3770
3771         pcie_gen_from_bios = visland_clk_info->ucPCIEGen;
3772
3773         performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, pcie_gen_from_bios);
3774         performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, visland_clk_info->usPCIELane);
3775
3776         return 0;
3777 }
3778
3779 static int smu7_get_pp_table_entry_v0(struct pp_hwmgr *hwmgr,
3780                 unsigned long entry_index, struct pp_power_state *state)
3781 {
3782         int result;
3783         struct smu7_power_state *ps;
3784         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3785         struct phm_clock_voltage_dependency_table *dep_mclk_table =
3786                         hwmgr->dyn_state.vddci_dependency_on_mclk;
3787
3788         memset(&state->hardware, 0x00, sizeof(struct pp_hw_power_state));
3789
3790         state->hardware.magic = PHM_VIslands_Magic;
3791
3792         ps = (struct smu7_power_state *)(&state->hardware);
3793
3794         result = pp_tables_get_entry(hwmgr, entry_index, state,
3795                         smu7_get_pp_table_entry_callback_func_v0);
3796
3797         /*
3798          * This is the earliest point at which both the dependency table
3799          * and the VBIOS boot state are available, since
3800          * pp_tables_get_entry retrieves the boot state. If there is
3801          * only one VDDCI/MCLK level, check that it matches the VBIOS
3802          * boot values.
3803          */
3804         if (dep_mclk_table != NULL && dep_mclk_table->count == 1) {
3805                 if (dep_mclk_table->entries[0].clk !=
3806                                 data->vbios_boot_state.mclk_bootup_value)
3807                         pr_debug("Single MCLK entry VDDCI/MCLK dependency table "
3808                                         "does not match VBIOS boot MCLK level\n");
3809                 if (dep_mclk_table->entries[0].v !=
3810                                 data->vbios_boot_state.vddci_bootup_value)
3811                         pr_debug("Single VDDCI entry VDDCI/MCLK dependency table "
3812                                         "does not match VBIOS boot VDDCI level\n");
3813         }
3814
3815         /* set DC compatible flag if this state supports DC */
3816         if (!state->validation.disallowOnDC)
3817                 ps->dc_compatible = true;
3818
3819         if (state->classification.flags & PP_StateClassificationFlag_ACPI)
3820                 data->acpi_pcie_gen = ps->performance_levels[0].pcie_gen;
3821
3822         ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3823         ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
3824
3825         if (!result) {
3826                 uint32_t i;
3827
3828                 switch (state->classification.ui_label) {
3829                 case PP_StateUILabel_Performance:
3830                         data->use_pcie_performance_levels = true;
3831
3832                         for (i = 0; i < ps->performance_level_count; i++) {
3833                                 if (data->pcie_gen_performance.max <
3834                                                 ps->performance_levels[i].pcie_gen)
3835                                         data->pcie_gen_performance.max =
3836                                                         ps->performance_levels[i].pcie_gen;
3837
3838                                 if (data->pcie_gen_performance.min >
3839                                                 ps->performance_levels[i].pcie_gen)
3840                                         data->pcie_gen_performance.min =
3841                                                         ps->performance_levels[i].pcie_gen;
3842
3843                                 if (data->pcie_lane_performance.max <
3844                                                 ps->performance_levels[i].pcie_lane)
3845                                         data->pcie_lane_performance.max =
3846                                                         ps->performance_levels[i].pcie_lane;
3847
3848                                 if (data->pcie_lane_performance.min >
3849                                                 ps->performance_levels[i].pcie_lane)
3850                                         data->pcie_lane_performance.min =
3851                                                         ps->performance_levels[i].pcie_lane;
3852                         }
3853                         break;
3854                 case PP_StateUILabel_Battery:
3855                         data->use_pcie_power_saving_levels = true;
3856
3857                         for (i = 0; i < ps->performance_level_count; i++) {
3858                                 if (data->pcie_gen_power_saving.max <
3859                                                 ps->performance_levels[i].pcie_gen)
3860                                         data->pcie_gen_power_saving.max =
3861                                                         ps->performance_levels[i].pcie_gen;
3862
3863                                 if (data->pcie_gen_power_saving.min >
3864                                                 ps->performance_levels[i].pcie_gen)
3865                                         data->pcie_gen_power_saving.min =
3866                                                         ps->performance_levels[i].pcie_gen;
3867
3868                                 if (data->pcie_lane_power_saving.max <
3869                                                 ps->performance_levels[i].pcie_lane)
3870                                         data->pcie_lane_power_saving.max =
3871                                                         ps->performance_levels[i].pcie_lane;
3872
3873                                 if (data->pcie_lane_power_saving.min >
3874                                                 ps->performance_levels[i].pcie_lane)
3875                                         data->pcie_lane_power_saving.min =
3876                                                         ps->performance_levels[i].pcie_lane;
3877                         }
3878                         break;
3879                 default:
3880                         break;
3881                 }
3882         }
3883         return 0;
3884 }
3885
3886 static int smu7_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3887                 unsigned long entry_index, struct pp_power_state *state)
3888 {
3889         if (hwmgr->pp_table_version == PP_TABLE_V0)
3890                 return smu7_get_pp_table_entry_v0(hwmgr, entry_index, state);
3891         else if (hwmgr->pp_table_version == PP_TABLE_V1)
3892                 return smu7_get_pp_table_entry_v1(hwmgr, entry_index, state);
3893
3894         return 0;
3895 }
3896
3897 static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
3898 {
3899         struct amdgpu_device *adev = hwmgr->adev;
3900         int i;
3901         u32 tmp = 0;
3902
3903         if (!query)
3904                 return -EINVAL;
3905
3906         /*
3907          * PPSMC_MSG_GetCurrPkgPwr is not supported on:
3908          *  - Hawaii
3909          *  - Bonaire
3910          *  - Fiji
3911          *  - Tonga
3912          */
3913         if ((adev->asic_type != CHIP_HAWAII) &&
3914             (adev->asic_type != CHIP_BONAIRE) &&
3915             (adev->asic_type != CHIP_FIJI) &&
3916             (adev->asic_type != CHIP_TONGA)) {
3917                 smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetCurrPkgPwr, 0, &tmp);
3918                 *query = tmp;
3919
3920                 if (tmp != 0)
3921                         return 0;
3922         }
3923
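        /* Fallback: start PM status logging and poll the sampled power value
         * for up to ~5 seconds (10 polls at 500 ms intervals).
         */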
3924         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart, NULL);
3925         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3926                                                         ixSMU_PM_STATUS_95, 0);
3927
3928         for (i = 0; i < 10; i++) {
3929                 msleep(500);
3930                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample, NULL);
3931                 tmp = cgs_read_ind_register(hwmgr->device,
3932                                                 CGS_IND_REG__SMC,
3933                                                 ixSMU_PM_STATUS_95);
3934                 if (tmp != 0)
3935                         break;
3936         }
3937         *query = tmp;
3938
3939         return 0;
3940 }
3941
3942 static int smu7_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3943                             void *value, int *size)
3944 {
3945         uint32_t sclk, mclk, activity_percent;
3946         uint32_t offset, val_vid;
3947         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
3948
3949         /* size must be at least 4 bytes for all sensors */
3950         if (*size < 4)
3951                 return -EINVAL;
3952
3953         switch (idx) {
3954         case AMDGPU_PP_SENSOR_GFX_SCLK:
3955                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &sclk);
3956                 *((uint32_t *)value) = sclk;
3957                 *size = 4;
3958                 return 0;
3959         case AMDGPU_PP_SENSOR_GFX_MCLK:
3960                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &mclk);
3961                 *((uint32_t *)value) = mclk;
3962                 *size = 4;
3963                 return 0;
3964         case AMDGPU_PP_SENSOR_GPU_LOAD:
3965         case AMDGPU_PP_SENSOR_MEM_LOAD:
3966                 offset = data->soft_regs_start + smum_get_offsetof(hwmgr,
3967                                                                 SMU_SoftRegisters,
3968                                                                 (idx == AMDGPU_PP_SENSOR_GPU_LOAD) ?
3969                                                                 AverageGraphicsActivity:
3970                                                                 AverageMemoryActivity);
3971
3972                 activity_percent = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offset);
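                /* The register holds the activity as (roughly) an 8.8
                 * fixed-point percentage; adding 0x80 before the shift rounds
                 * to the nearest percent, e.g. 0x3280 (50.5) -> 51.
                 */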
3973                 activity_percent += 0x80;
3974                 activity_percent >>= 8;
3975                 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
3976                 *size = 4;
3977                 return 0;
3978         case AMDGPU_PP_SENSOR_GPU_TEMP:
3979                 *((uint32_t *)value) = smu7_thermal_get_temperature(hwmgr);
3980                 *size = 4;
3981                 return 0;
3982         case AMDGPU_PP_SENSOR_UVD_POWER:
3983                 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
3984                 *size = 4;
3985                 return 0;
3986         case AMDGPU_PP_SENSOR_VCE_POWER:
3987                 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
3988                 *size = 4;
3989                 return 0;
3990         case AMDGPU_PP_SENSOR_GPU_POWER:
3991                 return smu7_get_gpu_power(hwmgr, (uint32_t *)value);
3992         case AMDGPU_PP_SENSOR_VDDGFX:
3993                 if ((data->vr_config & VRCONF_VDDGFX_MASK) ==
3994                     (VR_SVI2_PLANE_2 << VRCONF_VDDGFX_SHIFT))
3995                         val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
3996                                         CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE2_VID);
3997                 else
3998                         val_vid = PHM_READ_INDIRECT_FIELD(hwmgr->device,
3999                                         CGS_IND_REG__SMC, PWR_SVI2_STATUS, PLANE1_VID);
4000
4001                 *((uint32_t *)value) = (uint32_t)convert_to_vddc(val_vid);
4002                 return 0;
4003         default:
4004                 return -EOPNOTSUPP;
4005         }
4006 }
4007
4008 static int smu7_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
4009 {
4010         const struct phm_set_power_state_input *states =
4011                         (const struct phm_set_power_state_input *)input;
4012         const struct smu7_power_state *smu7_ps =
4013                         cast_const_phw_smu7_power_state(states->pnew_state);
4014         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4015         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4016         uint32_t sclk = smu7_ps->performance_levels
4017                         [smu7_ps->performance_level_count - 1].engine_clock;
4018         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4019         uint32_t mclk = smu7_ps->performance_levels
4020                         [smu7_ps->performance_level_count - 1].memory_clock;
4021         struct PP_Clocks min_clocks = {0};
4022         uint32_t i;
4023
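        /* Walk the SCLK/MCLK DPM tables: a requested clock that is not in the
         * table implies the overdrive (OD) top level has to be rewritten.
         */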
4024         for (i = 0; i < sclk_table->count; i++) {
4025                 if (sclk == sclk_table->dpm_levels[i].value)
4026                         break;
4027         }
4028
4029         if (i >= sclk_table->count) {
4030                 if (sclk > sclk_table->dpm_levels[i-1].value) {
4031                         data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
4032                         sclk_table->dpm_levels[i-1].value = sclk;
4033                 }
4034         } else {
4035                 /* TODO: Check SCLK in DAL's minimum clocks
4036                  * in case a DeepSleep divider update is required.
4037                  */
4038                 if (data->display_timing.min_clock_in_sr != min_clocks.engineClockInSR &&
4039                         (min_clocks.engineClockInSR >= SMU7_MINIMUM_ENGINE_CLOCK ||
4040                                 data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4041                         data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
4042         }
4043
4044         for (i = 0; i < mclk_table->count; i++) {
4045                 if (mclk == mclk_table->dpm_levels[i].value)
4046                         break;
4047         }
4048
4049         if (i >= mclk_table->count) {
4050                 if (mclk > mclk_table->dpm_levels[i-1].value) {
4051                         data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4052                         mclk_table->dpm_levels[i-1].value = mclk;
4053                 }
4054         }
4055
4056         if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4057                 data->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4058
4059         return 0;
4060 }
4061
4062 static uint16_t smu7_get_maximum_link_speed(struct pp_hwmgr *hwmgr,
4063                 const struct smu7_power_state *smu7_ps)
4064 {
4065         uint32_t i;
4066         uint32_t sclk, max_sclk = 0;
4067         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4068         struct smu7_dpm_table *dpm_table = &data->dpm_table;
4069
4070         for (i = 0; i < smu7_ps->performance_level_count; i++) {
4071                 sclk = smu7_ps->performance_levels[i].engine_clock;
4072                 if (max_sclk < sclk)
4073                         max_sclk = sclk;
4074         }
4075
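        /* Find the DPM level of the highest engine clock and return the PCIe
         * speed configured for that level, clamped to the last entry of the
         * PCIe speed table.
         */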
4076         for (i = 0; i < dpm_table->sclk_table.count; i++) {
4077                 if (dpm_table->sclk_table.dpm_levels[i].value == max_sclk)
4078                         return (uint16_t) ((i >= dpm_table->pcie_speed_table.count) ?
4079                                         dpm_table->pcie_speed_table.dpm_levels
4080                                         [dpm_table->pcie_speed_table.count - 1].value :
4081                                         dpm_table->pcie_speed_table.dpm_levels[i].value);
4082         }
4083
4084         return 0;
4085 }
4086
4087 static int smu7_request_link_speed_change_before_state_change(
4088                 struct pp_hwmgr *hwmgr, const void *input)
4089 {
4090         const struct phm_set_power_state_input *states =
4091                         (const struct phm_set_power_state_input *)input;
4092         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4093         const struct smu7_power_state *smu7_nps =
4094                         cast_const_phw_smu7_power_state(states->pnew_state);
4095         const struct smu7_power_state *smu7_cps =
4096                         cast_const_phw_smu7_power_state(states->pcurrent_state);
4097
4098         uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_nps);
4099         uint16_t current_link_speed;
4100
4101         if (data->force_pcie_gen == PP_PCIEGenInvalid)
4102                 current_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_cps);
4103         else
4104                 current_link_speed = data->force_pcie_gen;
4105
4106         data->force_pcie_gen = PP_PCIEGenInvalid;
4107         data->pspp_notify_required = false;
4108
4109         if (target_link_speed > current_link_speed) {
4110                 switch (target_link_speed) {
4111 #ifdef CONFIG_ACPI
4112                 case PP_PCIEGen3:
4113                         if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN3, false))
4114                                 break;
4115                         data->force_pcie_gen = PP_PCIEGen2;
4116                         if (current_link_speed == PP_PCIEGen2)
4117                                 break;
4118                         fallthrough;
4119                 case PP_PCIEGen2:
4120                         if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
4121                                 break;
4122                         fallthrough;
4123 #endif
4124                 default:
4125                         data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
4126                         break;
4127                 }
4128         } else {
4129                 if (target_link_speed < current_link_speed)
4130                         data->pspp_notify_required = true;
4131         }
4132
4133         return 0;
4134 }
4135
4136 static int smu7_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4137 {
4138         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4139
4140         if (0 == data->need_update_smu7_dpm_table)
4141                 return 0;
4142
4143         if ((0 == data->sclk_dpm_key_disabled) &&
4144                 (data->need_update_smu7_dpm_table &
4145                         (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
4146                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4147                                 "Trying to freeze SCLK DPM when DPM is disabled",
4148                                 );
4149                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4150                                 PPSMC_MSG_SCLKDPM_FreezeLevel,
4151                                 NULL),
4152                                 "Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
4153                                 return -EINVAL);
4154         }
4155
4156         if ((0 == data->mclk_dpm_key_disabled) &&
4157                 !data->mclk_ignore_signal &&
4158                 (data->need_update_smu7_dpm_table &
4159                  DPMTABLE_OD_UPDATE_MCLK)) {
4160                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4161                                 "Trying to freeze MCLK DPM when DPM is disabled",
4162                                 );
4163                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4164                                 PPSMC_MSG_MCLKDPM_FreezeLevel,
4165                                 NULL),
4166                                 "Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
4167                                 return -EINVAL);
4168         }
4169
4170         return 0;
4171 }
4172
4173 static int smu7_populate_and_upload_sclk_mclk_dpm_levels(
4174                 struct pp_hwmgr *hwmgr, const void *input)
4175 {
4176         int result = 0;
4177         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4178         struct smu7_dpm_table *dpm_table = &data->dpm_table;
4179         uint32_t count;
4180         struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4181         struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
4182         struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
4183
4184         if (0 == data->need_update_smu7_dpm_table)
4185                 return 0;
4186
4187         if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
4188                 for (count = 0; count < dpm_table->sclk_table.count; count++) {
4189                         dpm_table->sclk_table.dpm_levels[count].enabled = odn_sclk_table->entries[count].enabled;
4190                         dpm_table->sclk_table.dpm_levels[count].value = odn_sclk_table->entries[count].clock;
4191                 }
4192         }
4193
4194         if (hwmgr->od_enabled && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
4195                 for (count = 0; count < dpm_table->mclk_table.count; count++) {
4196                         dpm_table->mclk_table.dpm_levels[count].enabled = odn_mclk_table->entries[count].enabled;
4197                         dpm_table->mclk_table.dpm_levels[count].value = odn_mclk_table->entries[count].clock;
4198                 }
4199         }
4200
4201         if (data->need_update_smu7_dpm_table &
4202                         (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4203                 result = smum_populate_all_graphic_levels(hwmgr);
4204                 PP_ASSERT_WITH_CODE((0 == result),
4205                                 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
4206                                 return result);
4207         }
4208
4209         if (data->need_update_smu7_dpm_table &
4210                         (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4211                 /* populate the MCLK DPM table to the SMU */
4212                 result = smum_populate_all_memory_levels(hwmgr);
4213                 PP_ASSERT_WITH_CODE((0 == result),
4214                                 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
4215                                 return result);
4216         }
4217
4218         return result;
4219 }
4220
4221 static int smu7_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
4222                           struct smu7_single_dpm_table *dpm_table,
4223                         uint32_t low_limit, uint32_t high_limit)
4224 {
4225         uint32_t i;
4226
4227         /* force the trim if mclk_switching is disabled to prevent flicker */
4228         bool force_trim = (low_limit == high_limit);
4229         for (i = 0; i < dpm_table->count; i++) {
4230                 /* skip the trim if OD is enabled */
4231                 if ((!hwmgr->od_enabled || force_trim)
4232                         && (dpm_table->dpm_levels[i].value < low_limit
4233                         || dpm_table->dpm_levels[i].value > high_limit))
4234                         dpm_table->dpm_levels[i].enabled = false;
4235                 else
4236                         dpm_table->dpm_levels[i].enabled = true;
4237         }
4238
4239         return 0;
4240 }
4241
4242 static int smu7_trim_dpm_states(struct pp_hwmgr *hwmgr,
4243                 const struct smu7_power_state *smu7_ps)
4244 {
4245         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4246         uint32_t high_limit_count;
4247
4248         PP_ASSERT_WITH_CODE((smu7_ps->performance_level_count >= 1),
4249                         "power state did not have any performance level",
4250                         return -EINVAL);
4251
4252         high_limit_count = (1 == smu7_ps->performance_level_count) ? 0 : 1;
4253
4254         smu7_trim_single_dpm_states(hwmgr,
4255                         &(data->dpm_table.sclk_table),
4256                         smu7_ps->performance_levels[0].engine_clock,
4257                         smu7_ps->performance_levels[high_limit_count].engine_clock);
4258
4259         smu7_trim_single_dpm_states(hwmgr,
4260                         &(data->dpm_table.mclk_table),
4261                         smu7_ps->performance_levels[0].memory_clock,
4262                         smu7_ps->performance_levels[high_limit_count].memory_clock);
4263
4264         return 0;
4265 }
4266
4267 static int smu7_generate_dpm_level_enable_mask(
4268                 struct pp_hwmgr *hwmgr, const void *input)
4269 {
4270         int result = 0;
4271         const struct phm_set_power_state_input *states =
4272                         (const struct phm_set_power_state_input *)input;
4273         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4274         const struct smu7_power_state *smu7_ps =
4275                         cast_const_phw_smu7_power_state(states->pnew_state);
4276
4277
4278         result = smu7_trim_dpm_states(hwmgr, smu7_ps);
4279         if (result)
4280                 return result;
4281
4282         data->dpm_level_enable_mask.sclk_dpm_enable_mask =
4283                         phm_get_dpm_level_enable_mask_value(&data->dpm_table.sclk_table);
4284         data->dpm_level_enable_mask.mclk_dpm_enable_mask =
4285                         phm_get_dpm_level_enable_mask_value(&data->dpm_table.mclk_table);
4286         data->dpm_level_enable_mask.pcie_dpm_enable_mask =
4287                         phm_get_dpm_level_enable_mask_value(&data->dpm_table.pcie_speed_table);
4288
4289         return 0;
4290 }
4291
4292 static int smu7_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4293 {
4294         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4295
4296         if (0 == data->need_update_smu7_dpm_table)
4297                 return 0;
4298
4299         if ((0 == data->sclk_dpm_key_disabled) &&
4300                 (data->need_update_smu7_dpm_table &
4301                 (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
4302
4303                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4304                                 "Trying to unfreeze SCLK DPM when DPM is disabled",
4305                                 );
4306                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4307                                 PPSMC_MSG_SCLKDPM_UnfreezeLevel,
4308                                 NULL),
4309                         "Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
4310                         return -EINVAL);
4311         }
4312
4313         if ((0 == data->mclk_dpm_key_disabled) &&
4314                 !data->mclk_ignore_signal &&
4315                 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
4316
4317                 PP_ASSERT_WITH_CODE(true == smum_is_dpm_running(hwmgr),
4318                                 "Trying to unfreeze MCLK DPM when DPM is disabled",
4319                                 );
4320                 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
4321                                 PPSMC_MSG_MCLKDPM_UnfreezeLevel,
4322                                 NULL),
4323                     "Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
4324                     return -EINVAL);
4325         }
4326
4327         data->need_update_smu7_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
4328
4329         return 0;
4330 }
4331
4332 static int smu7_notify_link_speed_change_after_state_change(
4333                 struct pp_hwmgr *hwmgr, const void *input)
4334 {
4335         const struct phm_set_power_state_input *states =
4336                         (const struct phm_set_power_state_input *)input;
4337         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4338         const struct smu7_power_state *smu7_ps =
4339                         cast_const_phw_smu7_power_state(states->pnew_state);
4340         uint16_t target_link_speed = smu7_get_maximum_link_speed(hwmgr, smu7_ps);
4341         uint8_t  request;
4342
4343         if (data->pspp_notify_required) {
4344                 if (target_link_speed == PP_PCIEGen3)
4345                         request = PCIE_PERF_REQ_GEN3;
4346                 else if (target_link_speed == PP_PCIEGen2)
4347                         request = PCIE_PERF_REQ_GEN2;
4348                 else
4349                         request = PCIE_PERF_REQ_GEN1;
4350
4351                 if (request == PCIE_PERF_REQ_GEN1 &&
4352                                 smu7_get_current_pcie_speed(hwmgr) > 0)
4353                         return 0;
4354
4355 #ifdef CONFIG_ACPI
4356                 if (amdgpu_acpi_pcie_performance_request(hwmgr->adev, request, false)) {
4357                         if (PP_PCIEGen2 == target_link_speed)
4358                                 pr_info("PSPP request to switch to Gen2 from Gen3 Failed!");
4359                         else
4360                                 pr_info("PSPP request to switch to Gen1 from Gen2 Failed!");
4361                 }
4362 #endif
4363         }
4364
4365         return 0;
4366 }
4367
4368 static int smu7_notify_no_display(struct pp_hwmgr *hwmgr)
4369 {
4370         return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_NoDisplay, NULL) == 0) ?  0 : -EINVAL;
4371 }
4372
4373 static int smu7_notify_has_display(struct pp_hwmgr *hwmgr)
4374 {
4375         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4376
4377         if (hwmgr->feature_mask & PP_VBI_TIME_SUPPORT_MASK) {
4378                 if (hwmgr->chip_id == CHIP_VEGAM)
4379                         smum_send_msg_to_smc_with_parameter(hwmgr,
4380                                         (PPSMC_Msg)PPSMC_MSG_SetVBITimeout_VEGAM, data->frame_time_x2,
4381                                         NULL);
4382                 else
4383                         smum_send_msg_to_smc_with_parameter(hwmgr,
4384                                         (PPSMC_Msg)PPSMC_MSG_SetVBITimeout, data->frame_time_x2,
4385                                         NULL);
4386                 data->last_sent_vbi_timeout = data->frame_time_x2;
4387         }
4388
4389         return (smum_send_msg_to_smc(hwmgr, (PPSMC_Msg)PPSMC_HasDisplay, NULL) == 0) ?  0 : -EINVAL;
4390 }
4391
4392 static int smu7_notify_smc_display(struct pp_hwmgr *hwmgr)
4393 {
4394         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4395         int result = 0;
4396
4397         if (data->mclk_ignore_signal)
4398                 result = smu7_notify_no_display(hwmgr);
4399         else
4400                 result = smu7_notify_has_display(hwmgr);
4401
4402         return result;
4403 }
4404
4405 static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
4406 {
4407         int tmp_result, result = 0;
4408         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4409
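        /* Ordering matters here: detect required DPM table updates, raise the
         * link speed if needed, freeze DPM, upload the new SCLK/MCLK levels,
         * then unfreeze and notify the SMC about displays and link speed.
         */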
4410         tmp_result = smu7_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
4411         PP_ASSERT_WITH_CODE((0 == tmp_result),
4412                         "Failed to find DPM states clocks in DPM table!",
4413                         result = tmp_result);
4414
4415         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4416                         PHM_PlatformCaps_PCIEPerformanceRequest)) {
4417                 tmp_result =
4418                         smu7_request_link_speed_change_before_state_change(hwmgr, input);
4419                 PP_ASSERT_WITH_CODE((0 == tmp_result),
4420                                 "Failed to request link speed change before state change!",
4421                                 result = tmp_result);
4422         }
4423
4424         tmp_result = smu7_freeze_sclk_mclk_dpm(hwmgr);
4425         PP_ASSERT_WITH_CODE((0 == tmp_result),
4426                         "Failed to freeze SCLK MCLK DPM!", result = tmp_result);
4427
4428         tmp_result = smu7_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
4429         PP_ASSERT_WITH_CODE((0 == tmp_result),
4430                         "Failed to populate and upload SCLK MCLK DPM levels!",
4431                         result = tmp_result);
4432
4433         /*
4434          * If a custom pp table is loaded, set the DPMTABLE_OD_UPDATE_VDDC flag.
4435          * That effectively disables the AVFS feature.
4436          */
4437         if (hwmgr->hardcode_pp_table != NULL)
4438                 data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
4439
4440         tmp_result = smu7_update_avfs(hwmgr);
4441         PP_ASSERT_WITH_CODE((0 == tmp_result),
4442                         "Failed to update avfs voltages!",
4443                         result = tmp_result);
4444
4445         tmp_result = smu7_generate_dpm_level_enable_mask(hwmgr, input);
4446         PP_ASSERT_WITH_CODE((0 == tmp_result),
4447                         "Failed to generate DPM level enabled mask!",
4448                         result = tmp_result);
4449
4450         tmp_result = smum_update_sclk_threshold(hwmgr);
4451         PP_ASSERT_WITH_CODE((0 == tmp_result),
4452                         "Failed to update SCLK threshold!",
4453                         result = tmp_result);
4454
4455         tmp_result = smu7_unfreeze_sclk_mclk_dpm(hwmgr);
4456         PP_ASSERT_WITH_CODE((0 == tmp_result),
4457                         "Failed to unfreeze SCLK MCLK DPM!",
4458                         result = tmp_result);
4459
4460         tmp_result = smu7_upload_dpm_level_enable_mask(hwmgr);
4461         PP_ASSERT_WITH_CODE((0 == tmp_result),
4462                         "Failed to upload DPM level enabled mask!",
4463                         result = tmp_result);
4464
4465         tmp_result = smu7_notify_smc_display(hwmgr);
4466         PP_ASSERT_WITH_CODE((0 == tmp_result),
4467                         "Failed to notify smc display settings!",
4468                         result = tmp_result);
4469
4470         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4471                         PHM_PlatformCaps_PCIEPerformanceRequest)) {
4472                 tmp_result =
4473                         smu7_notify_link_speed_change_after_state_change(hwmgr, input);
4474                 PP_ASSERT_WITH_CODE((0 == tmp_result),
4475                                 "Failed to notify link speed change after state change!",
4476                                 result = tmp_result);
4477         }
4478         data->apply_optimized_settings = false;
4479         return result;
4480 }
4481
4482 static int smu7_set_max_fan_pwm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm)
4483 {
4484         hwmgr->thermal_controller.
4485         advanceFanControlParameters.usMaxFanPWM = us_max_fan_pwm;
4486
4487         return smum_send_msg_to_smc_with_parameter(hwmgr,
4488                         PPSMC_MSG_SetFanPwmMax, us_max_fan_pwm,
4489                         NULL);
4490 }
4491
4492 static int
4493 smu7_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
4494 {
4495         return 0;
4496 }
4497
4498 /**
4499  * smu7_program_display_gap - Programs the display gap
4500  *
4501  * @hwmgr:  the address of the powerplay hardware manager.
4502  * Return: always 0
4503  */
4504 static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
4505 {
4506         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4507         uint32_t display_gap = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL);
4508         uint32_t display_gap2;
4509         uint32_t pre_vbi_time_in_us;
4510         uint32_t frame_time_in_us;
4511         uint32_t ref_clock, refresh_rate;
4512
4513         display_gap = PHM_SET_FIELD(display_gap, CG_DISPLAY_GAP_CNTL, DISP_GAP, (hwmgr->display_config->num_display > 0) ? DISPLAY_GAP_VBLANK_OR_WM : DISPLAY_GAP_IGNORE);
4514         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL, display_gap);
4515
4516         ref_clock =  amdgpu_asic_get_xclk((struct amdgpu_device *)hwmgr->adev);
4517         refresh_rate = hwmgr->display_config->vrefresh;
4518
4519         if (0 == refresh_rate)
4520                 refresh_rate = 60;
4521
4522         frame_time_in_us = 1000000 / refresh_rate;
4523
4524         pre_vbi_time_in_us = frame_time_in_us - 200 - hwmgr->display_config->min_vblank_time;
4525
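        /* frame_time_x2 is twice the frame time in 100 us units, e.g.
         * 60 Hz -> 16666 us -> 333; it is later sent to the SMC as the
         * VBI timeout (see smu7_notify_has_display()).
         */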
4526         data->frame_time_x2 = frame_time_in_us * 2 / 100;
4527
4528         if (data->frame_time_x2 < 280) {
4529                 pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
4530                 data->frame_time_x2 = 280;
4531         }
4532
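        /* xclk is reported in 10 kHz units, so ref_clock / 100 is cycles per
         * microsecond; display_gap2 is the pre-VBI interval in xclk cycles.
         */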
4533         display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
4534
4535         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
4536
4537         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4538                         data->soft_regs_start + smum_get_offsetof(hwmgr,
4539                                                         SMU_SoftRegisters,
4540                                                         PreVBlankGap), 0x64);
4541
4542         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
4543                         data->soft_regs_start + smum_get_offsetof(hwmgr,
4544                                                         SMU_SoftRegisters,
4545                                                         VBlankTimeout),
4546                                         (frame_time_in_us - pre_vbi_time_in_us));
4547
4548         return 0;
4549 }
4550
4551 static int smu7_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4552 {
4553         return smu7_program_display_gap(hwmgr);
4554 }
4555
4556 /**
4557  * smu7_set_max_fan_rpm_output - Set maximum target operating fan output RPM
4558  *
4559  * @hwmgr:  the address of the powerplay hardware manager.
4560  * @us_max_fan_rpm:  max operating fan RPM value.
4561  * Return:   The response that came from the SMC.
4562  */
4563 static int smu7_set_max_fan_rpm_output(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_rpm)
4564 {
4565         hwmgr->thermal_controller.
4566         advanceFanControlParameters.usMaxFanRPM = us_max_fan_rpm;
4567
4568         return smum_send_msg_to_smc_with_parameter(hwmgr,
4569                         PPSMC_MSG_SetFanRpmMax, us_max_fan_rpm,
4570                         NULL);
4571 }
4572
4573 static const struct amdgpu_irq_src_funcs smu7_irq_funcs = {
4574         .process = phm_irq_process,
4575 };
4576
4577 static int smu7_register_irq_handlers(struct pp_hwmgr *hwmgr)
4578 {
4579         struct amdgpu_irq_src *source =
4580                 kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
4581
4582         if (!source)
4583                 return -ENOMEM;
4584
4585         source->funcs = &smu7_irq_funcs;
4586
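        /* Register the thermal trip interrupts (temperature crossing the
         * low-to-high and high-to-low thresholds).
         */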
4587         amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4588                         AMDGPU_IRQ_CLIENTID_LEGACY,
4589                         VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH,
4590                         source);
4591         amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4592                         AMDGPU_IRQ_CLIENTID_LEGACY,
4593                         VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW,
4594                         source);
4595
4596         /* Register the CTF (GPIO_19) interrupt */
4597         amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
4598                         AMDGPU_IRQ_CLIENTID_LEGACY,
4599                         VISLANDS30_IV_SRCID_GPIO_19,
4600                         source);
4601
4602         return 0;
4603 }
4604
4605 static bool
4606 smu7_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4607 {
4608         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4609         bool is_update_required = false;
4610
4611         if (data->display_timing.num_existing_displays != hwmgr->display_config->num_display)
4612                 is_update_required = true;
4613
4614         if (data->display_timing.vrefresh != hwmgr->display_config->vrefresh)
4615                 is_update_required = true;
4616
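        /* Polaris10 through VEGAM must re-send the VBI timeout whenever the
         * frame time has changed (see smu7_notify_has_display()), so an SMC
         * update is required in that case.
         */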
4617         if (hwmgr->chip_id >= CHIP_POLARIS10 &&
4618             hwmgr->chip_id <= CHIP_VEGAM &&
4619             data->last_sent_vbi_timeout != data->frame_time_x2)
4620                 is_update_required = true;
4621
4622         if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4623                 if (data->display_timing.min_clock_in_sr != hwmgr->display_config->min_core_set_clock_in_sr &&
4624                         (data->display_timing.min_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK ||
4625                         hwmgr->display_config->min_core_set_clock_in_sr >= SMU7_MINIMUM_ENGINE_CLOCK))
4626                         is_update_required = true;
4627         }
4628         return is_update_required;
4629 }
4630
4631 static inline bool smu7_are_power_levels_equal(const struct smu7_performance_level *pl1,
4632                                                            const struct smu7_performance_level *pl2)
4633 {
4634         return ((pl1->memory_clock == pl2->memory_clock) &&
4635                   (pl1->engine_clock == pl2->engine_clock) &&
4636                   (pl1->pcie_gen == pl2->pcie_gen) &&
4637                   (pl1->pcie_lane == pl2->pcie_lane));
4638 }
4639
4640 static int smu7_check_states_equal(struct pp_hwmgr *hwmgr,
4641                 const struct pp_hw_power_state *pstate1,
4642                 const struct pp_hw_power_state *pstate2, bool *equal)
4643 {
4644         const struct smu7_power_state *psa;
4645         const struct smu7_power_state *psb;
4646         int i;
4647         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4648
4649         if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4650                 return -EINVAL;
4651
4652         psa = cast_const_phw_smu7_power_state(pstate1);
4653         psb = cast_const_phw_smu7_power_state(pstate2);
4654         /* If the two states don't even have the same number of performance levels, they cannot be the same state. */
4655         if (psa->performance_level_count != psb->performance_level_count) {
4656                 *equal = false;
4657                 return 0;
4658         }
4659
4660         for (i = 0; i < psa->performance_level_count; i++) {
4661                 if (!smu7_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4662                         /* If we have found even one performance level pair that is different, the states are different. */
4663                         *equal = false;
4664                         return 0;
4665                 }
4666         }
4667
4668         /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
4669         *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4670         *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4671         *equal &= (psa->sclk_threshold == psb->sclk_threshold);
4672         /* A pending OD update (sclk/mclk/vddc) forces the states to compare as unequal so the new settings get applied. */
4673         *equal &= !(data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK |
4674                                                         DPMTABLE_OD_UPDATE_MCLK |
4675                                                         DPMTABLE_OD_UPDATE_VDDC));
4676
4677         return 0;
4678 }
4679
4680 static int smu7_check_mc_firmware(struct pp_hwmgr *hwmgr)
4681 {
4682         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4683
4684         uint32_t tmp;
4685
4686         /* Read MC indirect register offset 0x9F bits [3:0] to see
4687          * whether the VBIOS has already loaded a full version of the
4688          * MC ucode or not.
4689          */
4690
4691         smu7_get_mc_microcode_version(hwmgr);
4692
4693         data->need_long_memory_training = false;
4694
4695         cgs_write_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_INDEX,
4696                                                         ixMC_IO_DEBUG_UP_13);
4697         tmp = cgs_read_register(hwmgr->device, mmMC_SEQ_IO_DEBUG_DATA);
4698
4699         if (tmp & (1 << 23)) {
4700                 data->mem_latency_high = MEM_LATENCY_HIGH;
4701                 data->mem_latency_low = MEM_LATENCY_LOW;
4702                 if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4703                     (hwmgr->chip_id == CHIP_POLARIS11) ||
4704                     (hwmgr->chip_id == CHIP_POLARIS12))
4705                         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableFFC, NULL);
4706         } else {
4707                 data->mem_latency_high = 330;
4708                 data->mem_latency_low = 330;
4709                 if ((hwmgr->chip_id == CHIP_POLARIS10) ||
4710                     (hwmgr->chip_id == CHIP_POLARIS11) ||
4711                     (hwmgr->chip_id == CHIP_POLARIS12))
4712                         smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableFFC, NULL);
4713         }
4714
4715         return 0;
4716 }
4717
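/*
 * Editorial note on the test above: when bit 23 of ixMC_IO_DEBUG_UP_13
 * is set, the tuned MEM_LATENCY_HIGH/MEM_LATENCY_LOW values are used and
 * FFC is enabled on Polaris parts; when it is clear, both latencies fall
 * back to 330 and FFC is disabled via PPSMC_MSG_DisableFFC.
 */
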
4718 static int smu7_read_clock_registers(struct pp_hwmgr *hwmgr)
4719 {
4720         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4721
4722         data->clock_registers.vCG_SPLL_FUNC_CNTL         =
4723                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL);
4724         data->clock_registers.vCG_SPLL_FUNC_CNTL_2       =
4725                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_2);
4726         data->clock_registers.vCG_SPLL_FUNC_CNTL_3       =
4727                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_3);
4728         data->clock_registers.vCG_SPLL_FUNC_CNTL_4       =
4729                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_FUNC_CNTL_4);
4730         data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM   =
4731                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM);
4732         data->clock_registers.vCG_SPLL_SPREAD_SPECTRUM_2 =
4733                 cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_SPLL_SPREAD_SPECTRUM_2);
4734         data->clock_registers.vDLL_CNTL                  =
4735                 cgs_read_register(hwmgr->device, mmDLL_CNTL);
4736         data->clock_registers.vMCLK_PWRMGT_CNTL          =
4737                 cgs_read_register(hwmgr->device, mmMCLK_PWRMGT_CNTL);
4738         data->clock_registers.vMPLL_AD_FUNC_CNTL         =
4739                 cgs_read_register(hwmgr->device, mmMPLL_AD_FUNC_CNTL);
4740         data->clock_registers.vMPLL_DQ_FUNC_CNTL         =
4741                 cgs_read_register(hwmgr->device, mmMPLL_DQ_FUNC_CNTL);
4742         data->clock_registers.vMPLL_FUNC_CNTL            =
4743                 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL);
4744         data->clock_registers.vMPLL_FUNC_CNTL_1          =
4745                 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_1);
4746         data->clock_registers.vMPLL_FUNC_CNTL_2          =
4747                 cgs_read_register(hwmgr->device, mmMPLL_FUNC_CNTL_2);
4748         data->clock_registers.vMPLL_SS1                  =
4749                 cgs_read_register(hwmgr->device, mmMPLL_SS1);
4750         data->clock_registers.vMPLL_SS2                  =
4751                 cgs_read_register(hwmgr->device, mmMPLL_SS2);
4752         return 0;
4753
4754 }
4755
4756 /**
4757  * smu7_get_memory_type - Find out if memory is GDDR5.
4758  *
4759  * @hwmgr:  the address of the powerplay hardware manager.
4760  * Return:   always 0
4761  */
4762 static int smu7_get_memory_type(struct pp_hwmgr *hwmgr)
4763 {
4764         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4765         struct amdgpu_device *adev = hwmgr->adev;
4766
4767         data->is_memory_gddr5 = (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5);
4768
4769         return 0;
4770 }
4771
4772 /**
4773  * smu7_enable_acpi_power_management - Enables Dynamic Power Management by SMC
4774  *
4775  * @hwmgr:  the address of the powerplay hardware manager.
4776  * Return:   always 0
4777  */
4778 static int smu7_enable_acpi_power_management(struct pp_hwmgr *hwmgr)
4779 {
4780         PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
4781                         GENERAL_PWRMGT, STATIC_PM_EN, 1);
4782
4783         return 0;
4784 }
4785
4786 /**
4787  * smu7_init_power_gate_state - Initialize PowerGating States for different engines
4788  *
4789  * @hwmgr:  the address of the powerplay hardware manager.
4790  * Return:   always 0
4791  */
4792 static int smu7_init_power_gate_state(struct pp_hwmgr *hwmgr)
4793 {
4794         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4795
4796         data->uvd_power_gated = false;
4797         data->vce_power_gated = false;
4798
4799         return 0;
4800 }
4801
4802 static int smu7_init_sclk_threshold(struct pp_hwmgr *hwmgr)
4803 {
4804         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4805
4806         data->low_sclk_interrupt_threshold = 0;
4807         return 0;
4808 }
4809
4810 static int smu7_setup_asic_task(struct pp_hwmgr *hwmgr)
4811 {
4812         int tmp_result, result = 0;
4813
4814         smu7_check_mc_firmware(hwmgr);
4815
4816         tmp_result = smu7_read_clock_registers(hwmgr);
4817         PP_ASSERT_WITH_CODE((0 == tmp_result),
4818                         "Failed to read clock registers!", result = tmp_result);
4819
4820         tmp_result = smu7_get_memory_type(hwmgr);
4821         PP_ASSERT_WITH_CODE((0 == tmp_result),
4822                         "Failed to get memory type!", result = tmp_result);
4823
4824         tmp_result = smu7_enable_acpi_power_management(hwmgr);
4825         PP_ASSERT_WITH_CODE((0 == tmp_result),
4826                         "Failed to enable ACPI power management!", result = tmp_result);
4827
4828         tmp_result = smu7_init_power_gate_state(hwmgr);
4829         PP_ASSERT_WITH_CODE((0 == tmp_result),
4830                         "Failed to init power gate state!", result = tmp_result);
4831
4832         tmp_result = smu7_get_mc_microcode_version(hwmgr);
4833         PP_ASSERT_WITH_CODE((0 == tmp_result),
4834                         "Failed to get MC microcode version!", result = tmp_result);
4835
4836         tmp_result = smu7_init_sclk_threshold(hwmgr);
4837         PP_ASSERT_WITH_CODE((0 == tmp_result),
4838                         "Failed to init sclk threshold!", result = tmp_result);
4839
4840         return result;
4841 }
4842
4843 static int smu7_force_clock_level(struct pp_hwmgr *hwmgr,
4844                 enum pp_clock_type type, uint32_t mask)
4845 {
4846         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4847
4848         if (mask == 0)
4849                 return -EINVAL;
4850
4851         switch (type) {
4852         case PP_SCLK:
4853                 if (!data->sclk_dpm_key_disabled)
4854                         smum_send_msg_to_smc_with_parameter(hwmgr,
4855                                         PPSMC_MSG_SCLKDPM_SetEnabledMask,
4856                                         data->dpm_level_enable_mask.sclk_dpm_enable_mask & mask,
4857                                         NULL);
4858                 break;
4859         case PP_MCLK:
4860                 if (!data->mclk_dpm_key_disabled)
4861                         smum_send_msg_to_smc_with_parameter(hwmgr,
4862                                         PPSMC_MSG_MCLKDPM_SetEnabledMask,
4863                                         data->dpm_level_enable_mask.mclk_dpm_enable_mask & mask,
4864                                         NULL);
4865                 break;
4866         case PP_PCIE:
4867         {
4868                 uint32_t tmp = mask & data->dpm_level_enable_mask.pcie_dpm_enable_mask;
4869
4870                 if (!data->pcie_dpm_key_disabled) {
4871                         if (fls(tmp) != ffs(tmp))
4872                                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PCIeDPM_UnForceLevel,
4873                                                 NULL);
4874                         else
4875                                 smum_send_msg_to_smc_with_parameter(hwmgr,
4876                                         PPSMC_MSG_PCIeDPM_ForceLevel,
4877                                         fls(tmp) - 1,
4878                                         NULL);
4879                 }
4880                 break;
4881         }
4882         default:
4883                 break;
4884         }
4885
4886         return 0;
4887 }
4888
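/*
 * Editorial sketch of the mask semantics above: mask is a bitmask of DPM
 * level indices, so e.g. mask = 0x6 (0b0110) enables levels 1 and 2 only.
 * For PP_PCIE, fls(tmp) != ffs(tmp) is simply "more than one bit set":
 * with exactly one permitted level the SMC is forced to it (fls(tmp) - 1),
 * otherwise any forced level is released with PPSMC_MSG_PCIeDPM_UnForceLevel.
 */
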
4889 static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
4890                 enum pp_clock_type type, char *buf)
4891 {
4892         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
4893         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
4894         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
4895         struct smu7_single_dpm_table *pcie_table = &(data->dpm_table.pcie_speed_table);
4896         struct smu7_odn_dpm_table *odn_table = &(data->odn_dpm_table);
4897         struct phm_odn_clock_levels *odn_sclk_table = &(odn_table->odn_core_clock_dpm_levels);
4898         struct phm_odn_clock_levels *odn_mclk_table = &(odn_table->odn_memory_clock_dpm_levels);
4899         int size = 0;
4900         uint32_t i, now, clock, pcie_speed;
4901
4902         switch (type) {
4903         case PP_SCLK:
4904                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
4905
4906                 for (i = 0; i < sclk_table->count; i++) {
4907                         if (clock > sclk_table->dpm_levels[i].value)
4908                                 continue;
4909                         break;
4910                 }
4911                 now = i;
4912
4913                 for (i = 0; i < sclk_table->count; i++)
4914                         size += sysfs_emit_at(buf, size, "%d: %uMHz %s\n",
4915                                         i, sclk_table->dpm_levels[i].value / 100,
4916                                         (i == now) ? "*" : "");
4917                 break;
4918         case PP_MCLK:
4919                 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetMclkFrequency, &clock);
4920
4921                 for (i = 0; i < mclk_table->count; i++) {
4922                         if (clock > mclk_table->dpm_levels[i].value)
4923                                 continue;
4924                         break;
4925                 }
4926                 now = i;
4927
4928                 for (i = 0; i < mclk_table->count; i++)
4929                         size += sysfs_emit_at(buf, size, "%d: %uMHz %s\n",
4930                                         i, mclk_table->dpm_levels[i].value / 100,
4931                                         (i == now) ? "*" : "");
4932                 break;
4933         case PP_PCIE:
4934                 pcie_speed = smu7_get_current_pcie_speed(hwmgr);
4935                 for (i = 0; i < pcie_table->count; i++) {
4936                         if (pcie_speed != pcie_table->dpm_levels[i].value)
4937                                 continue;
4938                         break;
4939                 }
4940                 now = i;
4941
4942                 for (i = 0; i < pcie_table->count; i++)
4943                         size += sysfs_emit_at(buf, size, "%d: %s %s\n", i,
4944                                         (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
4945                                         (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
4946                                         (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
4947                                         (i == now) ? "*" : "");
4948                 break;
4949         case OD_SCLK:
4950                 if (hwmgr->od_enabled) {
4951                         size = sysfs_emit(buf, "%s:\n", "OD_SCLK");
4952                         for (i = 0; i < odn_sclk_table->num_of_pl; i++)
4953                                 size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
4954                                         i, odn_sclk_table->entries[i].clock/100,
4955                                         odn_sclk_table->entries[i].vddc);
4956                 }
4957                 break;
4958         case OD_MCLK:
4959                 if (hwmgr->od_enabled) {
4960                         size = sysfs_emit(buf, "%s:\n", "OD_MCLK");
4961                         for (i = 0; i < odn_mclk_table->num_of_pl; i++)
4962                                 size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
4963                                         i, odn_mclk_table->entries[i].clock/100,
4964                                         odn_mclk_table->entries[i].vddc);
4965                 }
4966                 break;
4967         case OD_RANGE:
4968                 if (hwmgr->od_enabled) {
4969                         size = sysfs_emit(buf, "%s:\n", "OD_RANGE");
4970                         size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
4971                                 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
4972                                 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
4973                         size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
4974                                 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
4975                                 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
4976                         size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
4977                                 data->odn_dpm_table.min_vddc,
4978                                 data->odn_dpm_table.max_vddc);
4979                 }
4980                 break;
4981         default:
4982                 break;
4983         }
4984         return size;
4985 }
4986
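/*
 * Illustrative sample of the PP_SCLK output emitted above (editorial
 * sketch; the levels and frequencies are board-specific).  DPM values are
 * stored in 10 kHz units, hence the division by 100:
 *
 *   0: 300MHz
 *   1: 608MHz *
 *   2: 1077MHz
 *
 * The '*' marks the first level whose value is at least the current
 * frequency returned by PPSMC_MSG_API_GetSclkFrequency.
 */
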
4987 static void smu7_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4988 {
4989         switch (mode) {
4990         case AMD_FAN_CTRL_NONE:
4991                 smu7_fan_ctrl_set_fan_speed_pwm(hwmgr, 255);
4992                 break;
4993         case AMD_FAN_CTRL_MANUAL:
4994                 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4995                         PHM_PlatformCaps_MicrocodeFanControl))
4996                         smu7_fan_ctrl_stop_smc_fan_control(hwmgr);
4997                 break;
4998         case AMD_FAN_CTRL_AUTO:
4999                 if (!smu7_fan_ctrl_set_static_mode(hwmgr, mode))
5000                         smu7_fan_ctrl_start_smc_fan_control(hwmgr);
5001                 break;
5002         default:
5003                 break;
5004         }
5005 }
5006
5007 static uint32_t smu7_get_fan_control_mode(struct pp_hwmgr *hwmgr)
5008 {
5009         return hwmgr->fan_ctrl_enabled ? AMD_FAN_CTRL_AUTO : AMD_FAN_CTRL_MANUAL;
5010 }
5011
5012 static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr)
5013 {
5014         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5015         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5016         struct smu7_single_dpm_table *golden_sclk_table =
5017                         &(data->golden_dpm_table.sclk_table);
5018         int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
5019         int golden_value = golden_sclk_table->dpm_levels
5020                         [golden_sclk_table->count - 1].value;
5021
5022         value -= golden_value;
5023         value = DIV_ROUND_UP(value * 100, golden_value);
5024
5025         return value;
5026 }
5027
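/*
 * Worked example for the OD percentage above (illustrative numbers):
 * with a golden top-level sclk of 1266 MHz (126600 in 10 kHz units) and
 * a current top level of 1329 MHz (132900), the result is
 * DIV_ROUND_UP((132900 - 126600) * 100, 126600) = 5, i.e. a 5%
 * overclock relative to the golden table.
 */
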
5028 static int smu7_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5029 {
5030         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5031         struct smu7_single_dpm_table *golden_sclk_table =
5032                         &(data->golden_dpm_table.sclk_table);
5033         struct pp_power_state  *ps;
5034         struct smu7_power_state  *smu7_ps;
5035
5036         if (value > 20)
5037                 value = 20;
5038
5039         ps = hwmgr->request_ps;
5040
5041         if (ps == NULL)
5042                 return -EINVAL;
5043
5044         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
5045
5046         smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].engine_clock =
5047                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
5048                         value / 100 +
5049                         golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5050
5051         return 0;
5052 }
5053
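/*
 * Continuing the example: writing value = 5 scales the highest
 * performance level to golden * 105 / 100, so a golden sclk of 126600
 * (10 kHz units) becomes 126600 * 5 / 100 + 126600 = 132930.  Inputs
 * above 20 are clamped, so this path allows at most a +20% overclock.
 */
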
5054 static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr)
5055 {
5056         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5057         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5058         struct smu7_single_dpm_table *golden_mclk_table =
5059                         &(data->golden_dpm_table.mclk_table);
5060         int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
5061         int golden_value = golden_mclk_table->dpm_levels
5062                         [golden_mclk_table->count - 1].value;
5063
5064         value -= golden_value;
5065         value = DIV_ROUND_UP(value * 100, golden_value);
5066
5067         return value;
5068 }
5069
5070 static int smu7_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5071 {
5072         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5073         struct smu7_single_dpm_table *golden_mclk_table =
5074                         &(data->golden_dpm_table.mclk_table);
5075         struct pp_power_state  *ps;
5076         struct smu7_power_state  *smu7_ps;
5077
5078         if (value > 20)
5079                 value = 20;
5080
5081         ps = hwmgr->request_ps;
5082
5083         if (ps == NULL)
5084                 return -EINVAL;
5085
5086         smu7_ps = cast_phw_smu7_power_state(&ps->hardware);
5087
5088         smu7_ps->performance_levels[smu7_ps->performance_level_count - 1].memory_clock =
5089                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
5090                         value / 100 +
5091                         golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5092
5093         return 0;
5094 }
5095
5096
5097 static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
5098 {
5099         struct phm_ppt_v1_information *table_info =
5100                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5101         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
5102         struct phm_clock_voltage_dependency_table *sclk_table;
5103         int i;
5104
5105         if (hwmgr->pp_table_version == PP_TABLE_V1) {
5106                 if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
5107                         return -EINVAL;
5108                 dep_sclk_table = table_info->vdd_dep_on_sclk;
5109                 for (i = 0; i < dep_sclk_table->count; i++)
5110                         clocks->clock[i] = dep_sclk_table->entries[i].clk * 10;
5111                 clocks->count = dep_sclk_table->count;
5112         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
5113                 sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
5114                 for (i = 0; i < sclk_table->count; i++)
5115                         clocks->clock[i] = sclk_table->entries[i].clk * 10;
5116                 clocks->count = sclk_table->count;
5117         }
5118
5119         return 0;
5120 }
5121
5122 static uint32_t smu7_get_mem_latency(struct pp_hwmgr *hwmgr, uint32_t clk)
5123 {
5124         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5125
5126         if (clk >= MEM_FREQ_LOW_LATENCY && clk < MEM_FREQ_HIGH_LATENCY)
5127                 return data->mem_latency_high;
5128         else if (clk >= MEM_FREQ_HIGH_LATENCY)
5129                 return data->mem_latency_low;
5130         else
5131                 return MEM_LATENCY_ERR;
5132 }
5133
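/*
 * Editorial note: clk is in 10 kHz units here, so the thresholds above
 * correspond to 250 MHz (MEM_FREQ_LOW_LATENCY) and 800 MHz
 * (MEM_FREQ_HIGH_LATENCY).  A 400 MHz memory clock (clk = 40000) falls
 * in [25000, 80000) and reports the high-latency figure; anything below
 * 250 MHz reports MEM_LATENCY_ERR.
 */
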
5134 static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
5135 {
5136         struct phm_ppt_v1_information *table_info =
5137                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5138         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
5139         int i;
5140         struct phm_clock_voltage_dependency_table *mclk_table;
5141
5142         if (hwmgr->pp_table_version == PP_TABLE_V1) {
5143                 if (table_info == NULL)
5144                         return -EINVAL;
5145                 dep_mclk_table = table_info->vdd_dep_on_mclk;
5146                 for (i = 0; i < dep_mclk_table->count; i++) {
5147                         clocks->clock[i] = dep_mclk_table->entries[i].clk * 10;
5148                         clocks->latency[i] = smu7_get_mem_latency(hwmgr,
5149                                                 dep_mclk_table->entries[i].clk);
5150                 }
5151                 clocks->count = dep_mclk_table->count;
5152         } else if (hwmgr->pp_table_version == PP_TABLE_V0) {
5153                 mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
5154                 for (i = 0; i < mclk_table->count; i++)
5155                         clocks->clock[i] = mclk_table->entries[i].clk * 10;
5156                 clocks->count = mclk_table->count;
5157         }
5158         return 0;
5159 }
5160
5161 static int smu7_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
5162                                                 struct amd_pp_clocks *clocks)
5163 {
5164         switch (type) {
5165         case amd_pp_sys_clock:
5166                 smu7_get_sclks(hwmgr, clocks);
5167                 break;
5168         case amd_pp_mem_clock:
5169                 smu7_get_mclks(hwmgr, clocks);
5170                 break;
5171         default:
5172                 return -EINVAL;
5173         }
5174
5175         return 0;
5176 }
5177
5178 static int smu7_get_sclks_with_latency(struct pp_hwmgr *hwmgr,
5179                                        struct pp_clock_levels_with_latency *clocks)
5180 {
5181         struct phm_ppt_v1_information *table_info =
5182                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5183         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
5184                         table_info->vdd_dep_on_sclk;
5185         int i;
5186
5187         clocks->num_levels = 0;
5188         for (i = 0; i < dep_sclk_table->count; i++) {
5189                 if (dep_sclk_table->entries[i].clk) {
5190                         clocks->data[clocks->num_levels].clocks_in_khz =
5191                                 dep_sclk_table->entries[i].clk * 10;
5192                         clocks->num_levels++;
5193                 }
5194         }
5195
5196         return 0;
5197 }
5198
5199 static int smu7_get_mclks_with_latency(struct pp_hwmgr *hwmgr,
5200                                        struct pp_clock_levels_with_latency *clocks)
5201 {
5202         struct phm_ppt_v1_information *table_info =
5203                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5204         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
5205                         table_info->vdd_dep_on_mclk;
5206         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5207         int i;
5208
5209         clocks->num_levels = 0;
5210         data->mclk_latency_table.count = 0;
5211         for (i = 0; i < dep_mclk_table->count; i++) {
5212                 if (dep_mclk_table->entries[i].clk) {
5213                         clocks->data[clocks->num_levels].clocks_in_khz =
5214                                         dep_mclk_table->entries[i].clk * 10;
5215                         data->mclk_latency_table.entries[data->mclk_latency_table.count].frequency =
5216                                         dep_mclk_table->entries[i].clk;
5217                         clocks->data[clocks->num_levels].latency_in_us =
5218                                 data->mclk_latency_table.entries[data->mclk_latency_table.count].latency =
5219                                         smu7_get_mem_latency(hwmgr, dep_mclk_table->entries[i].clk);
5220                         clocks->num_levels++;
5221                         data->mclk_latency_table.count++;
5222                 }
5223         }
5224
5225         return 0;
5226 }
5227
5228 static int smu7_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
5229                                                enum amd_pp_clock_type type,
5230                                                struct pp_clock_levels_with_latency *clocks)
5231 {
5232         if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
5233               hwmgr->chip_id <= CHIP_VEGAM))
5234                 return -EINVAL;
5235
5236         switch (type) {
5237         case amd_pp_sys_clock:
5238                 smu7_get_sclks_with_latency(hwmgr, clocks);
5239                 break;
5240         case amd_pp_mem_clock:
5241                 smu7_get_mclks_with_latency(hwmgr, clocks);
5242                 break;
5243         default:
5244                 return -EINVAL;
5245         }
5246
5247         return 0;
5248 }
5249
5250 static int smu7_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
5251                                                  void *clock_range)
5252 {
5253         struct phm_ppt_v1_information *table_info =
5254                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5255         struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
5256                         table_info->vdd_dep_on_mclk;
5257         struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table =
5258                         table_info->vdd_dep_on_sclk;
5259         struct polaris10_smumgr *smu_data =
5260                         (struct polaris10_smumgr *)(hwmgr->smu_backend);
5261         SMU74_Discrete_DpmTable  *table = &(smu_data->smc_state_table);
5262         struct dm_pp_wm_sets_with_clock_ranges *watermarks =
5263                         (struct dm_pp_wm_sets_with_clock_ranges *)clock_range;
5264         uint32_t i, j, k;
5265         bool valid_entry;
5266
5267         if (!(hwmgr->chip_id >= CHIP_POLARIS10 &&
5268               hwmgr->chip_id <= CHIP_VEGAM))
5269                 return -EINVAL;
5270
5271         for (i = 0; i < dep_mclk_table->count; i++) {
5272                 for (j = 0; j < dep_sclk_table->count; j++) {
5273                         valid_entry = false;
5274                         for (k = 0; k < watermarks->num_wm_sets; k++) {
5275                                 if (dep_sclk_table->entries[j].clk >= watermarks->wm_clk_ranges[k].wm_min_eng_clk_in_khz / 10 &&
5276                                     dep_sclk_table->entries[j].clk < watermarks->wm_clk_ranges[k].wm_max_eng_clk_in_khz / 10 &&
5277                                     dep_mclk_table->entries[i].clk >= watermarks->wm_clk_ranges[k].wm_min_mem_clk_in_khz / 10 &&
5278                                     dep_mclk_table->entries[i].clk < watermarks->wm_clk_ranges[k].wm_max_mem_clk_in_khz / 10) {
5279                                         valid_entry = true;
5280                                         table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k].wm_set_id;
5281                                         break;
5282                                 }
5283                         }
5284                         PP_ASSERT_WITH_CODE(valid_entry,
5285                                         "Clock pair is not within any watermark clock range specified by DAL!  Using the highest watermark set.",
5286                                         table->DisplayWatermark[i][j] = watermarks->wm_clk_ranges[k - 1].wm_set_id);
5287                 }
5288         }
5289
5290         return smu7_copy_bytes_to_smc(hwmgr,
5291                                       smu_data->smu7_data.dpm_table_start + offsetof(SMU74_Discrete_DpmTable, DisplayWatermark),
5292                                       (uint8_t *)table->DisplayWatermark,
5293                                       sizeof(uint8_t) * SMU74_MAX_LEVELS_MEMORY * SMU74_MAX_LEVELS_GRAPHICS,
5294                                       SMC_RAM_END);
5295 }
5296
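/*
 * Editorial note: DisplayWatermark[i][j] is indexed by (mclk level i,
 * sclk level j).  Each cell records the DAL watermark-set ID whose
 * engine/memory clock window (converted from kHz to the tables' 10 kHz
 * units by the divisions above) contains that clock pair, and the whole
 * table is then uploaded to SMC RAM in a single smu7_copy_bytes_to_smc()
 * call.
 */
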
5297 static int smu7_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
5298                                         uint32_t virtual_addr_low,
5299                                         uint32_t virtual_addr_hi,
5300                                         uint32_t mc_addr_low,
5301                                         uint32_t mc_addr_hi,
5302                                         uint32_t size)
5303 {
5304         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5305
5306         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5307                                         data->soft_regs_start +
5308                                         smum_get_offsetof(hwmgr,
5309                                         SMU_SoftRegisters, DRAM_LOG_ADDR_H),
5310                                         mc_addr_hi);
5311
5312         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5313                                         data->soft_regs_start +
5314                                         smum_get_offsetof(hwmgr,
5315                                         SMU_SoftRegisters, DRAM_LOG_ADDR_L),
5316                                         mc_addr_low);
5317
5318         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5319                                         data->soft_regs_start +
5320                                         smum_get_offsetof(hwmgr,
5321                                         SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_H),
5322                                         virtual_addr_hi);
5323
5324         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5325                                         data->soft_regs_start +
5326                                         smum_get_offsetof(hwmgr,
5327                                         SMU_SoftRegisters, DRAM_LOG_PHY_ADDR_L),
5328                                         virtual_addr_low);
5329
5330         cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
5331                                         data->soft_regs_start +
5332                                         smum_get_offsetof(hwmgr,
5333                                         SMU_SoftRegisters, DRAM_LOG_BUFF_SIZE),
5334                                         size);
5335         return 0;
5336 }
5337
5338 static int smu7_get_max_high_clocks(struct pp_hwmgr *hwmgr,
5339                                         struct amd_pp_simple_clock_info *clocks)
5340 {
5341         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5342         struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5343         struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5344
5345         if (clocks == NULL)
5346                 return -EINVAL;
5347
5348         clocks->memory_max_clock = mclk_table->count > 1 ?
5349                                 mclk_table->dpm_levels[mclk_table->count-1].value :
5350                                 mclk_table->dpm_levels[0].value;
5351         clocks->engine_max_clock = sclk_table->count > 1 ?
5352                                 sclk_table->dpm_levels[sclk_table->count-1].value :
5353                                 sclk_table->dpm_levels[0].value;
5354         return 0;
5355 }
5356
5357 static int smu7_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
5358                 struct PP_TemperatureRange *thermal_data)
5359 {
5360         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5361         struct phm_ppt_v1_information *table_info =
5362                         (struct phm_ppt_v1_information *)hwmgr->pptable;
5363
5364         memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));
5365
5366         if (hwmgr->pp_table_version == PP_TABLE_V1)
5367                 thermal_data->max = table_info->cac_dtp_table->usSoftwareShutdownTemp *
5368                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
5369         else if (hwmgr->pp_table_version == PP_TABLE_V0)
5370                 thermal_data->max = data->thermal_temp_setting.temperature_shutdown *
5371                         PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
5372
5373         return 0;
5374 }
5375
5376 static bool smu7_check_clk_voltage_valid(struct pp_hwmgr *hwmgr,
5377                                         enum PP_OD_DPM_TABLE_COMMAND type,
5378                                         uint32_t clk,
5379                                         uint32_t voltage)
5380 {
5381         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5382
5383         if (voltage < data->odn_dpm_table.min_vddc || voltage > data->odn_dpm_table.max_vddc) {
5384                 pr_info("OD voltage is out of range [%d - %d] mV\n",
5385                                                 data->odn_dpm_table.min_vddc,
5386                                                 data->odn_dpm_table.max_vddc);
5387                 return false;
5388         }
5389
5390         if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
5391                 if (data->golden_dpm_table.sclk_table.dpm_levels[0].value > clk ||
5392                         hwmgr->platform_descriptor.overdriveLimit.engineClock < clk) {
5393                         pr_info("OD engine clock is out of range [%d - %d] MHz\n",
5394                                 data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
5395                                 hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
5396                         return false;
5397                 }
5398         } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
5399                 if (data->golden_dpm_table.mclk_table.dpm_levels[0].value > clk ||
5400                         hwmgr->platform_descriptor.overdriveLimit.memoryClock < clk) {
5401                         pr_info("OD memory clock is out of range [%d - %d] MHz\n",
5402                                 data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
5403                                 hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
5404                         return false;
5405                 }
5406         } else {
5407                 return false;
5408         }
5409
5410         return true;
5411 }
5412
5413 static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
5414                                         enum PP_OD_DPM_TABLE_COMMAND type,
5415                                         long *input, uint32_t size)
5416 {
5417         uint32_t i;
5418         struct phm_odn_clock_levels *podn_dpm_table_in_backend = NULL;
5419         struct smu7_odn_clock_voltage_dependency_table *podn_vdd_dep_in_backend = NULL;
5420         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5421
5422         uint32_t input_clk;
5423         uint32_t input_vol;
5424         uint32_t input_level;
5425
5426         PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
5427                                 return -EINVAL);
5428
5429         if (!hwmgr->od_enabled) {
5430                 pr_info("OverDrive feature not enabled\n");
5431                 return -EINVAL;
5432         }
5433
5434         if (PP_OD_EDIT_SCLK_VDDC_TABLE == type) {
5435                 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_core_clock_dpm_levels;
5436                 podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_sclk;
5437                 PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
5438                                 "Failed to get ODN SCLK and Voltage tables",
5439                                 return -EINVAL);
5440         } else if (PP_OD_EDIT_MCLK_VDDC_TABLE == type) {
5441                 podn_dpm_table_in_backend = &data->odn_dpm_table.odn_memory_clock_dpm_levels;
5442                 podn_vdd_dep_in_backend = &data->odn_dpm_table.vdd_dependency_on_mclk;
5443
5444                 PP_ASSERT_WITH_CODE((podn_dpm_table_in_backend && podn_vdd_dep_in_backend),
5445                         "Failed to get ODN MCLK and Voltage tables",
5446                         return -EINVAL);
5447         } else if (PP_OD_RESTORE_DEFAULT_TABLE == type) {
5448                 smu7_odn_initial_default_setting(hwmgr);
5449                 return 0;
5450         } else if (PP_OD_COMMIT_DPM_TABLE == type) {
5451                 smu7_check_dpm_table_updated(hwmgr);
5452                 return 0;
5453         } else {
5454                 return -EINVAL;
5455         }
5456
5457         for (i = 0; i < size; i += 3) {
5458                 if (i + 3 > size || input[i] >= podn_dpm_table_in_backend->num_of_pl) {
5459                         pr_info("invalid clock/voltage input\n");
5460                         return 0;
5461                 }
5462                 input_level = input[i];
5463                 input_clk = input[i+1] * 100;
5464                 input_vol = input[i+2];
5465
5466                 if (smu7_check_clk_voltage_valid(hwmgr, type, input_clk, input_vol)) {
5467                         podn_dpm_table_in_backend->entries[input_level].clock = input_clk;
5468                         podn_vdd_dep_in_backend->entries[input_level].clk = input_clk;
5469                         podn_dpm_table_in_backend->entries[input_level].vddc = input_vol;
5470                         podn_vdd_dep_in_backend->entries[input_level].vddc = input_vol;
5471                         podn_vdd_dep_in_backend->entries[input_level].vddgfx = input_vol;
5472                 } else {
5473                         return -EINVAL;
5474                 }
5475         }
5476
5477         return 0;
5478 }
5479
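/*
 * Illustrative input layout for the edit commands above (editorial
 * sketch): input[] is consumed in triplets of (level index, clock in
 * MHz, voltage in mV), so e.g. input = {1, 800, 1000} with size = 3
 * edits DPM level 1 to 800 MHz at 1000 mV.  The clock is converted to
 * 10 kHz units (input[i+1] * 100) before being range-checked by
 * smu7_check_clk_voltage_valid().
 */
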
5480 static int smu7_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
5481 {
5482         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5483         uint32_t i, size = 0;
5484         uint32_t len;
5485
5486         static const char *profile_name[7] = {"BOOTUP_DEFAULT",
5487                                         "3D_FULL_SCREEN",
5488                                         "POWER_SAVING",
5489                                         "VIDEO",
5490                                         "VR",
5491                                         "COMPUTE",
5492                                         "CUSTOM"};
5493
5494         static const char *title[8] = {"NUM",
5495                         "MODE_NAME",
5496                         "SCLK_UP_HYST",
5497                         "SCLK_DOWN_HYST",
5498                         "SCLK_ACTIVE_LEVEL",
5499                         "MCLK_UP_HYST",
5500                         "MCLK_DOWN_HYST",
5501                         "MCLK_ACTIVE_LEVEL"};
5502
5503         if (!buf)
5504                 return -EINVAL;
5505
5506         size += sysfs_emit_at(buf, size, "%s %16s %16s %16s %16s %16s %16s %16s\n",
5507                         title[0], title[1], title[2], title[3],
5508                         title[4], title[5], title[6], title[7]);
5509
5510         len = ARRAY_SIZE(smu7_profiling);
5511
5512         for (i = 0; i < len; i++) {
5513                 if (i == hwmgr->power_profile_mode) {
5514                         size += sysfs_emit_at(buf, size, "%3d %14s %s: %8d %16d %16d %16d %16d %16d\n",
5515                         i, profile_name[i], "*",
5516                         data->current_profile_setting.sclk_up_hyst,
5517                         data->current_profile_setting.sclk_down_hyst,
5518                         data->current_profile_setting.sclk_activity,
5519                         data->current_profile_setting.mclk_up_hyst,
5520                         data->current_profile_setting.mclk_down_hyst,
5521                         data->current_profile_setting.mclk_activity);
5522                         continue;
5523                 }
5524                 if (smu7_profiling[i].bupdate_sclk)
5525                         size += sysfs_emit_at(buf, size, "%3d %16s: %8d %16d %16d ",
5526                         i, profile_name[i], smu7_profiling[i].sclk_up_hyst,
5527                         smu7_profiling[i].sclk_down_hyst,
5528                         smu7_profiling[i].sclk_activity);
5529                 else
5530                         size += sysfs_emit_at(buf, size, "%3d %16s: %8s %16s %16s ",
5531                         i, profile_name[i], "-", "-", "-");
5532
5533                 if (smu7_profiling[i].bupdate_mclk)
5534                         size += sysfs_emit_at(buf, size, "%16d %16d %16d\n",
5535                         smu7_profiling[i].mclk_up_hyst,
5536                         smu7_profiling[i].mclk_down_hyst,
5537                         smu7_profiling[i].mclk_activity);
5538                 else
5539                         size += sysfs_emit_at(buf, size, "%16s %16s %16s\n",
5540                         "-", "-", "-");
5541         }
5542
5543         return size;
5544 }
5545
5546 static void smu7_patch_compute_profile_mode(struct pp_hwmgr *hwmgr,
5547                                         enum PP_SMC_POWER_PROFILE request)
5548 {
5549         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5550         uint32_t tmp, level;
5551
5552         if (request == PP_SMC_POWER_PROFILE_COMPUTE) {
5553                 if (data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
5554                         level = 0;
5555                         tmp = data->dpm_level_enable_mask.sclk_dpm_enable_mask;
5556                         while (tmp >>= 1)
5557                                 level++;
5558                         if (level > 0)
5559                                 smu7_force_clock_level(hwmgr, PP_SCLK, 3 << (level-1));
5560                 }
5561         } else if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
5562                 smu7_force_clock_level(hwmgr, PP_SCLK, data->dpm_level_enable_mask.sclk_dpm_enable_mask);
5563         }
5564 }
5565
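/*
 * Editorial note: on entering the COMPUTE profile the helper above
 * restricts sclk to the top two enabled DPM levels (mask 3 << (level - 1),
 * where level is the index of the highest set bit); on leaving COMPUTE it
 * restores the full sclk_dpm_enable_mask.
 */
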
5566 static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
5567 {
5568         struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
5569         struct profile_mode_setting tmp;
5570         enum PP_SMC_POWER_PROFILE mode;
5571
5572         if (input == NULL)
5573                 return -EINVAL;
5574
5575         mode = input[size];
5576         switch (mode) {
5577         case PP_SMC_POWER_PROFILE_CUSTOM:
5578                 if (size < 8 && size != 0)
5579                         return -EINVAL;
5580                 /* If only CUSTOM is passed in, use the saved values. Check
5581                  * that we actually have a CUSTOM profile by ensuring that
5582                  * the "use sclk" or the "use mclk" bits are set
5583                  */
5584                 tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
5585                 if (size == 0) {
5586                         if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
5587                                 return -EINVAL;
5588                 } else {
5589                         tmp.bupdate_sclk = input[0];
5590                         tmp.sclk_up_hyst = input[1];
5591                         tmp.sclk_down_hyst = input[2];
5592                         tmp.sclk_activity = input[3];
5593                         tmp.bupdate_mclk = input[4];
5594                         tmp.mclk_up_hyst = input[5];
5595                         tmp.mclk_down_hyst = input[6];
5596                         tmp.mclk_activity = input[7];
5597                         smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
5598                 }
5599                 if (!smum_update_dpm_settings(hwmgr, &tmp)) {
5600                         memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
5601                         hwmgr->power_profile_mode = mode;
5602                 }
5603                 break;
5604         case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
5605         case PP_SMC_POWER_PROFILE_POWERSAVING:
5606         case PP_SMC_POWER_PROFILE_VIDEO:
5607         case PP_SMC_POWER_PROFILE_VR:
5608         case PP_SMC_POWER_PROFILE_COMPUTE:
5609                 if (mode == hwmgr->power_profile_mode)
5610                         return 0;
5611
5612                 memcpy(&tmp, &smu7_profiling[mode], sizeof(struct profile_mode_setting));
5613                 if (!smum_update_dpm_settings(hwmgr, &tmp)) {
5614                         if (tmp.bupdate_sclk) {
5615                                 data->current_profile_setting.bupdate_sclk = tmp.bupdate_sclk;
5616                                 data->current_profile_setting.sclk_up_hyst = tmp.sclk_up_hyst;
5617                                 data->current_profile_setting.sclk_down_hyst = tmp.sclk_down_hyst;
5618                                 data->current_profile_setting.sclk_activity = tmp.sclk_activity;
5619                         }
5620                         if (tmp.bupdate_mclk) {
5621                                 data->current_profile_setting.bupdate_mclk = tmp.bupdate_mclk;
5622                                 data->current_profile_setting.mclk_up_hyst = tmp.mclk_up_hyst;
5623                                 data->current_profile_setting.mclk_down_hyst = tmp.mclk_down_hyst;
5624                                 data->current_profile_setting.mclk_activity = tmp.mclk_activity;
5625                         }
5626                         smu7_patch_compute_profile_mode(hwmgr, mode);
5627                         hwmgr->power_profile_mode = mode;
5628                 }
5629                 break;
5630         default:
5631                 return -EINVAL;
5632         }
5633
5634         return 0;
5635 }
5636
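/*
 * Editorial sketch of the CUSTOM input layout consumed above: input[]
 * holds {bupdate_sclk, sclk_up_hyst, sclk_down_hyst, sclk_activity,
 * bupdate_mclk, mclk_up_hyst, mclk_down_hyst, mclk_activity}, with the
 * requested profile mode in input[size].  Passing size == 0 with
 * PP_SMC_POWER_PROFILE_CUSTOM re-applies the previously saved custom
 * profile, provided at least one of its bupdate_* flags was set.
 */
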
5637 static int smu7_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
5638                                 PHM_PerformanceLevelDesignation designation, uint32_t index,
5639                                 PHM_PerformanceLevel *level)
5640 {
5641         const struct smu7_power_state *ps;
5642         uint32_t i;
5643
5644         if (level == NULL || hwmgr == NULL || state == NULL)
5645                 return -EINVAL;
5646
5647         ps = cast_const_phw_smu7_power_state(state);
5648
5649         i = index > ps->performance_level_count - 1 ?
5650                         ps->performance_level_count - 1 : index;
5651
5652         level->coreClock = ps->performance_levels[i].engine_clock;
5653         level->memory_clock = ps->performance_levels[i].memory_clock;
5654
5655         return 0;
5656 }
5657
5658 static int smu7_power_off_asic(struct pp_hwmgr *hwmgr)
5659 {
5660         int result;
5661
5662         result = smu7_disable_dpm_tasks(hwmgr);
5663         PP_ASSERT_WITH_CODE((0 == result),
5664                         "[disable_dpm_tasks] Failed to disable DPM!",
5665                         );
5666
5667         return result;
5668 }
5669
5670 static const struct pp_hwmgr_func smu7_hwmgr_funcs = {
5671         .backend_init = &smu7_hwmgr_backend_init,
5672         .backend_fini = &smu7_hwmgr_backend_fini,
5673         .asic_setup = &smu7_setup_asic_task,
5674         .dynamic_state_management_enable = &smu7_enable_dpm_tasks,
5675         .apply_state_adjust_rules = smu7_apply_state_adjust_rules,
5676         .force_dpm_level = &smu7_force_dpm_level,
5677         .power_state_set = smu7_set_power_state_tasks,
5678         .get_power_state_size = smu7_get_power_state_size,
5679         .get_mclk = smu7_dpm_get_mclk,
5680         .get_sclk = smu7_dpm_get_sclk,
5681         .patch_boot_state = smu7_dpm_patch_boot_state,
5682         .get_pp_table_entry = smu7_get_pp_table_entry,
5683         .get_num_of_pp_table_entries = smu7_get_number_of_powerplay_table_entries,
5684         .powerdown_uvd = smu7_powerdown_uvd,
5685         .powergate_uvd = smu7_powergate_uvd,
5686         .powergate_vce = smu7_powergate_vce,
5687         .disable_clock_power_gating = smu7_disable_clock_power_gating,
5688         .update_clock_gatings = smu7_update_clock_gatings,
5689         .notify_smc_display_config_after_ps_adjustment = smu7_notify_smc_display_config_after_ps_adjustment,
5690         .display_config_changed = smu7_display_configuration_changed_task,
5691         .set_max_fan_pwm_output = smu7_set_max_fan_pwm_output,
5692         .set_max_fan_rpm_output = smu7_set_max_fan_rpm_output,
5693         .stop_thermal_controller = smu7_thermal_stop_thermal_controller,
5694         .get_fan_speed_info = smu7_fan_ctrl_get_fan_speed_info,
5695         .get_fan_speed_pwm = smu7_fan_ctrl_get_fan_speed_pwm,
5696         .set_fan_speed_pwm = smu7_fan_ctrl_set_fan_speed_pwm,
5697         .reset_fan_speed_to_default = smu7_fan_ctrl_reset_fan_speed_to_default,
5698         .get_fan_speed_rpm = smu7_fan_ctrl_get_fan_speed_rpm,
5699         .set_fan_speed_rpm = smu7_fan_ctrl_set_fan_speed_rpm,
5700         .uninitialize_thermal_controller = smu7_thermal_ctrl_uninitialize_thermal_controller,
5701         .register_irq_handlers = smu7_register_irq_handlers,
5702         .check_smc_update_required_for_display_configuration = smu7_check_smc_update_required_for_display_configuration,
5703         .check_states_equal = smu7_check_states_equal,
5704         .set_fan_control_mode = smu7_set_fan_control_mode,
5705         .get_fan_control_mode = smu7_get_fan_control_mode,
5706         .force_clock_level = smu7_force_clock_level,
5707         .print_clock_levels = smu7_print_clock_levels,
5708         .powergate_gfx = smu7_powergate_gfx,
5709         .get_sclk_od = smu7_get_sclk_od,
5710         .set_sclk_od = smu7_set_sclk_od,
5711         .get_mclk_od = smu7_get_mclk_od,
5712         .set_mclk_od = smu7_set_mclk_od,
5713         .get_clock_by_type = smu7_get_clock_by_type,
5714         .get_clock_by_type_with_latency = smu7_get_clock_by_type_with_latency,
5715         .set_watermarks_for_clocks_ranges = smu7_set_watermarks_for_clocks_ranges,
5716         .read_sensor = smu7_read_sensor,
5717         .dynamic_state_management_disable = smu7_disable_dpm_tasks,
5718         .avfs_control = smu7_avfs_control,
5719         .disable_smc_firmware_ctf = smu7_thermal_disable_alert,
5720         .start_thermal_controller = smu7_start_thermal_controller,
5721         .notify_cac_buffer_info = smu7_notify_cac_buffer_info,
5722         .get_max_high_clocks = smu7_get_max_high_clocks,
5723         .get_thermal_temperature_range = smu7_get_thermal_temperature_range,
5724         .odn_edit_dpm_table = smu7_odn_edit_dpm_table,
5725         .set_power_limit = smu7_set_power_limit,
5726         .get_power_profile_mode = smu7_get_power_profile_mode,
5727         .set_power_profile_mode = smu7_set_power_profile_mode,
5728         .get_performance_level = smu7_get_performance_level,
5729         .get_asic_baco_capability = smu7_baco_get_capability,
5730         .get_asic_baco_state = smu7_baco_get_state,
5731         .set_asic_baco_state = smu7_baco_set_state,
5732         .power_off_asic = smu7_power_off_asic,
5733 };
5734
5735 uint8_t smu7_get_sleep_divider_id_from_clock(uint32_t clock,
5736                 uint32_t clock_insr)
5737 {
5738         uint8_t i;
5739         uint32_t temp;
5740         uint32_t min = max(clock_insr, (uint32_t)SMU7_MINIMUM_ENGINE_CLOCK);
5741
5742         PP_ASSERT_WITH_CODE((clock >= min), "Engine clock can't satisfy stutter requirement!", return 0);
5743         for (i = SMU7_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
5744                 temp = clock >> i;
5745
5746                 if (temp >= min || i == 0)
5747                         break;
5748         }
5749         return i;
5750 }
5751
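/*
 * Worked example (illustrative, assuming SMU7_MAX_DEEPSLEEP_DIVIDER_ID
 * is 5 and SMU7_MINIMUM_ENGINE_CLOCK is 2500 in 10 kHz units): for
 * clock = 30000 and clock_insr = 2500, the loop tries 30000 >> 5 = 937,
 * then 30000 >> 4 = 1875, then 30000 >> 3 = 3750 >= 2500, so the
 * function returns divider ID 3.
 */
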
5752 int smu7_init_function_pointers(struct pp_hwmgr *hwmgr)
5753 {
5754         hwmgr->hwmgr_func = &smu7_hwmgr_funcs;
5755         if (hwmgr->pp_table_version == PP_TABLE_V0)
5756                 hwmgr->pptable_func = &pptable_funcs;
5757         else if (hwmgr->pp_table_version == PP_TABLE_V1)
5758                 hwmgr->pptable_func = &pptable_v1_0_funcs;
5759
5760         return 0;
5761 }