/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_ucode.h"
#include "cikd.h"
#include "amdgpu_dpm.h"
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

MODULE_FIRMWARE("radeon/bonaire_smc.bin");
MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

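/*
 * Per-ASIC PowerTune defaults (SVI load line setup, TDC limits and the
 * BAPM temperature-gradient coefficient tables).  One of these is picked
 * by PCI device ID in ci_initialize_powertune_defaults().
 */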
static const struct ci_pt_defaults defaults_hawaii_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
        { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
        { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
        { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
        { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
        { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
        { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

#if 0
static const struct ci_pt_defaults defaults_bonaire_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
        { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
        { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
#endif

static const struct ci_pt_defaults defaults_saturn_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
        { 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
        { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
        { 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
        { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};

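/*
 * DIDT (di/dt) configuration: each entry is an indirect DIDT register
 * index, a field mask, a shift and the value to program.  The list is
 * terminated by an entry with offset 0xFFFFFFFF.
 */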
static const struct ci_pt_config_reg didt_config_ci[] =
{
        { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0xFFFFFFFF }
};

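/* the memory module index lives in bits 23:16 of BIOS_SCRATCH_4 */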
static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
{
        return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
}

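/*
 * Copy the DRAM timing and burst-time registers from one MC arbitration
 * set to another, then ask the MC to switch to the destination set.
 */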
static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
                                       u32 arb_freq_src, u32 arb_freq_dest)
{
        u32 mc_arb_dram_timing;
        u32 mc_arb_dram_timing2;
        u32 burst_time;
        u32 mc_cg_config;

        switch (arb_freq_src) {
        case MC_CG_ARB_FREQ_F0:
                mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
                mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
                burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
                         MC_ARB_BURST_TIME__STATE0__SHIFT;
                break;
        case MC_CG_ARB_FREQ_F1:
                mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
                mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
                burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
                         MC_ARB_BURST_TIME__STATE1__SHIFT;
                break;
        default:
                return -EINVAL;
        }

        switch (arb_freq_dest) {
        case MC_CG_ARB_FREQ_F0:
                WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
                WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
                WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
                        ~MC_ARB_BURST_TIME__STATE0_MASK);
                break;
        case MC_CG_ARB_FREQ_F1:
                WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
                WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
                WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
                        ~MC_ARB_BURST_TIME__STATE1_MASK);
                break;
        default:
                return -EINVAL;
        }

        mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
        WREG32(mmMC_CG_CONFIG, mc_cg_config);
        WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
                ~MC_ARB_CG__CG_ARB_REQ_MASK);

        return 0;
}

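/* convert a DDR3 memory clock (in 10 kHz units) into an MC parameter index */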
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
        u8 mc_para_index;

        if (memory_clock < 10000)
                mc_para_index = 0;
        else if (memory_clock >= 80000)
                mc_para_index = 0x0f;
        else
                mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
        return mc_para_index;
}

static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
        u8 mc_para_index;

        if (strobe_mode) {
                if (memory_clock < 12500)
                        mc_para_index = 0x00;
                else if (memory_clock > 47500)
                        mc_para_index = 0x0f;
                else
                        mc_para_index = (u8)((memory_clock - 10000) / 2500);
        } else {
                if (memory_clock < 65000)
                        mc_para_index = 0x00;
                else if (memory_clock > 135000)
                        mc_para_index = 0x0f;
                else
                        mc_para_index = (u8)((memory_clock - 60000) / 5000);
        }
        return mc_para_index;
}

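/*
 * The SMC state table only has room for max_voltage_steps entries;
 * drop the leading entries so the table fits.
 */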
static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
                                                     u32 max_voltage_steps,
                                                     struct atom_voltage_table *voltage_table)
{
        unsigned int i, diff;

        if (voltage_table->count <= max_voltage_steps)
                return;

        diff = voltage_table->count - max_voltage_steps;

        for (i = 0; i < max_voltage_steps; i++)
                voltage_table->entries[i] = voltage_table->entries[i + diff];

        voltage_table->count = max_voltage_steps;
}

static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
                                         struct atom_voltage_table_entry *voltage_table,
                                         u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
                                       u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);

static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
                                                             PPSMC_Msg msg, u32 parameter);
static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);

static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = adev->pm.dpm.priv;

        return pi;
}

static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
{
        struct ci_ps *ps = rps->ps_priv;

        return ps;
}

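/*
 * Pick the PowerTune defaults for this ASIC by PCI device ID and set the
 * baseline power-containment feature flags.
 */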
static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        switch (adev->pdev->device) {
        case 0x6649:
        case 0x6650:
        case 0x6651:
        case 0x6658:
        case 0x665C:
        case 0x665D:
        default:
                pi->powertune_defaults = &defaults_bonaire_xt;
                break;
        case 0x6640:
        case 0x6641:
        case 0x6646:
        case 0x6647:
                pi->powertune_defaults = &defaults_saturn_xt;
                break;
        case 0x67B8:
        case 0x67B0:
                pi->powertune_defaults = &defaults_hawaii_xt;
                break;
        case 0x67BA:
        case 0x67B1:
                pi->powertune_defaults = &defaults_hawaii_pro;
                break;
        case 0x67A0:
        case 0x67A1:
        case 0x67A2:
        case 0x67A8:
        case 0x67A9:
        case 0x67AA:
        case 0x67B9:
        case 0x67BE:
                pi->powertune_defaults = &defaults_bonaire_xt;
                break;
        }

        pi->dte_tj_offset = 0;

        pi->caps_power_containment = true;
        pi->caps_cac = false;
        pi->caps_sq_ramping = false;
        pi->caps_db_ramping = false;
        pi->caps_td_ramping = false;
        pi->caps_tcp_ramping = false;

        if (pi->caps_power_containment) {
                pi->caps_cac = true;
                if (adev->asic_type == CHIP_HAWAII)
                        pi->enable_bapm_feature = false;
                else
                        pi->enable_bapm_feature = true;
                pi->enable_tdc_limit_feature = true;
                pi->enable_pkg_pwr_tracking_feature = true;
        }
}

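/*
 * Convert a VDDC value in mV to an SVI2-style VID code:
 * VID = (1.55 V - VDDC) / 6.25 mV, i.e. (6200 - vddc * 4) / 25.
 */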
static u8 ci_convert_to_vid(u16 vddc)
{
        return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
        u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
        u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
        u32 i;

        if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
                return -EINVAL;
        if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
                return -EINVAL;
        if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
            adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
                return -EINVAL;

        for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
                if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
                        lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
                        hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
                        hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
                } else {
                        lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
                        hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
                }
        }
        return 0;
}

static int ci_populate_vddc_vid(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *vid = pi->smc_powertune_table.VddCVid;
        u32 i;

        if (pi->vddc_voltage_table.count > 8)
                return -EINVAL;

        for (i = 0; i < pi->vddc_voltage_table.count; i++)
                vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

        return 0;
}

static int ci_populate_svi_load_line(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

        pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
        pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
        pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
        pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

        return 0;
}

static int ci_populate_tdc_limit(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        u16 tdc_limit;

        tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
        pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
        pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
                pt_defaults->tdc_vddc_throttle_release_limit_perc;
        pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

        return 0;
}

static int ci_populate_dw8(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        int ret;
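        /*
         * Probe the TdcWaterfallCtl fuse location in SMC RAM; note that on
         * success the value just read is still replaced with the per-ASIC
         * default below.
         */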
        ret = amdgpu_ci_read_smc_sram_dword(adev,
                                     SMU7_FIRMWARE_HEADER_LOCATION +
                                     offsetof(SMU7_Firmware_Header, PmFuseTable) +
                                     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
                                     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
                                     pi->sram_end);
        if (ret)
                return -EINVAL;
        else
                pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

        return 0;
}

static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
            (adev->pm.dpm.fan.fan_output_sensitivity == 0))
                adev->pm.dpm.fan.fan_output_sensitivity =
                        adev->pm.dpm.fan.default_fan_output_sensitivity;

        pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
                cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);

        return 0;
}

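/*
 * Find the smallest and largest nonzero VID codes in the BAPM VDDC
 * hi/lo arrays; they become GnbLPMLMinVid/GnbLPMLMaxVid.
 */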
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
        u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
        int i, min, max;

        min = max = hi_vid[0];
        for (i = 0; i < 8; i++) {
                if (0 != hi_vid[i]) {
                        if (min > hi_vid[i])
                                min = hi_vid[i];
                        if (max < hi_vid[i])
                                max = hi_vid[i];
                }

                if (0 != lo_vid[i]) {
                        if (min > lo_vid[i])
                                min = lo_vid[i];
                        if (max < lo_vid[i])
                                max = lo_vid[i];
                }
        }

        if ((min == 0) || (max == 0))
                return -EINVAL;
        pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
        pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

        return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
        u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;

        hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
        lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

        pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
        pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

        return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;
        struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
        int i, j, k;
        const u16 *def1;
        const u16 *def2;

        dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
        dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

        dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
        dpm_table->GpuTjMax =
                (u8)(pi->thermal_temp_setting.temperature_high / 1000);
        dpm_table->GpuTjHyst = 8;

        dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

        if (ppm) {
                dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
                dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
        } else {
                dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
                dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
        }

        dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
        def1 = pt_defaults->bapmti_r;
        def2 = pt_defaults->bapmti_rc;

        for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
                for (j = 0; j < SMU7_DTE_SOURCES; j++) {
                        for (k = 0; k < SMU7_DTE_SINKS; k++) {
                                dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
                                dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
                                def1++;
                                def2++;
                        }
                }
        }

        return 0;
}

static int ci_populate_pm_base(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 pm_fuse_table_offset;
        int ret;

        if (pi->caps_power_containment) {
                ret = amdgpu_ci_read_smc_sram_dword(adev,
                                             SMU7_FIRMWARE_HEADER_LOCATION +
                                             offsetof(SMU7_Firmware_Header, PmFuseTable),
                                             &pm_fuse_table_offset, pi->sram_end);
                if (ret)
                        return ret;
                ret = ci_populate_bapm_vddc_vid_sidd(adev);
                if (ret)
                        return ret;
                ret = ci_populate_vddc_vid(adev);
                if (ret)
                        return ret;
                ret = ci_populate_svi_load_line(adev);
                if (ret)
                        return ret;
                ret = ci_populate_tdc_limit(adev);
                if (ret)
                        return ret;
                ret = ci_populate_dw8(adev);
                if (ret)
                        return ret;
                ret = ci_populate_fuzzy_fan(adev);
                if (ret)
                        return ret;
                ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
                if (ret)
                        return ret;
                ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
                if (ret)
                        return ret;
                ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
                                           (u8 *)&pi->smc_powertune_table,
                                           sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
                if (ret)
                        return ret;
        }

        return 0;
}

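/* toggle the DIDT_CTRL_EN bit for each ramping block that is enabled */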
static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 data;

        if (pi->caps_sq_ramping) {
                data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
                if (enable)
                        data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
        }

        if (pi->caps_db_ramping) {
                data = RREG32_DIDT(ixDIDT_DB_CTRL0);
                if (enable)
                        data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_DB_CTRL0, data);
        }

        if (pi->caps_td_ramping) {
                data = RREG32_DIDT(ixDIDT_TD_CTRL0);
                if (enable)
                        data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_TD_CTRL0, data);
        }

        if (pi->caps_tcp_ramping) {
                data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
                if (enable)
                        data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
        }
}

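/*
 * Program a 0xFFFFFFFF-terminated list of config registers.  CACHE-type
 * entries are accumulated and OR'd into the next real register write.
 */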
static int ci_program_pt_config_registers(struct amdgpu_device *adev,
                                          const struct ci_pt_config_reg *cac_config_regs)
{
        const struct ci_pt_config_reg *config_regs = cac_config_regs;
        u32 data;
        u32 cache = 0;

        if (config_regs == NULL)
                return -EINVAL;

        while (config_regs->offset != 0xFFFFFFFF) {
                if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
                        cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
                } else {
                        switch (config_regs->type) {
                        case CISLANDS_CONFIGREG_SMC_IND:
                                data = RREG32_SMC(config_regs->offset);
                                break;
                        case CISLANDS_CONFIGREG_DIDT_IND:
                                data = RREG32_DIDT(config_regs->offset);
                                break;
                        default:
                                data = RREG32(config_regs->offset);
                                break;
                        }

                        data &= ~config_regs->mask;
                        data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
                        data |= cache;

                        switch (config_regs->type) {
                        case CISLANDS_CONFIGREG_SMC_IND:
                                WREG32_SMC(config_regs->offset, data);
                                break;
                        case CISLANDS_CONFIGREG_DIDT_IND:
                                WREG32_DIDT(config_regs->offset, data);
                                break;
                        default:
                                WREG32(config_regs->offset, data);
                                break;
                        }
                        cache = 0;
                }
                config_regs++;
        }
        return 0;
}

static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        int ret;

        if (pi->caps_sq_ramping || pi->caps_db_ramping ||
            pi->caps_td_ramping || pi->caps_tcp_ramping) {
                adev->gfx.rlc.funcs->enter_safe_mode(adev);

                if (enable) {
                        ret = ci_program_pt_config_registers(adev, didt_config_ci);
                        if (ret) {
                                adev->gfx.rlc.funcs->exit_safe_mode(adev);
                                return ret;
                        }
                }

                ci_do_enable_didt(adev, enable);

                adev->gfx.rlc.funcs->exit_safe_mode(adev);
        }

        return 0;
}

static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;
        int ret = 0;

        if (enable) {
                pi->power_containment_features = 0;
                if (pi->caps_power_containment) {
                        if (pi->enable_bapm_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
                                if (smc_result != PPSMC_Result_OK)
                                        ret = -EINVAL;
                                else
                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
                        }

                        if (pi->enable_tdc_limit_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
                                if (smc_result != PPSMC_Result_OK)
                                        ret = -EINVAL;
                                else
                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
                        }

                        if (pi->enable_pkg_pwr_tracking_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
                                if (smc_result != PPSMC_Result_OK) {
                                        ret = -EINVAL;
                                } else {
                                        struct amdgpu_cac_tdp_table *cac_tdp_table =
                                                adev->pm.dpm.dyn_state.cac_tdp_table;
                                        u32 default_pwr_limit =
                                                (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

                                        ci_set_power_limit(adev, default_pwr_limit);
                                }
                        }
                }
        } else {
                if (pi->caps_power_containment && pi->power_containment_features) {
                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);

                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
                        pi->power_containment_features = 0;
                }
        }

        return ret;
}

static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;
        int ret = 0;

        if (pi->caps_cac) {
                if (enable) {
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
                        if (smc_result != PPSMC_Result_OK) {
                                ret = -EINVAL;
                                pi->cac_enabled = false;
                        } else {
                                pi->cac_enabled = true;
                        }
                } else if (pi->cac_enabled) {
                        amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
                        pi->cac_enabled = false;
                }
        }

        return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
                                            bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result = PPSMC_Result_OK;

        if (pi->thermal_sclk_dpm_enabled) {
                if (enable)
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
                else
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
        }

        if (smc_result == PPSMC_Result_OK)
                return 0;
        else
                return -EINVAL;
}

static int ci_power_control_set_level(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;
        s32 adjust_percent;
        s32 target_tdp;
        int ret = 0;
        bool adjust_polarity = false; /* ??? */

        if (pi->caps_power_containment) {
                adjust_percent = adjust_polarity ?
                        adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
                target_tdp = ((100 + adjust_percent) *
                              (s32)cac_tdp_table->configurable_tdp) / 100;

                ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
        }

        return ret;
}

static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if (pi->uvd_power_gated == gate)
                return;

        pi->uvd_power_gated = gate;

        ci_update_uvd_dpm(adev, gate);
}

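/*
 * MCLK switching has to fit inside the vblank period: require at least
 * 450 us of vblank for GDDR5 and 300 us otherwise.
 */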
static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
{
        u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
        u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;

        return vblank_time < switch_limit;
}

static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
                                        struct amdgpu_ps *rps)
{
        struct ci_ps *ps = ci_get_ps(rps);
        struct ci_power_info *pi = ci_get_pi(adev);
        struct amdgpu_clock_and_voltage_limits *max_limits;
        bool disable_mclk_switching;
        u32 sclk, mclk;
        int i;

        if (rps->vce_active) {
                rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
                rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
        } else {
                rps->evclk = 0;
                rps->ecclk = 0;
        }

        if ((adev->pm.dpm.new_active_crtc_count > 1) ||
            ci_dpm_vblank_too_short(adev))
                disable_mclk_switching = true;
        else
                disable_mclk_switching = false;

        if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
                pi->battery_state = true;
        else
                pi->battery_state = false;

        if (adev->pm.dpm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (adev->pm.dpm.ac_power == false) {
                for (i = 0; i < ps->performance_level_count; i++) {
                        if (ps->performance_levels[i].mclk > max_limits->mclk)
                                ps->performance_levels[i].mclk = max_limits->mclk;
                        if (ps->performance_levels[i].sclk > max_limits->sclk)
                                ps->performance_levels[i].sclk = max_limits->sclk;
                }
        }

        /* XXX validate the min clocks required for display */

        if (disable_mclk_switching) {
                mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
                sclk = ps->performance_levels[0].sclk;
        } else {
                mclk = ps->performance_levels[0].mclk;
                sclk = ps->performance_levels[0].sclk;
        }

        if (rps->vce_active) {
                if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
                        sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
                if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
                        mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
        }

        ps->performance_levels[0].sclk = sclk;
        ps->performance_levels[0].mclk = mclk;

        if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
                ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

        if (disable_mclk_switching) {
                if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
                        ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
        } else {
                if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
                        ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
        }
}

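/* program the thermal interrupt thresholds (in degrees C) into CG_THERMAL_INT */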
static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
                                            int min_temp, int max_temp)
{
        int low_temp = 0 * 1000;
        int high_temp = 255 * 1000;
        u32 tmp;

        if (low_temp < min_temp)
                low_temp = min_temp;
        if (high_temp > max_temp)
                high_temp = max_temp;
        if (high_temp < low_temp) {
                DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
                return -EINVAL;
        }

        tmp = RREG32_SMC(ixCG_THERMAL_INT);
        tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
        tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
                ((low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT);
        WREG32_SMC(ixCG_THERMAL_INT, tmp);

#if 0
        /* XXX: need to figure out how to handle this properly */
        tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
        tmp &= DIG_THERM_DPM_MASK;
        tmp |= DIG_THERM_DPM(high_temp / 1000);
        WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

        adev->pm.dpm.thermal.min_temp = low_temp;
        adev->pm.dpm.thermal.max_temp = high_temp;
        return 0;
}

static int ci_thermal_enable_alert(struct amdgpu_device *adev,
                                   bool enable)
{
        u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
        PPSMC_Result result;

        if (enable) {
                thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
                                 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
                WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
                result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
                if (result != PPSMC_Result_OK) {
                        DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
                        return -EINVAL;
                }
        } else {
                thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
                        CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
                result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
                if (result != PPSMC_Result_OK) {
                        DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
                        return -EINVAL;
                }
        }

        return 0;
}

static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 tmp;

        if (pi->fan_ctrl_is_in_default_mode) {
                tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
                        >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
                pi->fan_ctrl_default_mode = tmp;
                tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
                        >> CG_FDO_CTRL2__TMIN__SHIFT;
                pi->t_min = tmp;
                pi->fan_ctrl_is_in_default_mode = false;
        }

        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
        tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL2, tmp);

        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
        tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

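/*
 * Build the SMU7 fan table from the fan profile points (t_min/med/high,
 * pwm_min/med/high) and upload it to SMC RAM at fan_table_start.
 */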
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
        u32 duty100;
        u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
        u16 fdo_min, slope1, slope2;
        u32 reference_clock, tmp;
        int ret;
        u64 tmp64;

        if (!pi->fan_table_start) {
                adev->pm.dpm.fan.ucode_fan_control = false;
                return 0;
        }

        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

        if (duty100 == 0) {
                adev->pm.dpm.fan.ucode_fan_control = false;
                return 0;
        }

        tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
        do_div(tmp64, 10000);
        fdo_min = (u16)tmp64;

        t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
        t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

        pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
        pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

        slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
        slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

        fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
        fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
        fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

        fan_table.Slope1 = cpu_to_be16(slope1);
        fan_table.Slope2 = cpu_to_be16(slope2);

        fan_table.FdoMin = cpu_to_be16(fdo_min);

        fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

        fan_table.HystUp = cpu_to_be16(1);

        fan_table.HystSlope = cpu_to_be16(1);

        fan_table.TempRespLim = cpu_to_be16(5);

        reference_clock = amdgpu_asic_get_xclk(adev);

        fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
                                               reference_clock) / 1600);

        fan_table.FdoMax = cpu_to_be16((u16)duty100);

        tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
                >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
        fan_table.TempSrc = (uint8_t)tmp;

        ret = amdgpu_ci_copy_bytes_to_smc(adev,
                                          pi->fan_table_start,
                                          (u8 *)(&fan_table),
                                          sizeof(fan_table),
                                          pi->sram_end);

        if (ret) {
                DRM_ERROR("Failed to load fan table to the SMC.");
                adev->pm.dpm.fan.ucode_fan_control = false;
        }

        return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result ret;

        if (pi->caps_od_fuzzy_fan_control_support) {
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_StartFanControl,
                                                               FAN_CONTROL_FUZZY);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_MSG_SetFanPwmMax,
                                                               adev->pm.dpm.fan.default_max_fan_pwm);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
        } else {
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_StartFanControl,
                                                               FAN_CONTROL_TABLE);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
        }

        pi->fan_is_controlled_by_smc = true;
        return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
        PPSMC_Result ret;
        struct ci_power_info *pi = ci_get_pi(adev);

        ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
        if (ret == PPSMC_Result_OK) {
                pi->fan_is_controlled_by_smc = false;
                return 0;
        } else {
                return -EINVAL;
        }
}

static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
                                        u32 *speed)
{
        u32 duty, duty100;
        u64 tmp64;

        if (adev->pm.no_fan)
                return -ENOENT;

        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
        duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
                >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;

        if (duty100 == 0)
                return -EINVAL;

        tmp64 = (u64)duty * 100;
        do_div(tmp64, duty100);
        *speed = (u32)tmp64;

        if (*speed > 100)
                *speed = 100;

        return 0;
}

static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
                                        u32 speed)
{
        u32 tmp;
        u32 duty, duty100;
        u64 tmp64;
        struct ci_power_info *pi = ci_get_pi(adev);

        if (adev->pm.no_fan)
                return -ENOENT;

        if (pi->fan_is_controlled_by_smc)
                return -EINVAL;

        if (speed > 100)
                return -EINVAL;

        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

        if (duty100 == 0)
                return -EINVAL;

        tmp64 = (u64)speed * duty100;
        do_div(tmp64, 100);
        duty = (u32)tmp64;

        tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
        tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL0, tmp);

        return 0;
}

static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
{
        if (mode) {
                /* stop auto-manage */
                if (adev->pm.dpm.fan.ucode_fan_control)
                        ci_fan_ctrl_stop_smc_fan_control(adev);
                ci_fan_ctrl_set_static_mode(adev, mode);
        } else {
                /* restart auto-manage */
                if (adev->pm.dpm.fan.ucode_fan_control)
                        ci_thermal_start_smc_fan_control(adev);
                else
                        ci_fan_ctrl_set_default_mode(adev);
        }
}

static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 tmp;

        if (pi->fan_is_controlled_by_smc)
                return 0;

        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
        return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
                                         u32 *speed)
{
        u32 tach_period;
        u32 xclk = amdgpu_asic_get_xclk(adev);

        if (adev->pm.no_fan)
                return -ENOENT;

        if (adev->pm.fan_pulses_per_revolution == 0)
                return -ENOENT;

        tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
                >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
        if (tach_period == 0)
                return -ENOENT;

        *speed = 60 * xclk * 10000 / tach_period;

        return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
                                         u32 speed)
{
        u32 tach_period, tmp;
        u32 xclk = amdgpu_asic_get_xclk(adev);

        if (adev->pm.no_fan)
                return -ENOENT;

        if (adev->pm.fan_pulses_per_revolution == 0)
                return -ENOENT;

        if ((speed < adev->pm.fan_min_rpm) ||
            (speed > adev->pm.fan_max_rpm))
                return -EINVAL;

        if (adev->pm.dpm.fan.ucode_fan_control)
                ci_fan_ctrl_stop_smc_fan_control(adev);

        tach_period = 60 * xclk * 10000 / (8 * speed);
        tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
        tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
1327         WREG32_SMC(ixCG_TACH_CTRL, tmp);
1328
1329         ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
1330
1331         return 0;
1332 }
1333 #endif
1334
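/*
 * Restore the fan controller to the PWM mode and Tmin that were saved
 * when the driver took over (fan_ctrl_default_mode/t_min in ci_power_info).
 */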
1335 static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
1336 {
1337         struct ci_power_info *pi = ci_get_pi(adev);
1338         u32 tmp;
1339
1340         if (!pi->fan_ctrl_is_in_default_mode) {
1341                 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1342                 tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1343                 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1344
1345                 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1346                 tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
1347                 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1348                 pi->fan_ctrl_is_in_default_mode = true;
1349         }
1350 }
1351
1352 static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
1353 {
1354         if (adev->pm.dpm.fan.ucode_fan_control) {
1355                 ci_fan_ctrl_start_smc_fan_control(adev);
1356                 ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
1357         }
1358 }
1359
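/*
 * Program the tachometer edges-per-revolution (when a tach is present)
 * and the PWM response rate; 0x28 is the TACH_PWM_RESP_RATE value
 * inherited from the original enablement code.
 */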
1360 static void ci_thermal_initialize(struct amdgpu_device *adev)
1361 {
1362         u32 tmp;
1363
1364         if (adev->pm.fan_pulses_per_revolution) {
1365                 tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
1366                 tmp |= (adev->pm.fan_pulses_per_revolution - 1)
1367                         << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
1368                 WREG32_SMC(ixCG_TACH_CTRL, tmp);
1369         }
1370
1371         tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
1372         tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
1373         WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1374 }
1375
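/*
 * Bring up the thermal controller in order: program the tach/PWM basics,
 * set the supported temperature range, enable the thermal interrupt, and
 * finally upload the fan table and let the SMC manage the fan (when the
 * VBIOS provides a ucode fan table).
 */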
1376 static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
1377 {
1378         int ret;
1379
1380         ci_thermal_initialize(adev);
1381         ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
1382         if (ret)
1383                 return ret;
1384         ret = ci_thermal_enable_alert(adev, true);
1385         if (ret)
1386                 return ret;
1387         if (adev->pm.dpm.fan.ucode_fan_control) {
1388                 ret = ci_thermal_setup_fan_table(adev);
1389                 if (ret)
1390                         return ret;
1391                 ci_thermal_start_smc_fan_control(adev);
1392         }
1393
1394         return 0;
1395 }
1396
1397 static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
1398 {
1399         if (!adev->pm.no_fan)
1400                 ci_fan_ctrl_set_default_mode(adev);
1401 }
1402
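/*
 * SMC "soft registers" live in SMC SRAM starting at pi->soft_regs_start;
 * these helpers read/write one dword at a byte offset into that block.
 */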
1403 static int ci_read_smc_soft_register(struct amdgpu_device *adev,
1404                                      u16 reg_offset, u32 *value)
1405 {
1406         struct ci_power_info *pi = ci_get_pi(adev);
1407
1408         return amdgpu_ci_read_smc_sram_dword(adev,
1409                                       pi->soft_regs_start + reg_offset,
1410                                       value, pi->sram_end);
1411 }
1412
1413 static int ci_write_smc_soft_register(struct amdgpu_device *adev,
1414                                       u16 reg_offset, u32 value)
1415 {
1416         struct ci_power_info *pi = ci_get_pi(adev);
1417
1418         return amdgpu_ci_write_smc_sram_dword(adev,
1419                                        pi->soft_regs_start + reg_offset,
1420                                        value, pi->sram_end);
1421 }
1422
1423 static void ci_init_fps_limits(struct amdgpu_device *adev)
1424 {
1425         struct ci_power_info *pi = ci_get_pi(adev);
1426         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1427
1428         if (pi->caps_fps) {
1429                 u16 tmp;
1430
1431                 tmp = 45;
1432                 table->FpsHighT = cpu_to_be16(tmp);
1433
1434                 tmp = 30;
1435                 table->FpsLowT = cpu_to_be16(tmp);
1436         }
1437 }
1438
1439 static int ci_update_sclk_t(struct amdgpu_device *adev)
1440 {
1441         struct ci_power_info *pi = ci_get_pi(adev);
1442         int ret = 0;
1443         u32 low_sclk_interrupt_t = 0;
1444
1445         if (pi->caps_sclk_throttle_low_notification) {
1446                 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1447
1448                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
1449                                            pi->dpm_table_start +
1450                                            offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1451                                            (u8 *)&low_sclk_interrupt_t,
1452                                            sizeof(u32), pi->sram_end);
1453
1454         }
1455
1456         return ret;
1457 }
1458
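/*
 * Build the VDDC/VDDCI leakage translation tables: virtual voltage IDs
 * (ATOM_VIRTUAL_VOLTAGE_ID0 + i) found in the VBIOS tables are mapped to
 * the real, per-part leakage voltages reported by the EVV/leakage
 * AtomBIOS queries.
 */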
1459 static void ci_get_leakage_voltages(struct amdgpu_device *adev)
1460 {
1461         struct ci_power_info *pi = ci_get_pi(adev);
1462         u16 leakage_id, virtual_voltage_id;
1463         u16 vddc, vddci;
1464         int i;
1465
1466         pi->vddc_leakage.count = 0;
1467         pi->vddci_leakage.count = 0;
1468
1469         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1470                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1471                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1472                         if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
1473                                 continue;
1474                         if (vddc != 0 && vddc != virtual_voltage_id) {
1475                                 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1476                                 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1477                                 pi->vddc_leakage.count++;
1478                         }
1479                 }
1480         } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
1481                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1482                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1483                         if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
1484                                                                                      virtual_voltage_id,
1485                                                                                      leakage_id) == 0) {
1486                                 if (vddc != 0 && vddc != virtual_voltage_id) {
1487                                         pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1488                                         pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1489                                         pi->vddc_leakage.count++;
1490                                 }
1491                                 if (vddci != 0 && vddci != virtual_voltage_id) {
1492                                         pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1493                                         pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1494                                         pi->vddci_leakage.count++;
1495                                 }
1496                         }
1497                 }
1498         }
1499 }
1500
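/*
 * Select which throttle sources (internal digital thermal sensor and/or
 * the external GPIO) may trigger DPM events, and enable thermal
 * protection in GENERAL_PWRMGT whenever any source is active.
 */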
1501 static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
1502 {
1503         struct ci_power_info *pi = ci_get_pi(adev);
1504         bool want_thermal_protection;
1505         enum amdgpu_dpm_event_src dpm_event_src;
1506         u32 tmp;
1507
1508         switch (sources) {
1509         case 0:
1510         default:
1511                 want_thermal_protection = false;
1512                 break;
1513         case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
1514                 want_thermal_protection = true;
1515                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
1516                 break;
1517         case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1518                 want_thermal_protection = true;
1519                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
1520                 break;
1521         case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1522               (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1523                 want_thermal_protection = true;
1524                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1525                 break;
1526         }
1527
1528         if (want_thermal_protection) {
1529 #if 0
1530                 /* XXX: need to figure out how to handle this properly */
1531                 tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1532                 tmp &= DPM_EVENT_SRC_MASK;
1533                 tmp |= DPM_EVENT_SRC(dpm_event_src);
1534                 WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1535 #endif
1536
1537                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1538                 if (pi->thermal_protection)
1539                         tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1540                 else
1541                         tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1542                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1543         } else {
1544                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1545                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1546                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1547         }
1548 }
1549
1550 static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
1551                                            enum amdgpu_dpm_auto_throttle_src source,
1552                                            bool enable)
1553 {
1554         struct ci_power_info *pi = ci_get_pi(adev);
1555
1556         if (enable) {
1557                 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1558                         pi->active_auto_throttle_sources |= 1 << source;
1559                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1560                 }
1561         } else {
1562                 if (pi->active_auto_throttle_sources & (1 << source)) {
1563                         pi->active_auto_throttle_sources &= ~(1 << source);
1564                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1565                 }
1566         }
1567 }
1568
1569 static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
1570 {
1571         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1572                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1573 }
1574
1575 static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1576 {
1577         struct ci_power_info *pi = ci_get_pi(adev);
1578         PPSMC_Result smc_result;
1579
1580         if (!pi->need_update_smu7_dpm_table)
1581                 return 0;
1582
1583         if ((!pi->sclk_dpm_key_disabled) &&
1584             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1585                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1586                 if (smc_result != PPSMC_Result_OK)
1587                         return -EINVAL;
1588         }
1589
1590         if ((!pi->mclk_dpm_key_disabled) &&
1591             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1592                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1593                 if (smc_result != PPSMC_Result_OK)
1594                         return -EINVAL;
1595         }
1596
1597         pi->need_update_smu7_dpm_table = 0;
1598         return 0;
1599 }
1600
1601 static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
1602 {
1603         struct ci_power_info *pi = ci_get_pi(adev);
1604         PPSMC_Result smc_result;
1605
1606         if (enable) {
1607                 if (!pi->sclk_dpm_key_disabled) {
1608                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
1609                         if (smc_result != PPSMC_Result_OK)
1610                                 return -EINVAL;
1611                 }
1612
1613                 if (!pi->mclk_dpm_key_disabled) {
1614                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
1615                         if (smc_result != PPSMC_Result_OK)
1616                                 return -EINVAL;
1617
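                        /*
                         * Enable CAC in the MC sequencer and bring up the
                         * local CAC (LCAC) blocks; the 0x...05 register
                         * values below are the magic enable sequence carried
                         * over from the original enablement code.
                         */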
1618                         WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
1619                                         ~MC_SEQ_CNTL_3__CAC_EN_MASK);
1620
1621                         WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
1622                         WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
1623                         WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
1624
1625                         udelay(10);
1626
1627                         WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
1628                         WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
1629                         WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
1630                 }
1631         } else {
1632                 if (!pi->sclk_dpm_key_disabled) {
1633                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
1634                         if (smc_result != PPSMC_Result_OK)
1635                                 return -EINVAL;
1636                 }
1637
1638                 if (!pi->mclk_dpm_key_disabled) {
1639                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
1640                         if (smc_result != PPSMC_Result_OK)
1641                                 return -EINVAL;
1642                 }
1643         }
1644
1645         return 0;
1646 }
1647
1648 static int ci_start_dpm(struct amdgpu_device *adev)
1649 {
1650         struct ci_power_info *pi = ci_get_pi(adev);
1651         PPSMC_Result smc_result;
1652         int ret;
1653         u32 tmp;
1654
1655         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1656         tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1657         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1658
1659         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1660         tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1661         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1662
1663         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1664
1665         WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
1666
1667         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
1668         if (smc_result != PPSMC_Result_OK)
1669                 return -EINVAL;
1670
1671         ret = ci_enable_sclk_mclk_dpm(adev, true);
1672         if (ret)
1673                 return ret;
1674
1675         if (!pi->pcie_dpm_key_disabled) {
1676                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
1677                 if (smc_result != PPSMC_Result_OK)
1678                         return -EINVAL;
1679         }
1680
1681         return 0;
1682 }
1683
1684 static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1685 {
1686         struct ci_power_info *pi = ci_get_pi(adev);
1687         PPSMC_Result smc_result;
1688
1689         if (!pi->need_update_smu7_dpm_table)
1690                 return 0;
1691
1692         if ((!pi->sclk_dpm_key_disabled) &&
1693             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1694                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1695                 if (smc_result != PPSMC_Result_OK)
1696                         return -EINVAL;
1697         }
1698
1699         if ((!pi->mclk_dpm_key_disabled) &&
1700             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1701                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1702                 if (smc_result != PPSMC_Result_OK)
1703                         return -EINVAL;
1704         }
1705
1706         return 0;
1707 }
1708
1709 static int ci_stop_dpm(struct amdgpu_device *adev)
1710 {
1711         struct ci_power_info *pi = ci_get_pi(adev);
1712         PPSMC_Result smc_result;
1713         int ret;
1714         u32 tmp;
1715
1716         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1717         tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1718         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1719
1720         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1721         tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1722         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1723
1724         if (!pi->pcie_dpm_key_disabled) {
1725                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
1726                 if (smc_result != PPSMC_Result_OK)
1727                         return -EINVAL;
1728         }
1729
1730         ret = ci_enable_sclk_mclk_dpm(adev, false);
1731         if (ret)
1732                 return ret;
1733
1734         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
1735         if (smc_result != PPSMC_Result_OK)
1736                 return -EINVAL;
1737
1738         return 0;
1739 }
1740
1741 static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1742 {
1743         u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1744
1745         if (enable)
1746                 tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1747         else
1748                 tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1749         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1750 }
1751
1752 #if 0
1753 static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
1754                                         bool ac_power)
1755 {
1756         struct ci_power_info *pi = ci_get_pi(adev);
1757         struct amdgpu_cac_tdp_table *cac_tdp_table =
1758                 adev->pm.dpm.dyn_state.cac_tdp_table;
1759         u32 power_limit;
1760
1761         if (ac_power)
1762                 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1763         else
1764                 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1765
1766         ci_set_power_limit(adev, power_limit);
1767
1768         if (pi->caps_automatic_dc_transition) {
1769                 if (ac_power)
1770                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
1771                 else
1772                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
1773         }
1774
1775         return 0;
1776 }
1777 #endif
1778
1779 static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
1780                                                       PPSMC_Msg msg, u32 parameter)
1781 {
1782         WREG32(mmSMC_MSG_ARG_0, parameter);
1783         return amdgpu_ci_send_msg_to_smc(adev, msg);
1784 }
1785
1786 static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1787                                                         PPSMC_Msg msg, u32 *parameter)
1788 {
1789         PPSMC_Result smc_result;
1790
1791         smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1792
1793         if ((smc_result == PPSMC_Result_OK) && parameter)
1794                 *parameter = RREG32(mmSMC_MSG_ARG_0);
1795
1796         return smc_result;
1797 }
1798
1799 static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1800 {
1801         struct ci_power_info *pi = ci_get_pi(adev);
1802
1803         if (!pi->sclk_dpm_key_disabled) {
1804                 PPSMC_Result smc_result =
1805                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1806                 if (smc_result != PPSMC_Result_OK)
1807                         return -EINVAL;
1808         }
1809
1810         return 0;
1811 }
1812
1813 static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1814 {
1815         struct ci_power_info *pi = ci_get_pi(adev);
1816
1817         if (!pi->mclk_dpm_key_disabled) {
1818                 PPSMC_Result smc_result =
1819                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1820                 if (smc_result != PPSMC_Result_OK)
1821                         return -EINVAL;
1822         }
1823
1824         return 0;
1825 }
1826
1827 static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1828 {
1829         struct ci_power_info *pi = ci_get_pi(adev);
1830
1831         if (!pi->pcie_dpm_key_disabled) {
1832                 PPSMC_Result smc_result =
1833                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1834                 if (smc_result != PPSMC_Result_OK)
1835                         return -EINVAL;
1836         }
1837
1838         return 0;
1839 }
1840
1841 static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1842 {
1843         struct ci_power_info *pi = ci_get_pi(adev);
1844
1845         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1846                 PPSMC_Result smc_result =
1847                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1848                 if (smc_result != PPSMC_Result_OK)
1849                         return -EINVAL;
1850         }
1851
1852         return 0;
1853 }
1854
1855 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1856                                        u32 target_tdp)
1857 {
1858         PPSMC_Result smc_result =
1859                 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1860         if (smc_result != PPSMC_Result_OK)
1861                 return -EINVAL;
1862         return 0;
1863 }
1864
1865 #if 0
1866 static int ci_set_boot_state(struct amdgpu_device *adev)
1867 {
1868         return ci_enable_sclk_mclk_dpm(adev, false);
1869 }
1870 #endif
1871
1872 static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1873 {
1874         u32 sclk_freq;
1875         PPSMC_Result smc_result =
1876                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1877                                                     PPSMC_MSG_API_GetSclkFrequency,
1878                                                     &sclk_freq);
1879         if (smc_result != PPSMC_Result_OK)
1880                 sclk_freq = 0;
1881
1882         return sclk_freq;
1883 }
1884
1885 static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1886 {
1887         u32 mclk_freq;
1888         PPSMC_Result smc_result =
1889                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1890                                                     PPSMC_MSG_API_GetMclkFrequency,
1891                                                     &mclk_freq);
1892         if (smc_result != PPSMC_Result_OK)
1893                 mclk_freq = 0;
1894
1895         return mclk_freq;
1896 }
1897
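/*
 * Release the SMC from reset, start its clock, and spin until the
 * firmware reports its interrupts enabled (FIRMWARE_FLAGS), i.e. it has
 * finished booting.
 */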
1898 static void ci_dpm_start_smc(struct amdgpu_device *adev)
1899 {
1900         int i;
1901
1902         amdgpu_ci_program_jump_on_start(adev);
1903         amdgpu_ci_start_smc_clock(adev);
1904         amdgpu_ci_start_smc(adev);
1905         for (i = 0; i < adev->usec_timeout; i++) {
1906                 if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
1907                         break;
1908         }
1909 }
1910
1911 static void ci_dpm_stop_smc(struct amdgpu_device *adev)
1912 {
1913         amdgpu_ci_reset_smc(adev);
1914         amdgpu_ci_stop_smc_clock(adev);
1915 }
1916
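/*
 * Read the SMU7 firmware header out of SMC SRAM to learn where the
 * firmware placed the DPM table, soft registers, MC register table,
 * fan table and MC ARB DRAM timing table.
 */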
1917 static int ci_process_firmware_header(struct amdgpu_device *adev)
1918 {
1919         struct ci_power_info *pi = ci_get_pi(adev);
1920         u32 tmp;
1921         int ret;
1922
1923         ret = amdgpu_ci_read_smc_sram_dword(adev,
1924                                      SMU7_FIRMWARE_HEADER_LOCATION +
1925                                      offsetof(SMU7_Firmware_Header, DpmTable),
1926                                      &tmp, pi->sram_end);
1927         if (ret)
1928                 return ret;
1929
1930         pi->dpm_table_start = tmp;
1931
1932         ret = amdgpu_ci_read_smc_sram_dword(adev,
1933                                      SMU7_FIRMWARE_HEADER_LOCATION +
1934                                      offsetof(SMU7_Firmware_Header, SoftRegisters),
1935                                      &tmp, pi->sram_end);
1936         if (ret)
1937                 return ret;
1938
1939         pi->soft_regs_start = tmp;
1940
1941         ret = amdgpu_ci_read_smc_sram_dword(adev,
1942                                      SMU7_FIRMWARE_HEADER_LOCATION +
1943                                      offsetof(SMU7_Firmware_Header, mcRegisterTable),
1944                                      &tmp, pi->sram_end);
1945         if (ret)
1946                 return ret;
1947
1948         pi->mc_reg_table_start = tmp;
1949
1950         ret = amdgpu_ci_read_smc_sram_dword(adev,
1951                                      SMU7_FIRMWARE_HEADER_LOCATION +
1952                                      offsetof(SMU7_Firmware_Header, FanTable),
1953                                      &tmp, pi->sram_end);
1954         if (ret)
1955                 return ret;
1956
1957         pi->fan_table_start = tmp;
1958
1959         ret = amdgpu_ci_read_smc_sram_dword(adev,
1960                                      SMU7_FIRMWARE_HEADER_LOCATION +
1961                                      offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1962                                      &tmp, pi->sram_end);
1963         if (ret)
1964                 return ret;
1965
1966         pi->arb_table_start = tmp;
1967
1968         return 0;
1969 }
1970
1971 static void ci_read_clock_registers(struct amdgpu_device *adev)
1972 {
1973         struct ci_power_info *pi = ci_get_pi(adev);
1974
1975         pi->clock_registers.cg_spll_func_cntl =
1976                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
1977         pi->clock_registers.cg_spll_func_cntl_2 =
1978                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
1979         pi->clock_registers.cg_spll_func_cntl_3 =
1980                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
1981         pi->clock_registers.cg_spll_func_cntl_4 =
1982                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
1983         pi->clock_registers.cg_spll_spread_spectrum =
1984                 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
1985         pi->clock_registers.cg_spll_spread_spectrum_2 =
1986                 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
1987         pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
1988         pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
1989         pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
1990         pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
1991         pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
1992         pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
1993         pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
1994         pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
1995         pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
1996 }
1997
1998 static void ci_init_sclk_t(struct amdgpu_device *adev)
1999 {
2000         struct ci_power_info *pi = ci_get_pi(adev);
2001
2002         pi->low_sclk_interrupt_t = 0;
2003 }
2004
2005 static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2006                                          bool enable)
2007 {
2008         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2009
2010         if (enable)
2011                 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2012         else
2013                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2014         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2015 }
2016
2017 static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2018 {
2019         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2020
2021         tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2022
2023         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2024 }
2025
2026 #if 0
2027 static int ci_enter_ulp_state(struct amdgpu_device *adev)
2028 {
2030         WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
2031
2032         mdelay(25);
2033
2034         return 0;
2035 }
2036
2037 static int ci_exit_ulp_state(struct amdgpu_device *adev)
2038 {
2039         int i;
2040
2041         WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
2042
2043         mdelay(7);
2044
2045         for (i = 0; i < adev->usec_timeout; i++) {
2046                 if (RREG32(mmSMC_RESP_0) == 1)
2047                         break;
2048                 udelay(1000);
2049         }
2050
2051         return 0;
2052 }
2053 #endif
2054
2055 static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2056                                         bool has_display)
2057 {
2058         PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2059
2060         return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
2061 }
2062
2063 static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2064                                       bool enable)
2065 {
2066         struct ci_power_info *pi = ci_get_pi(adev);
2067
2068         if (enable) {
2069                 if (pi->caps_sclk_ds) {
2070                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2071                                 return -EINVAL;
2072                 } else {
2073                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2074                                 return -EINVAL;
2075                 }
2076         } else {
2077                 if (pi->caps_sclk_ds) {
2078                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2079                                 return -EINVAL;
2080                 }
2081         }
2082
2083         return 0;
2084 }
2085
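/*
 * Program the display gap so the SMC only switches clocks during VBLANK
 * (or ignores the gap when no CRTC is active). The pre-VBI interval is
 * derived from the refresh rate and vblank time (with 60 Hz / 500 us
 * fallbacks) and converted to reference-clock ticks; the 200 us term is
 * a safety margin inherited from the original code.
 */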
2086 static void ci_program_display_gap(struct amdgpu_device *adev)
2087 {
2088         u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2089         u32 pre_vbi_time_in_us;
2090         u32 frame_time_in_us;
2091         u32 ref_clock = adev->clock.spll.reference_freq;
2092         u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
2093         u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
2094
2095         tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
2096         if (adev->pm.dpm.new_active_crtc_count > 0)
2097                 tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2098         else
2099                 tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2100         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2101
2102         if (refresh_rate == 0)
2103                 refresh_rate = 60;
2104         if (vblank_time == 0xffffffff)
2105                 vblank_time = 500;
2106         frame_time_in_us = 1000000 / refresh_rate;
2107         pre_vbi_time_in_us =
2108                 frame_time_in_us - 200 - vblank_time;
2109         tmp = pre_vbi_time_in_us * (ref_clock / 100);
2110
2111         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
2112         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2113         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2114
2116         ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
2118 }
2119
2120 static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2121 {
2122         struct ci_power_info *pi = ci_get_pi(adev);
2123         u32 tmp;
2124
2125         if (enable) {
2126                 if (pi->caps_sclk_ss_support) {
2127                         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2128                         tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2129                         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2130                 }
2131         } else {
2132                 tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2133                 tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2134                 WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2135
2136                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2137                 tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2138                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2139         }
2140 }
2141
2142 static void ci_program_sstp(struct amdgpu_device *adev)
2143 {
2144         WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2145                    (CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2146                    (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT));
2147 }
2148
2149 static void ci_enable_display_gap(struct amdgpu_device *adev)
2150 {
2151         u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2152
2153         tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2154                         CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2155         tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2156                 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2157
2158         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2159 }
2160
2161 static void ci_program_vc(struct amdgpu_device *adev)
2162 {
2163         u32 tmp;
2164
2165         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2166         tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2167         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2168
2169         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
2170         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
2171         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
2172         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
2173         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
2174         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
2175         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
2176         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
2177 }
2178
2179 static void ci_clear_vc(struct amdgpu_device *adev)
2180 {
2181         u32 tmp;
2182
2183         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2184         tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2185         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2186
2187         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
2188         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
2189         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
2190         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
2191         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
2192         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
2193         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
2194         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
2195 }
2196
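/*
 * Wait for the ROM boot sequence to finish, halt the SMC, and copy the
 * SMC microcode into SRAM; the SMC is started again later (see
 * ci_dpm_start_smc()).
 */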
2197 static int ci_upload_firmware(struct amdgpu_device *adev)
2198 {
2199         struct ci_power_info *pi = ci_get_pi(adev);
2200         int i, ret;
2201
2202         for (i = 0; i < adev->usec_timeout; i++) {
2203                 if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2204                         break;
2205         }
2206         WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2207
2208         amdgpu_ci_stop_smc_clock(adev);
2209         amdgpu_ci_reset_smc(adev);
2210
2211         ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
2212
2213         return ret;
2215 }
2216
2217 static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2218                                      struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2219                                      struct atom_voltage_table *voltage_table)
2220 {
2221         u32 i;
2222
2223         if (voltage_dependency_table == NULL)
2224                 return -EINVAL;
2225
2226         voltage_table->mask_low = 0;
2227         voltage_table->phase_delay = 0;
2228
2229         voltage_table->count = voltage_dependency_table->count;
2230         for (i = 0; i < voltage_table->count; i++) {
2231                 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2232                 voltage_table->entries[i].smio_low = 0;
2233         }
2234
2235         return 0;
2236 }
2237
2238 static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2239 {
2240         struct ci_power_info *pi = ci_get_pi(adev);
2241         int ret;
2242
2243         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2244                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2245                                                         VOLTAGE_OBJ_GPIO_LUT,
2246                                                         &pi->vddc_voltage_table);
2247                 if (ret)
2248                         return ret;
2249         } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2250                 ret = ci_get_svi2_voltage_table(adev,
2251                                                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2252                                                 &pi->vddc_voltage_table);
2253                 if (ret)
2254                         return ret;
2255         }
2256
2257         if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2258                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2259                                                          &pi->vddc_voltage_table);
2260
2261         if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2262                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2263                                                         VOLTAGE_OBJ_GPIO_LUT,
2264                                                         &pi->vddci_voltage_table);
2265                 if (ret)
2266                         return ret;
2267         } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2268                 ret = ci_get_svi2_voltage_table(adev,
2269                                                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2270                                                 &pi->vddci_voltage_table);
2271                 if (ret)
2272                         return ret;
2273         }
2274
2275         if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2276                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2277                                                          &pi->vddci_voltage_table);
2278
2279         if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2280                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2281                                                         VOLTAGE_OBJ_GPIO_LUT,
2282                                                         &pi->mvdd_voltage_table);
2283                 if (ret)
2284                         return ret;
2285         } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2286                 ret = ci_get_svi2_voltage_table(adev,
2287                                                 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2288                                                 &pi->mvdd_voltage_table);
2289                 if (ret)
2290                         return ret;
2291         }
2292
2293         if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2294                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2295                                                          &pi->mvdd_voltage_table);
2296
2297         return 0;
2298 }
2299
2300 static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2301                                           struct atom_voltage_table_entry *voltage_table,
2302                                           SMU7_Discrete_VoltageLevel *smc_voltage_table)
2303 {
2304         int ret;
2305
2306         ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2307                                             &smc_voltage_table->StdVoltageHiSidd,
2308                                             &smc_voltage_table->StdVoltageLoSidd);
2309
2310         if (ret) {
2311                 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2312                 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2313         }
2314
2315         smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2316         smc_voltage_table->StdVoltageHiSidd =
2317                 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2318         smc_voltage_table->StdVoltageLoSidd =
2319                 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2320 }
2321
2322 static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2323                                       SMU7_Discrete_DpmTable *table)
2324 {
2325         struct ci_power_info *pi = ci_get_pi(adev);
2326         unsigned int count;
2327
2328         table->VddcLevelCount = pi->vddc_voltage_table.count;
2329         for (count = 0; count < table->VddcLevelCount; count++) {
2330                 ci_populate_smc_voltage_table(adev,
2331                                               &pi->vddc_voltage_table.entries[count],
2332                                               &table->VddcLevel[count]);
2333
2334                 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2335                         table->VddcLevel[count].Smio |=
2336                                 pi->vddc_voltage_table.entries[count].smio_low;
2337                 else
2338                         table->VddcLevel[count].Smio = 0;
2339         }
2340         table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2341
2342         return 0;
2343 }
2344
2345 static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2346                                        SMU7_Discrete_DpmTable *table)
2347 {
2348         unsigned int count;
2349         struct ci_power_info *pi = ci_get_pi(adev);
2350
2351         table->VddciLevelCount = pi->vddci_voltage_table.count;
2352         for (count = 0; count < table->VddciLevelCount; count++) {
2353                 ci_populate_smc_voltage_table(adev,
2354                                               &pi->vddci_voltage_table.entries[count],
2355                                               &table->VddciLevel[count]);
2356
2357                 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2358                         table->VddciLevel[count].Smio |=
2359                                 pi->vddci_voltage_table.entries[count].smio_low;
2360                 else
2361                         table->VddciLevel[count].Smio = 0;
2362         }
2363         table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2364
2365         return 0;
2366 }
2367
2368 static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2369                                       SMU7_Discrete_DpmTable *table)
2370 {
2371         struct ci_power_info *pi = ci_get_pi(adev);
2372         unsigned int count;
2373
2374         table->MvddLevelCount = pi->mvdd_voltage_table.count;
2375         for (count = 0; count < table->MvddLevelCount; count++) {
2376                 ci_populate_smc_voltage_table(adev,
2377                                               &pi->mvdd_voltage_table.entries[count],
2378                                               &table->MvddLevel[count]);
2379
2380                 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2381                         table->MvddLevel[count].Smio |=
2382                                 pi->mvdd_voltage_table.entries[count].smio_low;
2383                 else
2384                         table->MvddLevel[count].Smio = 0;
2385         }
2386         table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2387
2388         return 0;
2389 }
2390
2391 static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2392                                           SMU7_Discrete_DpmTable *table)
2393 {
2394         int ret;
2395
2396         ret = ci_populate_smc_vddc_table(adev, table);
2397         if (ret)
2398                 return ret;
2399
2400         ret = ci_populate_smc_vddci_table(adev, table);
2401         if (ret)
2402                 return ret;
2403
2404         ret = ci_populate_smc_mvdd_table(adev, table);
2405         if (ret)
2406                 return ret;
2407
2408         return 0;
2409 }
2410
2411 static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2412                                   SMU7_Discrete_VoltageLevel *voltage)
2413 {
2414         struct ci_power_info *pi = ci_get_pi(adev);
2415         u32 i = 0;
2416
2417         if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2418                 for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2419                         if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2420                                 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2421                                 break;
2422                         }
2423                 }
2424
2425                 if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
2426                         return -EINVAL;

                /* a matching MVDD level was found for this mclk */
                return 0;
2427         }
2428
        /* MVDD not controlled: callers treat the failure as "no MVDD value" */
2429         return -EINVAL;
2430 }
2431
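/*
 * Look up the "standard" hi/lo SIDD voltages for a VDDC value: find the
 * matching (or next higher) entry in the sclk/vddc dependency table and
 * use the corresponding CAC leakage table entry; fall back to the raw
 * voltage when no leakage data exists.
 */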
2432 static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2433                                          struct atom_voltage_table_entry *voltage_table,
2434                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2435 {
2436         u16 v_index, idx;
2437         bool voltage_found = false;
2438         *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2439         *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2440
2441         if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2442                 return -EINVAL;
2443
2444         if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2445                 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2446                         if (voltage_table->value ==
2447                             adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2448                                 voltage_found = true;
2449                                 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2450                                         idx = v_index;
2451                                 else
2452                                         idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2453                                 *std_voltage_lo_sidd =
2454                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2455                                 *std_voltage_hi_sidd =
2456                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2457                                 break;
2458                         }
2459                 }
2460
2461                 if (!voltage_found) {
2462                         for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2463                                 if (voltage_table->value <=
2464                                     adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2465                                         voltage_found = true;
2466                                         if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2467                                                 idx = v_index;
2468                                         else
2469                                                 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2470                                         *std_voltage_lo_sidd =
2471                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2472                                         *std_voltage_hi_sidd =
2473                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2474                                         break;
2475                                 }
2476                         }
2477                 }
2478         }
2479
2480         return 0;
2481 }
2482
2483 static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2484                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2485                                                   u32 sclk,
2486                                                   u32 *phase_shedding)
2487 {
2488         unsigned int i;
2489
2490         *phase_shedding = 1;
2491
2492         for (i = 0; i < limits->count; i++) {
2493                 if (sclk < limits->entries[i].sclk) {
2494                         *phase_shedding = i;
2495                         break;
2496                 }
2497         }
2498 }
2499
2500 static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2501                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2502                                                   u32 mclk,
2503                                                   u32 *phase_shedding)
2504 {
2505         unsigned int i;
2506
2507         *phase_shedding = 1;
2508
2509         for (i = 0; i < limits->count; i++) {
2510                 if (mclk < limits->entries[i].mclk) {
2511                         *phase_shedding = i;
2512                         break;
2513                 }
2514         }
2515 }
2516
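/*
 * The top byte of the first dword of the MC ARB table in SMC SRAM selects
 * the active register set; initialize it to MC_CG_ARB_FREQ_F1 while
 * preserving the rest of the dword.
 */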
2517 static int ci_init_arb_table_index(struct amdgpu_device *adev)
2518 {
2519         struct ci_power_info *pi = ci_get_pi(adev);
2520         u32 tmp;
2521         int ret;
2522
2523         ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2524                                      &tmp, pi->sram_end);
2525         if (ret)
2526                 return ret;
2527
2528         tmp &= 0x00FFFFFF;
2529         tmp |= MC_CG_ARB_FREQ_F1 << 24;
2530
2531         return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2532                                        tmp, pi->sram_end);
2533 }
2534
2535 static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2536                                          struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2537                                          u32 clock, u32 *voltage)
2538 {
2539         u32 i = 0;
2540
2541         if (allowed_clock_voltage_table->count == 0)
2542                 return -EINVAL;
2543
2544         for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2545                 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2546                         *voltage = allowed_clock_voltage_table->entries[i].v;
2547                         return 0;
2548                 }
2549         }
2550
2551         *voltage = allowed_clock_voltage_table->entries[i-1].v;
2552
2553         return 0;
2554 }
2555
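/*
 * Pick the largest deep-sleep divider ID (sclk is divided by 2^id) that
 * still keeps the divided clock at or above the minimum engine clock.
 */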
2556 static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
2557 {
2558         u32 i;
2559         u32 tmp;
2560         u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
2561
2562         if (sclk < min)
2563                 return 0;
2564
2565         for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2566                 tmp = sclk >> i;
2567                 if (tmp >= min || i == 0)
2568                         break;
2569         }
2570
2571         return (u8)i;
2572 }
2573
2574 static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
2575 {
2576         return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2577 }
2578
2579 static int ci_reset_to_default(struct amdgpu_device *adev)
2580 {
2581         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2582                 0 : -EINVAL;
2583 }
2584
2585 static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2586 {
2587         u32 tmp;
2588
2589         tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2590
2591         if (tmp == MC_CG_ARB_FREQ_F0)
2592                 return 0;
2593
2594         return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2595 }
2596
2597 static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2598                                         const u32 engine_clock,
2599                                         const u32 memory_clock,
2600                                         u32 *dram_timing2)
2601 {
2602         bool patch;
2603         u32 tmp, tmp2;
2604
2605         tmp = RREG32(mmMC_SEQ_MISC0);
2606         patch = ((tmp & 0x0000f00) == 0x300);
2607
2608         if (patch &&
2609             ((adev->pdev->device == 0x67B0) ||
2610              (adev->pdev->device == 0x67B1))) {
2611                 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2612                         tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2613                         *dram_timing2 &= ~0x00ff0000;
2614                         *dram_timing2 |= tmp2 << 16;
2615                 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2616                         tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2617                         *dram_timing2 &= ~0x00ff0000;
2618                         *dram_timing2 |= tmp2 << 16;
2619                 }
2620         }
2621 }
2622
2623 static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2624                                                 u32 sclk,
2625                                                 u32 mclk,
2626                                                 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2627 {
2628         u32 dram_timing;
2629         u32 dram_timing2;
2630         u32 burst_time;
2631
2632         amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2633
2634         dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
2635         dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2636         burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2637
2638         ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2639
2640         arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2641         arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2642         arb_regs->McArbBurstTime = (u8)burst_time;
2643
2644         return 0;
2645 }
2646
2647 static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2648 {
2649         struct ci_power_info *pi = ci_get_pi(adev);
2650         SMU7_Discrete_MCArbDramTimingTable arb_regs;
2651         u32 i, j;
2652         int ret =  0;
2653
2654         memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2655
2656         for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2657                 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2658                         ret = ci_populate_memory_timing_parameters(adev,
2659                                                                    pi->dpm_table.sclk_table.dpm_levels[i].value,
2660                                                                    pi->dpm_table.mclk_table.dpm_levels[j].value,
2661                                                                    &arb_regs.entries[i][j]);
2662                         if (ret)
2663                                 break;
2664                 }
2665         }
2666
2667         if (ret == 0)
2668                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
2669                                            pi->arb_table_start,
2670                                            (u8 *)&arb_regs,
2671                                            sizeof(SMU7_Discrete_MCArbDramTimingTable),
2672                                            pi->sram_end);
2673
2674         return ret;
2675 }
2676
2677 static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
2678 {
2679         struct ci_power_info *pi = ci_get_pi(adev);
2680
2681         if (pi->need_update_smu7_dpm_table == 0)
2682                 return 0;
2683
2684         return ci_do_program_memory_timing_parameters(adev);
2685 }
2686
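/*
 * Derive the boot DPM levels from the boot state: pick the first entry
 * in each voltage dependency table whose clock is at least the boot
 * sclk/mclk and use its index as the initial graphics/memory level.
 */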
2687 static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
2688                                           struct amdgpu_ps *amdgpu_boot_state)
2689 {
2690         struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
2691         struct ci_power_info *pi = ci_get_pi(adev);
2692         u32 level = 0;
2693
2694         for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2695                 if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2696                     boot_state->performance_levels[0].sclk) {
2697                         pi->smc_state_table.GraphicsBootLevel = level;
2698                         break;
2699                 }
2700         }
2701
2702         for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2703                 if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2704                     boot_state->performance_levels[0].mclk) {
2705                         pi->smc_state_table.MemoryBootLevel = level;
2706                         break;
2707                 }
2708         }
2709 }
2710
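/*
 * Convert the per-level enabled flags into a bitmask for the SMC:
 * bit n is set when dpm_levels[n] is enabled. Iterating from the
 * highest level down while shifting left keeps bit n aligned with
 * level n.
 */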
2711 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2712 {
2713         u32 i;
2714         u32 mask_value = 0;
2715
2716         for (i = dpm_table->count; i > 0; i--) {
2717                 mask_value <<= 1;
2718                 if (dpm_table->dpm_levels[i-1].enabled)
2719                         mask_value |= 0x1;
2722         }
2723
2724         return mask_value;
2725 }
2726
2727 static void ci_populate_smc_link_level(struct amdgpu_device *adev,
2728                                        SMU7_Discrete_DpmTable *table)
2729 {
2730         struct ci_power_info *pi = ci_get_pi(adev);
2731         struct ci_dpm_table *dpm_table = &pi->dpm_table;
2732         u32 i;
2733
2734         for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2735                 table->LinkLevel[i].PcieGenSpeed =
2736                         (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2737                 table->LinkLevel[i].PcieLaneCount =
2738                         amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2739                 table->LinkLevel[i].EnabledForActivity = 1;
2740                 table->LinkLevel[i].DownT = cpu_to_be32(5);
2741                 table->LinkLevel[i].UpT = cpu_to_be32(30);
2742         }
2743
2744         pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2745         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2746                 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2747 }
2748
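/*
 * Fill the SMC UVD level table from the UVD clock/voltage dependency
 * table; the VCLK and DCLK dividers come from the VBIOS. The VCE, ACP
 * and SAMU helpers below follow the same pattern for their respective
 * clock domains.
 */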
2749 static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
2750                                      SMU7_Discrete_DpmTable *table)
2751 {
2752         u32 count;
2753         struct atom_clock_dividers dividers;
2754         int ret = -EINVAL;
2755
2756         table->UvdLevelCount =
2757                 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2758
2759         for (count = 0; count < table->UvdLevelCount; count++) {
2760                 table->UvdLevel[count].VclkFrequency =
2761                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2762                 table->UvdLevel[count].DclkFrequency =
2763                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2764                 table->UvdLevel[count].MinVddc =
2765                         adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2766                 table->UvdLevel[count].MinVddcPhases = 1;
2767
2768                 ret = amdgpu_atombios_get_clock_dividers(adev,
2769                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2770                                                          table->UvdLevel[count].VclkFrequency, false, &dividers);
2771                 if (ret)
2772                         return ret;
2773
2774                 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2775
2776                 ret = amdgpu_atombios_get_clock_dividers(adev,
2777                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2778                                                          table->UvdLevel[count].DclkFrequency, false, &dividers);
2779                 if (ret)
2780                         return ret;
2781
2782                 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2783
2784                 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2785                 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2786                 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2787         }
2788
2789         return ret;
2790 }
2791
2792 static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
2793                                      SMU7_Discrete_DpmTable *table)
2794 {
2795         u32 count;
2796         struct atom_clock_dividers dividers;
2797         int ret = -EINVAL;
2798
2799         table->VceLevelCount =
2800                 adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2801
2802         for (count = 0; count < table->VceLevelCount; count++) {
2803                 table->VceLevel[count].Frequency =
2804                         adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2805                 table->VceLevel[count].MinVoltage =
2806                         (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2807                 table->VceLevel[count].MinPhases = 1;
2808
2809                 ret = amdgpu_atombios_get_clock_dividers(adev,
2810                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2811                                                          table->VceLevel[count].Frequency, false, &dividers);
2812                 if (ret)
2813                         return ret;
2814
2815                 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2816
2817                 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2818                 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2819         }
2820
2821         return ret;
2823 }
2824
2825 static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
2826                                      SMU7_Discrete_DpmTable *table)
2827 {
2828         u32 count;
2829         struct atom_clock_dividers dividers;
2830         int ret = -EINVAL;
2831
2832         table->AcpLevelCount = (u8)
2833                 (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2834
2835         for (count = 0; count < table->AcpLevelCount; count++) {
2836                 table->AcpLevel[count].Frequency =
2837                         adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2838                 table->AcpLevel[count].MinVoltage =
2839                         adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2840                 table->AcpLevel[count].MinPhases = 1;
2841
2842                 ret = amdgpu_atombios_get_clock_dividers(adev,
2843                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2844                                                          table->AcpLevel[count].Frequency, false, &dividers);
2845                 if (ret)
2846                         return ret;
2847
2848                 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2849
2850                 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2851                 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2852         }
2853
2854         return ret;
2855 }
2856
2857 static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
2858                                       SMU7_Discrete_DpmTable *table)
2859 {
2860         u32 count;
2861         struct atom_clock_dividers dividers;
2862         int ret = -EINVAL;
2863
2864         table->SamuLevelCount =
2865                 adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2866
2867         for (count = 0; count < table->SamuLevelCount; count++) {
2868                 table->SamuLevel[count].Frequency =
2869                         adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2870                 table->SamuLevel[count].MinVoltage =
2871                         adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2872                 table->SamuLevel[count].MinPhases = 1;
2873
2874                 ret = amdgpu_atombios_get_clock_dividers(adev,
2875                                                          COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2876                                                          table->SamuLevel[count].Frequency, false, &dividers);
2877                 if (ret)
2878                         return ret;
2879
2880                 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2881
2882                 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2883                 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2884         }
2885
2886         return ret;
2887 }
2888
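/*
 * Translate a memory clock into MPLL register settings. The divider
 * parameters come from the VBIOS; when memory spread spectrum is
 * supported, CLKS/CLKV are derived from the nominal VCO frequency
 * (the memory clock scaled by the QDR factor and post divider).
 */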
2889 static int ci_calculate_mclk_params(struct amdgpu_device *adev,
2890                                     u32 memory_clock,
2891                                     SMU7_Discrete_MemoryLevel *mclk,
2892                                     bool strobe_mode,
2893                                     bool dll_state_on)
2894 {
2895         struct ci_power_info *pi = ci_get_pi(adev);
2896         u32  dll_cntl = pi->clock_registers.dll_cntl;
2897         u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2898         u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2899         u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2900         u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2901         u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2902         u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2903         u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
2904         u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
2905         struct atom_mpll_param mpll_param;
2906         int ret;
2907
2908         ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
2909         if (ret)
2910                 return ret;
2911
2912         mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
2913         mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);
2914
2915         mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
2916                         MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
2917         mpll_func_cntl_1 |= (mpll_param.clkf << MPLL_FUNC_CNTL_1__CLKF__SHIFT) |
2918                 (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
2919                 (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);
2920
2921         mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
2922         mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2923
2924         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
2925                 mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
2926                                 MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
2927                 mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
2928                                 (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
2929         }
2930
2931         if (pi->caps_mclk_ss_support) {
2932                 struct amdgpu_atom_ss ss;
2933                 u32 freq_nom;
2934                 u32 tmp;
2935                 u32 reference_clock = adev->clock.mpll.reference_freq;
2936
2937                 if (mpll_param.qdr == 1)
2938                         freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2939                 else
2940                         freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2941
2942                 tmp = (freq_nom / reference_clock);
2943                 tmp = tmp * tmp;
2944                 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
2945                                                      ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2946                         u32 clks = reference_clock * 5 / ss.rate;
2947                         u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2948
2949                         mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
2950                         mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);
2951
2952                         mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
2953                         mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
2954                 }
2955         }
2956
2957         mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
2958         mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);
2959
2960         if (dll_state_on)
2961                 mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2962                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
2963         else
2964                 mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
2965                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
2966
2967         mclk->MclkFrequency = memory_clock;
2968         mclk->MpllFuncCntl = mpll_func_cntl;
2969         mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2970         mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2971         mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2972         mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2973         mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2974         mclk->DllCntl = dll_cntl;
2975         mclk->MpllSs1 = mpll_ss1;
2976         mclk->MpllSs2 = mpll_ss2;
2977
2978         return 0;
2979 }
2980
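/*
 * Build a single SMC memory DPM level: look up the minimum VDDC/VDDCI/
 * MVDD for the clock, decide stutter/strobe/EDC modes from the
 * configured thresholds, pick the DLL state from the MC_SEQ_MISC
 * registers, then byte-swap the result for the big-endian SMC.
 */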
2981 static int ci_populate_single_memory_level(struct amdgpu_device *adev,
2982                                            u32 memory_clock,
2983                                            SMU7_Discrete_MemoryLevel *memory_level)
2984 {
2985         struct ci_power_info *pi = ci_get_pi(adev);
2986         int ret;
2987         bool dll_state_on;
2988
2989         if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2990                 ret = ci_get_dependency_volt_by_clk(adev,
2991                                                     &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2992                                                     memory_clock, &memory_level->MinVddc);
2993                 if (ret)
2994                         return ret;
2995         }
2996
2997         if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2998                 ret = ci_get_dependency_volt_by_clk(adev,
2999                                                     &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3000                                                     memory_clock, &memory_level->MinVddci);
3001                 if (ret)
3002                         return ret;
3003         }
3004
3005         if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
3006                 ret = ci_get_dependency_volt_by_clk(adev,
3007                                                     &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
3008                                                     memory_clock, &memory_level->MinMvdd);
3009                 if (ret)
3010                         return ret;
3011         }
3012
3013         memory_level->MinVddcPhases = 1;
3014
3015         if (pi->vddc_phase_shed_control)
3016                 ci_populate_phase_value_based_on_mclk(adev,
3017                                                       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3018                                                       memory_clock,
3019                                                       &memory_level->MinVddcPhases);
3020
3021         memory_level->EnabledForThrottle = 1;
3022         memory_level->UpH = 0;
3023         memory_level->DownH = 100;
3024         memory_level->VoltageDownH = 0;
3025         memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
3026
3027         memory_level->StutterEnable = false;
3028         memory_level->StrobeEnable = false;
3029         memory_level->EdcReadEnable = false;
3030         memory_level->EdcWriteEnable = false;
3031         memory_level->RttEnable = false;
3032
3033         memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3034
3035         if (pi->mclk_stutter_mode_threshold &&
3036             (memory_clock <= pi->mclk_stutter_mode_threshold) &&
3037             !pi->uvd_enabled &&
3038             (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
3039             (adev->pm.dpm.new_active_crtc_count <= 2))
3040                 memory_level->StutterEnable = true;
3041
3042         if (pi->mclk_strobe_mode_threshold &&
3043             (memory_clock <= pi->mclk_strobe_mode_threshold))
3044                 memory_level->StrobeEnable = true;
3045
3046         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
3047                 memory_level->StrobeRatio =
3048                         ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
3049                 if (pi->mclk_edc_enable_threshold &&
3050                     (memory_clock > pi->mclk_edc_enable_threshold))
3051                         memory_level->EdcReadEnable = true;
3052
3053                 if (pi->mclk_edc_wr_enable_threshold &&
3054                     (memory_clock > pi->mclk_edc_wr_enable_threshold))
3055                         memory_level->EdcWriteEnable = true;
3056
3057                 if (memory_level->StrobeEnable) {
3058                         if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
3059                             ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
3060                                 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) != 0;
3061                         else
3062                                 dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) != 0;
3063                 } else {
3064                         dll_state_on = pi->dll_default_on;
3065                 }
3066         } else {
3067                 memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
3068                 dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) != 0;
3069         }
3070
3071         ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
3072         if (ret)
3073                 return ret;
3074
3075         memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
3076         memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
3077         memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
3078         memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
3079
3080         memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
3081         memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
3082         memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
3083         memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
3084         memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
3085         memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
3086         memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
3087         memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
3088         memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
3089         memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
3090         memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
3091
3092         return 0;
3093 }
3094
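/*
 * Program the ACPI (lowest power) level: the engine clock falls back to
 * the SPLL reference clock with the SPLL powered down and held in
 * reset, and the memory DLLs are reset and powered down. Voltages use
 * the ACPI values when available, otherwise the minimums from the
 * powerplay table.
 */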
3095 static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
3096                                       SMU7_Discrete_DpmTable *table)
3097 {
3098         struct ci_power_info *pi = ci_get_pi(adev);
3099         struct atom_clock_dividers dividers;
3100         SMU7_Discrete_VoltageLevel voltage_level;
3101         u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
3102         u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
3103         u32 dll_cntl = pi->clock_registers.dll_cntl;
3104         u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
3105         int ret;
3106
3107         table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
3108
3109         if (pi->acpi_vddc)
3110                 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
3111         else
3112                 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
3113
3114         table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
3115
3116         table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;
3117
3118         ret = amdgpu_atombios_get_clock_dividers(adev,
3119                                                  COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3120                                                  table->ACPILevel.SclkFrequency, false, &dividers);
3121         if (ret)
3122                 return ret;
3123
3124         table->ACPILevel.SclkDid = (u8)dividers.post_divider;
3125         table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3126         table->ACPILevel.DeepSleepDivId = 0;
3127
3128         spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
3129         spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;
3130
3131         spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
3132         spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);
3133
3134         table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
3135         table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
3136         table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
3137         table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
3138         table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
3139         table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3140         table->ACPILevel.CcPwrDynRm = 0;
3141         table->ACPILevel.CcPwrDynRm1 = 0;
3142
3143         table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
3144         table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
3145         table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
3146         table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
3147         table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
3148         table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
3149         table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
3150         table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
3151         table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
3152         table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
3153         table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
3154
3155         table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
3156         table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
3157
3158         if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
3159                 if (pi->acpi_vddci)
3160                         table->MemoryACPILevel.MinVddci =
3161                                 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
3162                 else
3163                         table->MemoryACPILevel.MinVddci =
3164                                 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
3165         }
3166
3167         if (ci_populate_mvdd_value(adev, 0, &voltage_level))
3168                 table->MemoryACPILevel.MinMvdd = 0;
3169         else
3170                 table->MemoryACPILevel.MinMvdd =
3171                         cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
3172
3173         mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
3174                 MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
3175         mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
3176                         MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);
3177
3178         dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);
3179
3180         table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
3181         table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
3182         table->MemoryACPILevel.MpllAdFuncCntl =
3183                 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
3184         table->MemoryACPILevel.MpllDqFuncCntl =
3185                 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
3186         table->MemoryACPILevel.MpllFuncCntl =
3187                 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
3188         table->MemoryACPILevel.MpllFuncCntl_1 =
3189                 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
3190         table->MemoryACPILevel.MpllFuncCntl_2 =
3191                 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
3192         table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
3193         table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
3194
3195         table->MemoryACPILevel.EnabledForThrottle = 0;
3196         table->MemoryACPILevel.EnabledForActivity = 0;
3197         table->MemoryACPILevel.UpH = 0;
3198         table->MemoryACPILevel.DownH = 100;
3199         table->MemoryACPILevel.VoltageDownH = 0;
3200         table->MemoryACPILevel.ActivityLevel =
3201                 cpu_to_be16((u16)pi->mclk_activity_target);
3202
3203         table->MemoryACPILevel.StutterEnable = false;
3204         table->MemoryACPILevel.StrobeEnable = false;
3205         table->MemoryACPILevel.EdcReadEnable = false;
3206         table->MemoryACPILevel.EdcWriteEnable = false;
3207         table->MemoryACPILevel.RttEnable = false;
3208
3209         return 0;
3210 }
3211
3213 static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
3214 {
3215         struct ci_power_info *pi = ci_get_pi(adev);
3216         struct ci_ulv_parm *ulv = &pi->ulv;
3217
3218         if (ulv->supported) {
3219                 if (enable)
3220                         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
3221                                 0 : -EINVAL;
3222                 else
3223                         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
3224                                 0 : -EINVAL;
3225         }
3226
3227         return 0;
3228 }
3229
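/*
 * Set up the ULV (ultra-low voltage) state. The target ULV voltage is
 * passed in via backbias_response_time, which is evidently repurposed
 * for this here. The offset below the lowest sclk/vddc entry is either
 * programmed directly or, for SVID2, converted to VID steps via
 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 (100/625,
 * i.e. one step per 6.25 mV).
 */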
3230 static int ci_populate_ulv_level(struct amdgpu_device *adev,
3231                                  SMU7_Discrete_Ulv *state)
3232 {
3233         struct ci_power_info *pi = ci_get_pi(adev);
3234         u16 ulv_voltage = adev->pm.dpm.backbias_response_time;
3235
3236         state->CcPwrDynRm = 0;
3237         state->CcPwrDynRm1 = 0;
3238
3239         if (ulv_voltage == 0) {
3240                 pi->ulv.supported = false;
3241                 return 0;
3242         }
3243
3244         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
3245                 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3246                         state->VddcOffset = 0;
3247                 else
3248                         state->VddcOffset =
3249                                 adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
3250         } else {
3251                 if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
3252                         state->VddcOffsetVid = 0;
3253                 else
3254                         state->VddcOffsetVid = (u8)
3255                                 ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
3256                                  VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
3257         }
3258         state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
3259
3260         state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
3261         state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
3262         state->VddcOffset = cpu_to_be16(state->VddcOffset);
3263
3264         return 0;
3265 }
3266
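/*
 * Translate an engine clock into SPLL register settings: the feedback
 * divider comes from the VBIOS with dithering enabled, plus optional
 * engine spread spectrum, where CLK_S is derived from the reference
 * clock and CLK_V scales the SS percentage by the feedback divider.
 */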
3267 static int ci_calculate_sclk_params(struct amdgpu_device *adev,
3268                                     u32 engine_clock,
3269                                     SMU7_Discrete_GraphicsLevel *sclk)
3270 {
3271         struct ci_power_info *pi = ci_get_pi(adev);
3272         struct atom_clock_dividers dividers;
3273         u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
3274         u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
3275         u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
3276         u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
3277         u32 reference_clock = adev->clock.spll.reference_freq;
3278         u32 reference_divider;
3279         u32 fbdiv;
3280         int ret;
3281
3282         ret = amdgpu_atombios_get_clock_dividers(adev,
3283                                                  COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
3284                                                  engine_clock, false, &dividers);
3285         if (ret)
3286                 return ret;
3287
3288         reference_divider = 1 + dividers.ref_div;
3289         fbdiv = dividers.fb_div & 0x3FFFFFF;
3290
3291         spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
3292         spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
3293         spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;
3294
3295         if (pi->caps_sclk_ss_support) {
3296                 struct amdgpu_atom_ss ss;
3297                 u32 vco_freq = engine_clock * dividers.post_div;
3298
3299                 if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
3300                                                      ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
3301                         u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
3302                         u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
3303
3304                         cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
3305                         cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
3306                         cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);
3307
3308                         cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
3309                         cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
3310                 }
3311         }
3312
3313         sclk->SclkFrequency = engine_clock;
3314         sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
3315         sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
3316         sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
3317         sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
3318         sclk->SclkDid = (u8)dividers.post_divider;
3319
3320         return 0;
3321 }
3322
3323 static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
3324                                             u32 engine_clock,
3325                                             u16 sclk_activity_level_t,
3326                                             SMU7_Discrete_GraphicsLevel *graphic_level)
3327 {
3328         struct ci_power_info *pi = ci_get_pi(adev);
3329         int ret;
3330
3331         ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
3332         if (ret)
3333                 return ret;
3334
3335         ret = ci_get_dependency_volt_by_clk(adev,
3336                                             &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3337                                             engine_clock, &graphic_level->MinVddc);
3338         if (ret)
3339                 return ret;
3340
3341         graphic_level->SclkFrequency = engine_clock;
3342
3343         graphic_level->Flags = 0;
3344         graphic_level->MinVddcPhases = 1;
3345
3346         if (pi->vddc_phase_shed_control)
3347                 ci_populate_phase_value_based_on_sclk(adev,
3348                                                       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
3349                                                       engine_clock,
3350                                                       &graphic_level->MinVddcPhases);
3351
3352         graphic_level->ActivityLevel = sclk_activity_level_t;
3353
3354         graphic_level->CcPwrDynRm = 0;
3355         graphic_level->CcPwrDynRm1 = 0;
3356         graphic_level->EnabledForThrottle = 1;
3357         graphic_level->UpH = 0;
3358         graphic_level->DownH = 0;
3359         graphic_level->VoltageDownH = 0;
3360         graphic_level->PowerThrottle = 0;
3361
3362         if (pi->caps_sclk_ds)
3363                 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
3364                                                                                    CISLAND_MINIMUM_ENGINE_CLOCK);
3365
3366         graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
3367
3368         graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
3369         graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
3370         graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
3371         graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
3372         graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
3373         graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
3374         graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
3375         graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
3376         graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
3377         graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
3378         graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
3379
3380         return 0;
3381 }
3382
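/*
 * Populate every graphics DPM level and upload the whole array to SMC
 * RAM. Only the two lowest levels keep a deep-sleep divider, the lowest
 * level is enabled for activity, and the highest level uses the high
 * display watermark.
 */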
3383 static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
3384 {
3385         struct ci_power_info *pi = ci_get_pi(adev);
3386         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3387         u32 level_array_address = pi->dpm_table_start +
3388                 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
3389         u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
3390                 SMU7_MAX_LEVELS_GRAPHICS;
3391         SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
3392         u32 i, ret;
3393
3394         memset(levels, 0, level_array_size);
3395
3396         for (i = 0; i < dpm_table->sclk_table.count; i++) {
3397                 ret = ci_populate_single_graphic_level(adev,
3398                                                        dpm_table->sclk_table.dpm_levels[i].value,
3399                                                        (u16)pi->activity_target[i],
3400                                                        &pi->smc_state_table.GraphicsLevel[i]);
3401                 if (ret)
3402                         return ret;
3403                 if (i > 1)
3404                         pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
3405                 if (i == (dpm_table->sclk_table.count - 1))
3406                         pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
3407                                 PPSMC_DISPLAY_WATERMARK_HIGH;
3408         }
3409         pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
3410
3411         pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
3412         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3413                 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
3414
3415         ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3416                                    (u8 *)levels, level_array_size,
3417                                    pi->sram_end);
3418         if (ret)
3419                 return ret;
3420
3421         return 0;
3422 }
3423
3424 static int ci_populate_ulv_state(struct amdgpu_device *adev,
3425                                  SMU7_Discrete_Ulv *ulv_level)
3426 {
3427         return ci_populate_ulv_level(adev, ulv_level);
3428 }
3429
3430 static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
3431 {
3432         struct ci_power_info *pi = ci_get_pi(adev);
3433         struct ci_dpm_table *dpm_table = &pi->dpm_table;
3434         u32 level_array_address = pi->dpm_table_start +
3435                 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
3436         u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
3437                 SMU7_MAX_LEVELS_MEMORY;
3438         SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
3439         u32 i, ret;
3440
3441         memset(levels, 0, level_array_size);
3442
3443         for (i = 0; i < dpm_table->mclk_table.count; i++) {
3444                 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
3445                         return -EINVAL;
3446                 ret = ci_populate_single_memory_level(adev,
3447                                                       dpm_table->mclk_table.dpm_levels[i].value,
3448                                                       &pi->smc_state_table.MemoryLevel[i]);
3449                 if (ret)
3450                         return ret;
3451         }
3452
3453         pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3454
3455         if ((dpm_table->mclk_table.count >= 2) &&
3456             ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
3457                 pi->smc_state_table.MemoryLevel[1].MinVddc =
3458                         pi->smc_state_table.MemoryLevel[0].MinVddc;
3459                 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3460                         pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3461         }
3462
3463         pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
3464
3465         pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
3466         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3467                 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
3468
3469         pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
3470                 PPSMC_DISPLAY_WATERMARK_HIGH;
3471
3472         ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
3473                                    (u8 *)levels, level_array_size,
3474                                    pi->sram_end);
3475         if (ret)
3476                 return ret;
3477
3478         return 0;
3479 }
3480
3481 static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
3482                                       struct ci_single_dpm_table *dpm_table,
3483                                       u32 count)
3484 {
3485         u32 i;
3486
3487         dpm_table->count = count;
3488         for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
3489                 dpm_table->dpm_levels[i].enabled = false;
3490 }
3491
3492 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
3493                                       u32 index, u32 pcie_gen, u32 pcie_lanes)
3494 {
3495         dpm_table->dpm_levels[index].value = pcie_gen;
3496         dpm_table->dpm_levels[index].param1 = pcie_lanes;
3497         dpm_table->dpm_levels[index].enabled = true;
3498 }
3499
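/*
 * Build the fixed six-entry PCIe DPM table from the performance and
 * power-saving gen/lane limits. If only one of the two sets of levels
 * is in use, it is mirrored into the other so every entry is valid.
 */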
3500 static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
3501 {
3502         struct ci_power_info *pi = ci_get_pi(adev);
3503
3504         if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
3505                 return -EINVAL;
3506
3507         if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
3508                 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
3509                 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
3510         } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
3511                 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
3512                 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
3513         }
3514
3515         ci_reset_single_dpm_table(adev,
3516                                   &pi->dpm_table.pcie_speed_table,
3517                                   SMU7_MAX_LEVELS_LINK);
3518
3519         if (adev->asic_type == CHIP_BONAIRE)
3520                 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3521                                           pi->pcie_gen_powersaving.min,
3522                                           pi->pcie_lane_powersaving.max);
3523         else
3524                 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3525                                           pi->pcie_gen_powersaving.min,
3526                                           pi->pcie_lane_powersaving.min);
3527         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
3528                                   pi->pcie_gen_performance.min,
3529                                   pi->pcie_lane_performance.min);
3530         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
3531                                   pi->pcie_gen_powersaving.min,
3532                                   pi->pcie_lane_powersaving.max);
3533         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
3534                                   pi->pcie_gen_performance.min,
3535                                   pi->pcie_lane_performance.max);
3536         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
3537                                   pi->pcie_gen_powersaving.max,
3538                                   pi->pcie_lane_powersaving.max);
3539         ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
3540                                   pi->pcie_gen_performance.max,
3541                                   pi->pcie_lane_performance.max);
3542
3543         pi->dpm_table.pcie_speed_table.count = 6;
3544
3545         return 0;
3546 }
3547
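/*
 * Construct the default DPM tables from the powerplay dependency
 * tables: sclk/mclk levels are taken from the VDDC dependency tables
 * with consecutive duplicate clocks dropped, and the voltage tables are
 * copied as-is. A "golden" copy of the result is kept so the defaults
 * can be restored later.
 */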
3548 static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3549 {
3550         struct ci_power_info *pi = ci_get_pi(adev);
3551         struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
3552                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3553         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
3554                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
3555         struct amdgpu_cac_leakage_table *std_voltage_table =
3556                 &adev->pm.dpm.dyn_state.cac_leakage_table;
3557         u32 i;
3558
3559         if (allowed_sclk_vddc_table == NULL)
3560                 return -EINVAL;
3561         if (allowed_sclk_vddc_table->count < 1)
3562                 return -EINVAL;
3563         if (allowed_mclk_table == NULL)
3564                 return -EINVAL;
3565         if (allowed_mclk_table->count < 1)
3566                 return -EINVAL;
3567
3568         memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
3569
3570         ci_reset_single_dpm_table(adev,
3571                                   &pi->dpm_table.sclk_table,
3572                                   SMU7_MAX_LEVELS_GRAPHICS);
3573         ci_reset_single_dpm_table(adev,
3574                                   &pi->dpm_table.mclk_table,
3575                                   SMU7_MAX_LEVELS_MEMORY);
3576         ci_reset_single_dpm_table(adev,
3577                                   &pi->dpm_table.vddc_table,
3578                                   SMU7_MAX_LEVELS_VDDC);
3579         ci_reset_single_dpm_table(adev,
3580                                   &pi->dpm_table.vddci_table,
3581                                   SMU7_MAX_LEVELS_VDDCI);
3582         ci_reset_single_dpm_table(adev,
3583                                   &pi->dpm_table.mvdd_table,
3584                                   SMU7_MAX_LEVELS_MVDD);
3585
3586         pi->dpm_table.sclk_table.count = 0;
3587         for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3588                 if ((i == 0) ||
3589                     (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
3590                      allowed_sclk_vddc_table->entries[i].clk)) {
3591                         pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
3592                                 allowed_sclk_vddc_table->entries[i].clk;
3593                         pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3594                                 (i == 0);
3595                         pi->dpm_table.sclk_table.count++;
3596                 }
3597         }
3598
3599         pi->dpm_table.mclk_table.count = 0;
3600         for (i = 0; i < allowed_mclk_table->count; i++) {
3601                 if ((i == 0) ||
3602                     (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3603                      allowed_mclk_table->entries[i].clk)) {
3604                         pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3605                                 allowed_mclk_table->entries[i].clk;
3606                         pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3607                                 (i == 0);
3608                         pi->dpm_table.mclk_table.count++;
3609                 }
3610         }
3611
3612         for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3613                 pi->dpm_table.vddc_table.dpm_levels[i].value =
3614                         allowed_sclk_vddc_table->entries[i].v;
3615                 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3616                         std_voltage_table->entries[i].leakage;
3617                 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3618         }
3619         pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3620
3621         allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3622         if (allowed_mclk_table) {
3623                 for (i = 0; i < allowed_mclk_table->count; i++) {
3624                         pi->dpm_table.vddci_table.dpm_levels[i].value =
3625                                 allowed_mclk_table->entries[i].v;
3626                         pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3627                 }
3628                 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3629         }
3630
3631         allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3632         if (allowed_mclk_table) {
3633                 for (i = 0; i < allowed_mclk_table->count; i++) {
3634                         pi->dpm_table.mvdd_table.dpm_levels[i].value =
3635                                 allowed_mclk_table->entries[i].v;
3636                         pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3637                 }
3638                 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3639         }
3640
3641         ci_setup_default_pcie_tables(adev);
3642
3643         /* save a copy of the default DPM table */
3644         memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
3645                         sizeof(struct ci_dpm_table));
3646
3647         return 0;
3648 }
3649
3650 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3651                               u32 value, u32 *boot_level)
3652 {
3653         u32 i;
3654         int ret = -EINVAL;
3655
3656         for (i = 0; i < table->count; i++) {
3657                 if (value == table->dpm_levels[i].value) {
3658                         *boot_level = i;
3659                         ret = 0;
3660                 }
3661         }
3662
3663         return ret;
3664 }
3665
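/*
 * Build the complete SMU7 DPM table (graphics, memory, link, UVD/VCE/
 * ACP/SAMU, ACPI and ULV levels plus the global flags and thresholds)
 * and upload it to SMC RAM. The copy size excludes what appear to be
 * three trailing SMU7_PIDController blocks at the end of the structure.
 */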
3666 static int ci_init_smc_table(struct amdgpu_device *adev)
3667 {
3668         struct ci_power_info *pi = ci_get_pi(adev);
3669         struct ci_ulv_parm *ulv = &pi->ulv;
3670         struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
3671         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3672         int ret;
3673
3674         ret = ci_setup_default_dpm_tables(adev);
3675         if (ret)
3676                 return ret;
3677
3678         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3679                 ci_populate_smc_voltage_tables(adev, table);
3680
3681         ci_init_fps_limits(adev);
3682
3683         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3684                 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3685
3686         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3687                 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3688
3689         if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
3690                 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3691
3692         if (ulv->supported) {
3693                 ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
3694                 if (ret)
3695                         return ret;
3696                 WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3697         }
3698
3699         ret = ci_populate_all_graphic_levels(adev);
3700         if (ret)
3701                 return ret;
3702
3703         ret = ci_populate_all_memory_levels(adev);
3704         if (ret)
3705                 return ret;
3706
3707         ci_populate_smc_link_level(adev, table);
3708
3709         ret = ci_populate_smc_acpi_level(adev, table);
3710         if (ret)
3711                 return ret;
3712
3713         ret = ci_populate_smc_vce_level(adev, table);
3714         if (ret)
3715                 return ret;
3716
3717         ret = ci_populate_smc_acp_level(adev, table);
3718         if (ret)
3719                 return ret;
3720
3721         ret = ci_populate_smc_samu_level(adev, table);
3722         if (ret)
3723                 return ret;
3724
3725         ret = ci_do_program_memory_timing_parameters(adev);
3726         if (ret)
3727                 return ret;
3728
3729         ret = ci_populate_smc_uvd_level(adev, table);
3730         if (ret)
3731                 return ret;
3732
3733         table->UvdBootLevel = 0;
3734         table->VceBootLevel = 0;
3735         table->AcpBootLevel = 0;
3736         table->SamuBootLevel = 0;
3737         table->GraphicsBootLevel = 0;
3738         table->MemoryBootLevel = 0;
3739
3740         ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3741                                  pi->vbios_boot_state.sclk_bootup_value,
3742                                  (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3743
3744         ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3745                                  pi->vbios_boot_state.mclk_bootup_value,
3746                                  (u32 *)&pi->smc_state_table.MemoryBootLevel);
3747
3748         table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3749         table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3750         table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3751
3752         ci_populate_smc_initial_state(adev, amdgpu_boot_state);
3753
3754         ret = ci_populate_bapm_parameters_in_dpm_table(adev);
3755         if (ret)
3756                 return ret;
3757
3758         table->UVDInterval = 1;
3759         table->VCEInterval = 1;
3760         table->ACPInterval = 1;
3761         table->SAMUInterval = 1;
3762         table->GraphicsVoltageChangeEnable = 1;
3763         table->GraphicsThermThrottleEnable = 1;
3764         table->GraphicsInterval = 1;
3765         table->VoltageInterval = 1;
3766         table->ThermalInterval = 1;
3767         table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3768                                              CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3769         table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3770                                             CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3771         table->MemoryVoltageChangeEnable = 1;
3772         table->MemoryInterval = 1;
3773         table->VoltageResponseTime = 0;
3774         table->VddcVddciDelta = 4000;
3775         table->PhaseResponseTime = 0;
3776         table->MemoryThermThrottleEnable = 1;
3777         table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3778         table->PCIeGenInterval = 1;
3779         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3780                 table->SVI2Enable = 1;
3781         else
3782                 table->SVI2Enable = 0;
3783
3784         table->ThermGpio = 17;
3785         table->SclkStepSize = 0x4000;
3786
3787         table->SystemFlags = cpu_to_be32(table->SystemFlags);
3788         table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3789         table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3790         table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3791         table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3792         table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3793         table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3794         table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3795         table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3796         table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3797         table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3798         table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3799         table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3800         table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3801
3802         ret = amdgpu_ci_copy_bytes_to_smc(adev,
3803                                    pi->dpm_table_start +
3804                                    offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3805                                    (u8 *)&table->SystemFlags,
3806                                    sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3807                                    pi->sram_end);
3808         if (ret)
3809                 return ret;
3810
3811         return 0;
3812 }
3813
3814 static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3815                                       struct ci_single_dpm_table *dpm_table,
3816                                       u32 low_limit, u32 high_limit)
3817 {
3818         u32 i;
3819
3820         for (i = 0; i < dpm_table->count; i++) {
3821                 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3822                     (dpm_table->dpm_levels[i].value > high_limit))
3823                         dpm_table->dpm_levels[i].enabled = false;
3824                 else
3825                         dpm_table->dpm_levels[i].enabled = true;
3826         }
3827 }
3828
3829 static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3830                                     u32 speed_low, u32 lanes_low,
3831                                     u32 speed_high, u32 lanes_high)
3832 {
3833         struct ci_power_info *pi = ci_get_pi(adev);
3834         struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3835         u32 i, j;
3836
3837         for (i = 0; i < pcie_table->count; i++) {
3838                 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3839                     (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3840                     (pcie_table->dpm_levels[i].value > speed_high) ||
3841                     (pcie_table->dpm_levels[i].param1 > lanes_high))
3842                         pcie_table->dpm_levels[i].enabled = false;
3843                 else
3844                         pcie_table->dpm_levels[i].enabled = true;
3845         }
3846
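        /* Drop duplicates: if two enabled levels share the same speed
         * and lane count, keep only the first one.
         */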
3847         for (i = 0; i < pcie_table->count; i++) {
3848                 if (pcie_table->dpm_levels[i].enabled) {
3849                         for (j = i + 1; j < pcie_table->count; j++) {
3850                                 if (pcie_table->dpm_levels[j].enabled) {
3851                                         if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3852                                             (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3853                                                 pcie_table->dpm_levels[j].enabled = false;
3854                                 }
3855                         }
3856                 }
3857         }
3858 }
3859
3860 static int ci_trim_dpm_states(struct amdgpu_device *adev,
3861                               struct amdgpu_ps *amdgpu_state)
3862 {
3863         struct ci_ps *state = ci_get_ps(amdgpu_state);
3864         struct ci_power_info *pi = ci_get_pi(adev);
3865         u32 high_limit_count;
3866
3867         if (state->performance_level_count < 1)
3868                 return -EINVAL;
3869
3870         if (state->performance_level_count == 1)
3871                 high_limit_count = 0;
3872         else
3873                 high_limit_count = 1;
3874
3875         ci_trim_single_dpm_states(adev,
3876                                   &pi->dpm_table.sclk_table,
3877                                   state->performance_levels[0].sclk,
3878                                   state->performance_levels[high_limit_count].sclk);
3879
3880         ci_trim_single_dpm_states(adev,
3881                                   &pi->dpm_table.mclk_table,
3882                                   state->performance_levels[0].mclk,
3883                                   state->performance_levels[high_limit_count].mclk);
3884
3885         ci_trim_pcie_dpm_states(adev,
3886                                 state->performance_levels[0].pcie_gen,
3887                                 state->performance_levels[0].pcie_lane,
3888                                 state->performance_levels[high_limit_count].pcie_gen,
3889                                 state->performance_levels[high_limit_count].pcie_lane);
3890
3891         return 0;
3892 }
3893
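/* Look up the VDDC required by the current display clock and ask the
 * SMC to honor it as a minimum (the request is scaled by VOLTAGE_SCALE).
 * Returns -EINVAL if the dependency tables are missing or no entry fits.
 */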
3894 static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3895 {
3896         struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3897                 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3898         struct amdgpu_clock_voltage_dependency_table *vddc_table =
3899                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3900         u32 requested_voltage = 0;
3901         u32 i;
3902
3903         if (disp_voltage_table == NULL)
3904                 return -EINVAL;
3905         if (!disp_voltage_table->count)
3906                 return -EINVAL;
3907
3908         for (i = 0; i < disp_voltage_table->count; i++) {
3909                 if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3910                         requested_voltage = disp_voltage_table->entries[i].v;
3911         }
3912
3913         for (i = 0; i < vddc_table->count; i++) {
3914                 if (requested_voltage <= vddc_table->entries[i].v) {
3915                         requested_voltage = vddc_table->entries[i].v;
3916                         return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3917                                                                   PPSMC_MSG_VddC_Request,
3918                                                                   requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3919                                 0 : -EINVAL;
3920                 }
3921         }
3922
3923         return -EINVAL;
3924 }
3925
3926 static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
3927 {
3928         struct ci_power_info *pi = ci_get_pi(adev);
3929         PPSMC_Result result;
3930
3931         ci_apply_disp_minimum_voltage_request(adev);
3932
3933         if (!pi->sclk_dpm_key_disabled) {
3934                 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3935                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3936                                                                    PPSMC_MSG_SCLKDPM_SetEnabledMask,
3937                                                                    pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3938                         if (result != PPSMC_Result_OK)
3939                                 return -EINVAL;
3940                 }
3941         }
3942
3943         if (!pi->mclk_dpm_key_disabled) {
3944                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3945                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3946                                                                    PPSMC_MSG_MCLKDPM_SetEnabledMask,
3947                                                                    pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3948                         if (result != PPSMC_Result_OK)
3949                                 return -EINVAL;
3950                 }
3951         }
3952
3953 #if 0
3954         if (!pi->pcie_dpm_key_disabled) {
3955                 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3956                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3957                                                                    PPSMC_MSG_PCIeDPM_SetEnabledMask,
3958                                                                    pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3959                         if (result != PPSMC_Result_OK)
3960                                 return -EINVAL;
3961                 }
3962         }
3963 #endif
3964
3965         return 0;
3966 }
3967
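/* Compare the requested state's top sclk/mclk against the current DPM
 * tables and record which tables must be rebuilt: OD_UPDATE_* when the
 * clock is not present in the table at all, UPDATE_MCLK when the number
 * of active displays has changed.
 */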
3968 static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
3969                                                    struct amdgpu_ps *amdgpu_state)
3970 {
3971         struct ci_power_info *pi = ci_get_pi(adev);
3972         struct ci_ps *state = ci_get_ps(amdgpu_state);
3973         struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3974         u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3975         struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3976         u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3977         u32 i;
3978
3979         pi->need_update_smu7_dpm_table = 0;
3980
3981         for (i = 0; i < sclk_table->count; i++) {
3982                 if (sclk == sclk_table->dpm_levels[i].value)
3983                         break;
3984         }
3985
3986         if (i >= sclk_table->count) {
3987                 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
        } else {
                /* XXX check display min clock requirements; no such
                 * check is implemented, so DPMTABLE_UPDATE_SCLK is
                 * never set from this path.
                 */
        }
3993
3994         for (i = 0; i < mclk_table->count; i++) {
3995                 if (mclk == mclk_table->dpm_levels[i].value)
3996                         break;
3997         }
3998
3999         if (i >= mclk_table->count)
4000                 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4001
4002         if (adev->pm.dpm.current_active_crtc_count !=
4003             adev->pm.dpm.new_active_crtc_count)
4004                 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4005 }
4006
4007 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4008                                                        struct amdgpu_ps *amdgpu_state)
4009 {
4010         struct ci_power_info *pi = ci_get_pi(adev);
4011         struct ci_ps *state = ci_get_ps(amdgpu_state);
4012         u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4013         u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4014         struct ci_dpm_table *dpm_table = &pi->dpm_table;
4015         int ret;
4016
4017         if (!pi->need_update_smu7_dpm_table)
4018                 return 0;
4019
4020         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4021                 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4022
4023         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4024                 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4025
4026         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4027                 ret = ci_populate_all_graphic_levels(adev);
4028                 if (ret)
4029                         return ret;
4030         }
4031
4032         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4033                 ret = ci_populate_all_memory_levels(adev);
4034                 if (ret)
4035                         return ret;
4036         }
4037
4038         return 0;
4039 }
4040
4041 static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4042 {
4043         struct ci_power_info *pi = ci_get_pi(adev);
4044         const struct amdgpu_clock_and_voltage_limits *max_limits;
4045         int i;
4046
4047         if (adev->pm.dpm.ac_power)
4048                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4049         else
4050                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4051
4052         if (enable) {
4053                 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
4054
4055                 for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4056                         if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4057                                 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
4058
4059                                 if (!pi->caps_uvd_dpm)
4060                                         break;
4061                         }
4062                 }
4063
4064                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4065                                                   PPSMC_MSG_UVDDPM_SetEnabledMask,
4066                                                   pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
4067
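                /* Memory clock switching can disturb active UVD decode,
                 * so while UVD is on, mask off the lowest mclk level to
                 * keep the memory clock from dropping; the level is
                 * restored in the disable path below.
                 */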
4068                 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4069                         pi->uvd_enabled = true;
4070                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4071                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4072                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
4073                                                           pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4074                 }
4075         } else {
4076                 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4077                         pi->uvd_enabled = false;
4078                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4079                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4080                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
4081                                                           pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4082                 }
4083         }
4084
4085         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4086                                    PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
4087                 0 : -EINVAL;
4088 }
4089
4090 static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4091 {
4092         struct ci_power_info *pi = ci_get_pi(adev);
4093         const struct amdgpu_clock_and_voltage_limits *max_limits;
4094         int i;
4095
4096         if (adev->pm.dpm.ac_power)
4097                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4098         else
4099                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4100
4101         if (enable) {
4102                 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4103                 for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4104                         if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4105                                 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4106
4107                                 if (!pi->caps_vce_dpm)
4108                                         break;
4109                         }
4110                 }
4111
4112                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4113                                                   PPSMC_MSG_VCEDPM_SetEnabledMask,
4114                                                   pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4115         }
4116
4117         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4118                                    PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4119                 0 : -EINVAL;
4120 }
4121
4122 #if 0
4123 static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
4124 {
4125         struct ci_power_info *pi = ci_get_pi(adev);
4126         const struct amdgpu_clock_and_voltage_limits *max_limits;
4127         int i;
4128
4129         if (adev->pm.dpm.ac_power)
4130                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4131         else
4132                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4133
4134         if (enable) {
4135                 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4136                 for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4137                         if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4138                                 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4139
4140                                 if (!pi->caps_samu_dpm)
4141                                         break;
4142                         }
4143                 }
4144
4145                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4146                                                   PPSMC_MSG_SAMUDPM_SetEnabledMask,
4147                                                   pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4148         }
4149         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4150                                    PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4151                 0 : -EINVAL;
4152 }
4153
4154 static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
4155 {
4156         struct ci_power_info *pi = ci_get_pi(adev);
4157         const struct amdgpu_clock_and_voltage_limits *max_limits;
4158         int i;
4159
4160         if (adev->pm.dpm.ac_power)
4161                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4162         else
4163                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4164
4165         if (enable) {
4166                 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4167                 for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4168                         if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4169                                 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4170
4171                                 if (!pi->caps_acp_dpm)
4172                                         break;
4173                         }
4174                 }
4175
4176                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4177                                                   PPSMC_MSG_ACPDPM_SetEnabledMask,
4178                                                   pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4179         }
4180
4181         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4182                                    PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4183                 0 : -EINVAL;
4184 }
4185 #endif
4186
4187 static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4188 {
4189         struct ci_power_info *pi = ci_get_pi(adev);
4190         u32 tmp;
4191
4192         if (!gate) {
4193                 if (pi->caps_uvd_dpm ||
4194                     (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4195                         pi->smc_state_table.UvdBootLevel = 0;
4196                 else
4197                         pi->smc_state_table.UvdBootLevel =
4198                                 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4199
4200                 tmp = RREG32_SMC(ixDPM_TABLE_475);
4201                 tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4202                 tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4203                 WREG32_SMC(ixDPM_TABLE_475, tmp);
4204         }
4205
4206         return ci_enable_uvd_dpm(adev, !gate);
4207 }
4208
4209 static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4210 {
4211         u8 i;
4212         u32 min_evclk = 30000; /* ??? */
4213         struct amdgpu_vce_clock_voltage_dependency_table *table =
4214                 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4215
4216         for (i = 0; i < table->count; i++) {
4217                 if (table->entries[i].evclk >= min_evclk)
4218                         return i;
4219         }
4220
4221         return table->count - 1;
4222 }
4223
4224 static int ci_update_vce_dpm(struct amdgpu_device *adev,
4225                              struct amdgpu_ps *amdgpu_new_state,
4226                              struct amdgpu_ps *amdgpu_current_state)
4227 {
4228         struct ci_power_info *pi = ci_get_pi(adev);
4229         int ret = 0;
4230         u32 tmp;
4231
4232         if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4233                 if (amdgpu_new_state->evclk) {
4234                         /* turn the clocks on when encoding */
4235                         ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4236                                                             AMD_CG_STATE_UNGATE);
4237                         if (ret)
4238                                 return ret;
4239
4240                         pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4241                         tmp = RREG32_SMC(ixDPM_TABLE_475);
4242                         tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4243                         tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4244                         WREG32_SMC(ixDPM_TABLE_475, tmp);
4245
4246                         ret = ci_enable_vce_dpm(adev, true);
4247                 } else {
4248                         /* turn the clocks off when not encoding */
4249                         ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4250                                                             AMD_CG_STATE_GATE);
4251                         if (ret)
4252                                 return ret;
4253
4254                         ret = ci_enable_vce_dpm(adev, false);
4255                 }
4256         }
4257         return ret;
4258 }
4259
4260 #if 0
4261 static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
4262 {
4263         return ci_enable_samu_dpm(adev, gate);
4264 }
4265
4266 static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
4267 {
4268         struct ci_power_info *pi = ci_get_pi(adev);
4269         u32 tmp;
4270
4271         if (!gate) {
4272                 pi->smc_state_table.AcpBootLevel = 0;
4273
4274                 tmp = RREG32_SMC(ixDPM_TABLE_475);
4275                 tmp &= ~AcpBootLevel_MASK;
4276                 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
4277                 WREG32_SMC(ixDPM_TABLE_475, tmp);
4278         }
4279
4280         return ci_enable_acp_dpm(adev, !gate);
4281 }
4282 #endif
4283
4284 static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4285                                              struct amdgpu_ps *amdgpu_state)
4286 {
4287         struct ci_power_info *pi = ci_get_pi(adev);
4288         int ret;
4289
4290         ret = ci_trim_dpm_states(adev, amdgpu_state);
4291         if (ret)
4292                 return ret;
4293
4294         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4295                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4296         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4297                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4298         pi->last_mclk_dpm_enable_mask =
4299                 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4300         if (pi->uvd_enabled) {
4301                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4302                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4303         }
4304         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4305                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4306
4307         return 0;
4308 }
4309
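/* Return the index of the lowest set bit in level_mask. Callers must
 * pass a non-zero mask; an empty mask would loop forever.
 */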
4310 static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4311                                        u32 level_mask)
4312 {
4313         u32 level = 0;
4314
4315         while ((level_mask & (1 << level)) == 0)
4316                 level++;
4317
4318         return level;
4319 }
4320
4321
4322 static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
4323                                           enum amdgpu_dpm_forced_level level)
4324 {
4325         struct ci_power_info *pi = ci_get_pi(adev);
4326         u32 tmp, levels, i;
4327         int ret;
4328
4329         if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
4330                 if ((!pi->pcie_dpm_key_disabled) &&
4331                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4332                         levels = 0;
4333                         tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4334                         while (tmp >>= 1)
4335                                 levels++;
4336                         if (levels) {
                                ret = ci_dpm_force_state_pcie(adev, levels);
4338                                 if (ret)
4339                                         return ret;
4340                                 for (i = 0; i < adev->usec_timeout; i++) {
4341                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4342                                         TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4343                                         TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4344                                         if (tmp == levels)
4345                                                 break;
4346                                         udelay(1);
4347                                 }
4348                         }
4349                 }
4350                 if ((!pi->sclk_dpm_key_disabled) &&
4351                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4352                         levels = 0;
4353                         tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4354                         while (tmp >>= 1)
4355                                 levels++;
4356                         if (levels) {
4357                                 ret = ci_dpm_force_state_sclk(adev, levels);
4358                                 if (ret)
4359                                         return ret;
4360                                 for (i = 0; i < adev->usec_timeout; i++) {
4361                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4362                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4363                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4364                                         if (tmp == levels)
4365                                                 break;
4366                                         udelay(1);
4367                                 }
4368                         }
4369                 }
4370                 if ((!pi->mclk_dpm_key_disabled) &&
4371                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4372                         levels = 0;
4373                         tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4374                         while (tmp >>= 1)
4375                                 levels++;
4376                         if (levels) {
4377                                 ret = ci_dpm_force_state_mclk(adev, levels);
4378                                 if (ret)
4379                                         return ret;
4380                                 for (i = 0; i < adev->usec_timeout; i++) {
4381                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4382                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4383                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4384                                         if (tmp == levels)
4385                                                 break;
4386                                         udelay(1);
4387                                 }
4388                         }
4389                 }
4390         } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
4391                 if ((!pi->sclk_dpm_key_disabled) &&
4392                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4393                         levels = ci_get_lowest_enabled_level(adev,
4394                                                              pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4395                         ret = ci_dpm_force_state_sclk(adev, levels);
4396                         if (ret)
4397                                 return ret;
4398                         for (i = 0; i < adev->usec_timeout; i++) {
4399                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4400                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4401                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4402                                 if (tmp == levels)
4403                                         break;
4404                                 udelay(1);
4405                         }
4406                 }
4407                 if ((!pi->mclk_dpm_key_disabled) &&
4408                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4409                         levels = ci_get_lowest_enabled_level(adev,
4410                                                              pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4411                         ret = ci_dpm_force_state_mclk(adev, levels);
4412                         if (ret)
4413                                 return ret;
4414                         for (i = 0; i < adev->usec_timeout; i++) {
4415                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4416                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4417                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4418                                 if (tmp == levels)
4419                                         break;
4420                                 udelay(1);
4421                         }
4422                 }
4423                 if ((!pi->pcie_dpm_key_disabled) &&
4424                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4425                         levels = ci_get_lowest_enabled_level(adev,
4426                                                              pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4427                         ret = ci_dpm_force_state_pcie(adev, levels);
4428                         if (ret)
4429                                 return ret;
4430                         for (i = 0; i < adev->usec_timeout; i++) {
4431                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4432                                 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4433                                 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4434                                 if (tmp == levels)
4435                                         break;
4436                                 udelay(1);
4437                         }
4438                 }
4439         } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
4440                 if (!pi->pcie_dpm_key_disabled) {
4441                         PPSMC_Result smc_result;
4442
4443                         smc_result = amdgpu_ci_send_msg_to_smc(adev,
4444                                                                PPSMC_MSG_PCIeDPM_UnForceLevel);
4445                         if (smc_result != PPSMC_Result_OK)
4446                                 return -EINVAL;
4447                 }
4448                 ret = ci_upload_dpm_level_enable_mask(adev);
4449                 if (ret)
4450                         return ret;
4451         }
4452
4453         adev->pm.dpm.forced_level = level;
4454
4455         return 0;
4456 }
4457
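/* Derive the extra MC registers (the EMRS/MRS/MRS1 shadows and, for
 * non-GDDR5 parts, MC_PMG_AUTO_CMD) from the base sequencer values and
 * append them after the VBIOS-supplied entries.
 */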
4458 static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4459                                        struct ci_mc_reg_table *table)
4460 {
4461         u8 i, j, k;
4462         u32 temp_reg;
4463
4464         for (i = 0, j = table->last; i < table->last; i++) {
4465                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4466                         return -EINVAL;
                switch (table->mc_reg_address[i].s1) {
4468                 case mmMC_SEQ_MISC1:
4469                         temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4470                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4471                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4472                         for (k = 0; k < table->num_entries; k++) {
4473                                 table->mc_reg_table_entry[k].mc_data[j] =
4474                                         ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4475                         }
4476                         j++;
4477                         if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4478                                 return -EINVAL;
4479
4480                         temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4481                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4482                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4483                         for (k = 0; k < table->num_entries; k++) {
4484                                 table->mc_reg_table_entry[k].mc_data[j] =
4485                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4486                                 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4487                                         table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4488                         }
4489                         j++;
                        if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4491                                 return -EINVAL;
4492
4493                         if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4494                                 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4495                                 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4496                                 for (k = 0; k < table->num_entries; k++) {
4497                                         table->mc_reg_table_entry[k].mc_data[j] =
4498                                                 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4499                                 }
4500                                 j++;
                                if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4502                                         return -EINVAL;
4503                         }
4504                         break;
4505                 case mmMC_SEQ_RESERVE_M:
4506                         temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4507                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4508                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4509                         for (k = 0; k < table->num_entries; k++) {
4510                                 table->mc_reg_table_entry[k].mc_data[j] =
4511                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4512                         }
4513                         j++;
                        if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4515                                 return -EINVAL;
4516                         break;
4517                 default:
4518                         break;
4519                 }
4520
4521         }
4522
4523         table->last = j;
4524
4525         return 0;
4526 }
4527
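/* Map an MC sequencer register onto its _LP shadow; returns false when
 * the register has no shadow and should be used as-is.
 */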
4528 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4529 {
4530         bool result = true;
4531
        switch (in_reg) {
4533         case mmMC_SEQ_RAS_TIMING:
4534                 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
4535                 break;
4536         case mmMC_SEQ_DLL_STBY:
4537                 *out_reg = mmMC_SEQ_DLL_STBY_LP;
4538                 break;
4539         case mmMC_SEQ_G5PDX_CMD0:
4540                 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4541                 break;
4542         case mmMC_SEQ_G5PDX_CMD1:
4543                 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4544                 break;
4545         case mmMC_SEQ_G5PDX_CTRL:
4546                 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4547                 break;
4548         case mmMC_SEQ_CAS_TIMING:
4549                 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
                break;
4551         case mmMC_SEQ_MISC_TIMING:
4552                 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
4553                 break;
4554         case mmMC_SEQ_MISC_TIMING2:
4555                 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4556                 break;
4557         case mmMC_SEQ_PMG_DVS_CMD:
4558                 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4559                 break;
4560         case mmMC_SEQ_PMG_DVS_CTL:
4561                 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4562                 break;
4563         case mmMC_SEQ_RD_CTL_D0:
4564                 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4565                 break;
4566         case mmMC_SEQ_RD_CTL_D1:
4567                 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4568                 break;
4569         case mmMC_SEQ_WR_CTL_D0:
4570                 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4571                 break;
4572         case mmMC_SEQ_WR_CTL_D1:
4573                 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4574                 break;
4575         case mmMC_PMG_CMD_EMRS:
4576                 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4577                 break;
4578         case mmMC_PMG_CMD_MRS:
4579                 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4580                 break;
4581         case mmMC_PMG_CMD_MRS1:
4582                 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4583                 break;
4584         case mmMC_SEQ_PMG_TIMING:
4585                 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
4586                 break;
4587         case mmMC_PMG_CMD_MRS2:
4588                 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4589                 break;
4590         case mmMC_SEQ_WR_CTL_2:
4591                 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
4592                 break;
4593         default:
4594                 result = false;
4595                 break;
4596         }
4597
4598         return result;
4599 }
4600
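/* Mark the registers whose value actually differs between entries;
 * registers that stay constant across all memory levels need no
 * per-level programming.
 */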
4601 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4602 {
4603         u8 i, j;
4604
4605         for (i = 0; i < table->last; i++) {
4606                 for (j = 1; j < table->num_entries; j++) {
4607                         if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4608                             table->mc_reg_table_entry[j].mc_data[i]) {
4609                                 table->valid_flag |= 1 << i;
4610                                 break;
4611                         }
4612                 }
4613         }
4614 }
4615
4616 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4617 {
4618         u32 i;
4619         u16 address;
4620
4621         for (i = 0; i < table->last; i++) {
4622                 table->mc_reg_address[i].s0 =
4623                         ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4624                         address : table->mc_reg_address[i].s1;
4625         }
4626 }
4627
4628 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4629                                       struct ci_mc_reg_table *ci_table)
4630 {
4631         u8 i, j;
4632
4633         if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4634                 return -EINVAL;
4635         if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4636                 return -EINVAL;
4637
4638         for (i = 0; i < table->last; i++)
4639                 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4640
4641         ci_table->last = table->last;
4642
4643         for (i = 0; i < table->num_entries; i++) {
4644                 ci_table->mc_reg_table_entry[i].mclk_max =
4645                         table->mc_reg_table_entry[i].mclk_max;
4646                 for (j = 0; j < table->last; j++)
4647                         ci_table->mc_reg_table_entry[i].mc_data[j] =
4648                                 table->mc_reg_table_entry[i].mc_data[j];
4649         }
4650         ci_table->num_entries = table->num_entries;
4651
4652         return 0;
4653 }
4654
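/* Apply board-specific MC sequencer fix-ups for certain Hawaii parts
 * (device IDs 0x67B0/0x67B1) at the 125000 and 137500 memory clock
 * entries (units assumed to be 10 kHz, i.e. 1250/1375 MHz).
 */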
4655 static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
4656                                        struct ci_mc_reg_table *table)
4657 {
4658         u8 i, k;
4659         u32 tmp;
4660         bool patch;
4661
4662         tmp = RREG32(mmMC_SEQ_MISC0);
        patch = ((tmp & 0x0000f00) == 0x300);
4664
4665         if (patch &&
4666             ((adev->pdev->device == 0x67B0) ||
4667              (adev->pdev->device == 0x67B1))) {
4668                 for (i = 0; i < table->last; i++) {
4669                         if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4670                                 return -EINVAL;
4671                         switch (table->mc_reg_address[i].s1) {
4672                         case mmMC_SEQ_MISC1:
4673                                 for (k = 0; k < table->num_entries; k++) {
4674                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4675                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4676                                                 table->mc_reg_table_entry[k].mc_data[i] =
4677                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4678                                                         0x00000007;
4679                                 }
4680                                 break;
4681                         case mmMC_SEQ_WR_CTL_D0:
4682                                 for (k = 0; k < table->num_entries; k++) {
4683                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4684                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4685                                                 table->mc_reg_table_entry[k].mc_data[i] =
4686                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4687                                                         0x0000D0DD;
4688                                 }
4689                                 break;
4690                         case mmMC_SEQ_WR_CTL_D1:
4691                                 for (k = 0; k < table->num_entries; k++) {
4692                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4693                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4694                                                 table->mc_reg_table_entry[k].mc_data[i] =
4695                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4696                                                         0x0000D0DD;
4697                                 }
4698                                 break;
4699                         case mmMC_SEQ_WR_CTL_2:
4700                                 for (k = 0; k < table->num_entries; k++) {
4701                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4702                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4703                                                 table->mc_reg_table_entry[k].mc_data[i] = 0;
4704                                 }
4705                                 break;
4706                         case mmMC_SEQ_CAS_TIMING:
4707                                 for (k = 0; k < table->num_entries; k++) {
4708                                         if (table->mc_reg_table_entry[k].mclk_max == 125000)
4709                                                 table->mc_reg_table_entry[k].mc_data[i] =
4710                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4711                                                         0x000C0140;
4712                                         else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4713                                                 table->mc_reg_table_entry[k].mc_data[i] =
4714                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4715                                                         0x000C0150;
4716                                 }
4717                                 break;
4718                         case mmMC_SEQ_MISC_TIMING:
4719                                 for (k = 0; k < table->num_entries; k++) {
4720                                         if (table->mc_reg_table_entry[k].mclk_max == 125000)
4721                                                 table->mc_reg_table_entry[k].mc_data[i] =
4722                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4723                                                         0x00000030;
4724                                         else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4725                                                 table->mc_reg_table_entry[k].mc_data[i] =
4726                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4727                                                         0x00000035;
4728                                 }
4729                                 break;
4730                         default:
4731                                 break;
4732                         }
4733                 }
4734
4735                 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4736                 tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
4737                 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4738                 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4739                 WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
4740         }
4741
4742         return 0;
4743 }
4744
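/* Build the driver's MC register table: seed the _LP shadow registers
 * from the live sequencer values, pull the per-module MC register table
 * out of the VBIOS, then patch and extend it for this board.
 */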
4745 static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4746 {
4747         struct ci_power_info *pi = ci_get_pi(adev);
4748         struct atom_mc_reg_table *table;
4749         struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4750         u8 module_index = ci_get_memory_module_index(adev);
4751         int ret;
4752
4753         table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4754         if (!table)
4755                 return -ENOMEM;
4756
4757         WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4758         WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4759         WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4760         WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4761         WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4762         WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4763         WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4764         WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4765         WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4766         WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4767         WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4768         WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4769         WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4770         WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4771         WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4772         WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4773         WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4774         WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4775         WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4776         WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4777
4778         ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4779         if (ret)
4780                 goto init_mc_done;
4781
4782         ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4783         if (ret)
4784                 goto init_mc_done;
4785
4786         ci_set_s0_mc_reg_index(ci_table);
4787
4788         ret = ci_register_patching_mc_seq(adev, ci_table);
4789         if (ret)
4790                 goto init_mc_done;
4791
4792         ret = ci_set_mc_special_registers(adev, ci_table);
4793         if (ret)
4794                 goto init_mc_done;
4795
4796         ci_set_valid_flag(ci_table);
4797
4798 init_mc_done:
4799         kfree(table);
4800
4801         return ret;
4802 }
4803
4804 static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4805                                         SMU7_Discrete_MCRegisters *mc_reg_table)
4806 {
4807         struct ci_power_info *pi = ci_get_pi(adev);
4808         u32 i, j;
4809
4810         for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4811                 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4812                         if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4813                                 return -EINVAL;
4814                         mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4815                         mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4816                         i++;
4817                 }
4818         }
4819
4820         mc_reg_table->last = (u8)i;
4821
4822         return 0;
4823 }
4824
4825 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4826                                     SMU7_Discrete_MCRegisterSet *data,
4827                                     u32 num_entries, u32 valid_flag)
4828 {
4829         u32 i, j;
4830
4831         for (i = 0, j = 0; j < num_entries; j++) {
4832                 if (valid_flag & (1 << j)) {
4833                         data->value[i] = cpu_to_be32(entry->mc_data[j]);
4834                         i++;
4835                 }
4836         }
4837 }
4838
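/* Pick the first register set whose mclk_max covers memory_clock,
 * clamping to the last entry when the clock exceeds every range.
 */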
4839 static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4840                                                  const u32 memory_clock,
4841                                                  SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4842 {
4843         struct ci_power_info *pi = ci_get_pi(adev);
4844         u32 i = 0;
4845
        for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4847                 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4848                         break;
4849         }
4850
4851         if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4852                 --i;
4853
4854         ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4855                                 mc_reg_table_data, pi->mc_reg_table.last,
4856                                 pi->mc_reg_table.valid_flag);
4857 }
4858
4859 static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4860                                            SMU7_Discrete_MCRegisters *mc_reg_table)
4861 {
4862         struct ci_power_info *pi = ci_get_pi(adev);
4863         u32 i;
4864
4865         for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4866                 ci_convert_mc_reg_table_entry_to_smc(adev,
4867                                                      pi->dpm_table.mclk_table.dpm_levels[i].value,
4868                                                      &mc_reg_table->data[i]);
4869 }
4870
4871 static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4872 {
4873         struct ci_power_info *pi = ci_get_pi(adev);
4874         int ret;
4875
4876         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4877
4878         ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4879         if (ret)
4880                 return ret;
4881         ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4882
4883         return amdgpu_ci_copy_bytes_to_smc(adev,
4884                                     pi->mc_reg_table_start,
4885                                     (u8 *)&pi->smc_mc_reg_table,
4886                                     sizeof(SMU7_Discrete_MCRegisters),
4887                                     pi->sram_end);
4888 }
4889
4890 static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4891 {
4892         struct ci_power_info *pi = ci_get_pi(adev);
4893
4894         if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4895                 return 0;
4896
4897         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4898
4899         ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4900
4901         return amdgpu_ci_copy_bytes_to_smc(adev,
4902                                     pi->mc_reg_table_start +
4903                                     offsetof(SMU7_Discrete_MCRegisters, data[0]),
4904                                     (u8 *)&pi->smc_mc_reg_table.data[0],
4905                                     sizeof(SMU7_Discrete_MCRegisterSet) *
4906                                     pi->dpm_table.mclk_table.count,
4907                                     pi->sram_end);
4908 }
4909
4910 static void ci_enable_voltage_control(struct amdgpu_device *adev)
4911 {
4912         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4913
4914         tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4915         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4916 }
4917
4918 static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4919                                                       struct amdgpu_ps *amdgpu_state)
4920 {
4921         struct ci_ps *state = ci_get_ps(amdgpu_state);
4922         int i;
4923         u16 pcie_speed, max_speed = 0;
4924
4925         for (i = 0; i < state->performance_level_count; i++) {
4926                 pcie_speed = state->performance_levels[i].pcie_gen;
4927                 if (max_speed < pcie_speed)
4928                         max_speed = pcie_speed;
4929         }
4930
4931         return max_speed;
4932 }
4933
4934 static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
4935 {
4936         u32 speed_cntl = 0;
4937
4938         speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
4939                 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
4940         speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
4941
4942         return (u16)speed_cntl;
4943 }
4944
4945 static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
4946 {
4947         u32 link_width = 0;
4948
4949         link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
4950                 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
4951         link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
4952
4953         switch (link_width) {
4954         case 1:
4955                 return 1;
4956         case 2:
4957                 return 2;
4958         case 3:
4959                 return 4;
4960         case 4:
4961                 return 8;
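        /* 0 and 6 both map to x16 here; any unrecognized encoding
         * (presumably including the x12 value) falls back to x16 as a
         * safe default.
         */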
4962         case 0:
4963         case 6:
4964         default:
4965                 return 16;
4966         }
4967 }
4968
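/* If the new state needs a faster PCIe link, request the upshift via
 * ACPI before the state change; a downshift is only noted here
 * (pspp_notify_required) and performed after the state change.
 */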
4969 static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
4970                                                              struct amdgpu_ps *amdgpu_new_state,
4971                                                              struct amdgpu_ps *amdgpu_current_state)
4972 {
4973         struct ci_power_info *pi = ci_get_pi(adev);
4974         enum amdgpu_pcie_gen target_link_speed =
4975                 ci_get_maximum_link_speed(adev, amdgpu_new_state);
4976         enum amdgpu_pcie_gen current_link_speed;
4977
4978         if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
4979                 current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
4980         else
4981                 current_link_speed = pi->force_pcie_gen;
4982
4983         pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
4984         pi->pspp_notify_required = false;
4985         if (target_link_speed > current_link_speed) {
4986                 switch (target_link_speed) {
4987 #ifdef CONFIG_ACPI
4988                 case AMDGPU_PCIE_GEN3:
4989                         if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4990                                 break;
4991                         pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
4992                         if (current_link_speed == AMDGPU_PCIE_GEN2)
4993                                 break;
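                        /* fall through and try a GEN2 request instead */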
4994                 case AMDGPU_PCIE_GEN2:
4995                         if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4996                                 break;
4997 #endif
4998                 default:
4999                         pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
5000                         break;
5001                 }
5002         } else {
5003                 if (target_link_speed < current_link_speed)
5004                         pi->pspp_notify_required = true;
5005         }
5006 }
5007
5008 static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
5009                                                            struct amdgpu_ps *amdgpu_new_state,
5010                                                            struct amdgpu_ps *amdgpu_current_state)
5011 {
5012         struct ci_power_info *pi = ci_get_pi(adev);
5013         enum amdgpu_pcie_gen target_link_speed =
5014                 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5015         u8 request;
5016
5017         if (pi->pspp_notify_required) {
5018                 if (target_link_speed == AMDGPU_PCIE_GEN3)
5019                         request = PCIE_PERF_REQ_PECI_GEN3;
5020                 else if (target_link_speed == AMDGPU_PCIE_GEN2)
5021                         request = PCIE_PERF_REQ_PECI_GEN2;
5022                 else
5023                         request = PCIE_PERF_REQ_PECI_GEN1;
5024
5025                 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5026                     (ci_get_current_pcie_speed(adev) > 0))
5027                         return;
5028
5029 #ifdef CONFIG_ACPI
5030                 amdgpu_acpi_pcie_performance_request(adev, request, false);
5031 #endif
5032         }
5033 }
5034
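/*
 * Cache the voltage bounds from the dependency tables and derive the
 * maximum AC clock/voltage limits from their last (highest) entries.
 */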
5035 static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
5036 {
5037         struct ci_power_info *pi = ci_get_pi(adev);
5038         struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
5039                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
5040         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
5041                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
5042         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
5043                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
5044
5045         if (allowed_sclk_vddc_table == NULL)
5046                 return -EINVAL;
5047         if (allowed_sclk_vddc_table->count < 1)
5048                 return -EINVAL;
5049         if (allowed_mclk_vddc_table == NULL)
5050                 return -EINVAL;
5051         if (allowed_mclk_vddc_table->count < 1)
5052                 return -EINVAL;
5053         if (allowed_mclk_vddci_table == NULL)
5054                 return -EINVAL;
5055         if (allowed_mclk_vddci_table->count < 1)
5056                 return -EINVAL;
5057
5058         pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
5059         pi->max_vddc_in_pp_table =
5060                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5061
5062         pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
5063         pi->max_vddci_in_pp_table =
5064                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5065
5066         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
5067                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	/* index with the mclk table's own count; using the sclk table's
	 * count here could read past the end of a shorter mclk table */
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
5070         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
5071                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5072         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
5073                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5074
5075         return 0;
5076 }
5077
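/*
 * Voltage entries in the power-play table may hold a virtual leakage
 * ID instead of a real voltage; look the ID up in the leakage table
 * and substitute the actual measured voltage.
 */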
5078 static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
5079 {
5080         struct ci_power_info *pi = ci_get_pi(adev);
5081         struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
5082         u32 leakage_index;
5083
5084         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5085                 if (leakage_table->leakage_id[leakage_index] == *vddc) {
5086                         *vddc = leakage_table->actual_voltage[leakage_index];
5087                         break;
5088                 }
5089         }
5090 }
5091
5092 static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
5093 {
5094         struct ci_power_info *pi = ci_get_pi(adev);
5095         struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
5096         u32 leakage_index;
5097
5098         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5099                 if (leakage_table->leakage_id[leakage_index] == *vddci) {
5100                         *vddci = leakage_table->actual_voltage[leakage_index];
5101                         break;
5102                 }
5103         }
5104 }
5105
5106 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5107                                                                       struct amdgpu_clock_voltage_dependency_table *table)
5108 {
5109         u32 i;
5110
5111         if (table) {
5112                 for (i = 0; i < table->count; i++)
5113                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5114         }
5115 }
5116
5117 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
5118                                                                        struct amdgpu_clock_voltage_dependency_table *table)
5119 {
5120         u32 i;
5121
5122         if (table) {
5123                 for (i = 0; i < table->count; i++)
5124                         ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
5125         }
5126 }
5127
5128 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5129                                                                           struct amdgpu_vce_clock_voltage_dependency_table *table)
5130 {
5131         u32 i;
5132
5133         if (table) {
5134                 for (i = 0; i < table->count; i++)
5135                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5136         }
5137 }
5138
5139 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5140                                                                           struct amdgpu_uvd_clock_voltage_dependency_table *table)
5141 {
5142         u32 i;
5143
5144         if (table) {
5145                 for (i = 0; i < table->count; i++)
5146                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5147         }
5148 }
5149
5150 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
5151                                                                    struct amdgpu_phase_shedding_limits_table *table)
5152 {
5153         u32 i;
5154
5155         if (table) {
5156                 for (i = 0; i < table->count; i++)
5157                         ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
5158         }
5159 }
5160
5161 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
5162                                                             struct amdgpu_clock_and_voltage_limits *table)
5163 {
5164         if (table) {
5165                 ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
5166                 ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
5167         }
5168 }
5169
5170 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
5171                                                          struct amdgpu_cac_leakage_table *table)
5172 {
5173         u32 i;
5174
5175         if (table) {
5176                 for (i = 0; i < table->count; i++)
5177                         ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
5178         }
5179 }
5180
5181 static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
5182 {
5183
5184         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5185                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5186         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5187                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5188         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5189                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5190         ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
5191                                                                    &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5192         ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
5193                                                                       &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5194         ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
5195                                                                       &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5196         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5197                                                                   &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5198         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5199                                                                   &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5200         ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
5201                                                                &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
5202         ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5203                                                         &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5204         ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5205                                                         &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5206         ci_patch_cac_leakage_table_with_vddc_leakage(adev,
5207                                                      &adev->pm.dpm.dyn_state.cac_leakage_table);
5208
5209 }
5210
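/*
 * Snapshot @rps as the current state.  ps_priv is re-pointed at the
 * embedded ci_ps copy so the cached state stays self-contained.
 */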
5211 static void ci_update_current_ps(struct amdgpu_device *adev,
5212                                  struct amdgpu_ps *rps)
5213 {
5214         struct ci_ps *new_ps = ci_get_ps(rps);
5215         struct ci_power_info *pi = ci_get_pi(adev);
5216
5217         pi->current_rps = *rps;
5218         pi->current_ps = *new_ps;
5219         pi->current_rps.ps_priv = &pi->current_ps;
5220 }
5221
5222 static void ci_update_requested_ps(struct amdgpu_device *adev,
5223                                    struct amdgpu_ps *rps)
5224 {
5225         struct ci_ps *new_ps = ci_get_ps(rps);
5226         struct ci_power_info *pi = ci_get_pi(adev);
5227
5228         pi->requested_rps = *rps;
5229         pi->requested_ps = *new_ps;
5230         pi->requested_rps.ps_priv = &pi->requested_ps;
5231 }
5232
5233 static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
5234 {
5235         struct ci_power_info *pi = ci_get_pi(adev);
5236         struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
5237         struct amdgpu_ps *new_ps = &requested_ps;
5238
5239         ci_update_requested_ps(adev, new_ps);
5240
5241         ci_apply_state_adjust_rules(adev, &pi->requested_rps);
5242
5243         return 0;
5244 }
5245
5246 static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
5247 {
5248         struct ci_power_info *pi = ci_get_pi(adev);
5249         struct amdgpu_ps *new_ps = &pi->requested_rps;
5250
5251         ci_update_current_ps(adev, new_ps);
5252 }
5253
5254
5255 static void ci_dpm_setup_asic(struct amdgpu_device *adev)
5256 {
5257         ci_read_clock_registers(adev);
5258         ci_enable_acpi_power_management(adev);
5259         ci_init_sclk_t(adev);
5260 }
5261
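/*
 * Bring up DPM from scratch: program voltage control and the MC/ARB
 * tables, upload the SMC firmware and state tables, start the SMC,
 * then switch on the individual DPM features (ULV, deep sleep, DIDT,
 * CAC, power containment) and the thermal controller.
 */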
5262 static int ci_dpm_enable(struct amdgpu_device *adev)
5263 {
5264         struct ci_power_info *pi = ci_get_pi(adev);
5265         struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5266         int ret;
5267
5268         if (amdgpu_ci_is_smc_running(adev))
5269                 return -EINVAL;
5270         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5271                 ci_enable_voltage_control(adev);
5272                 ret = ci_construct_voltage_tables(adev);
5273                 if (ret) {
5274                         DRM_ERROR("ci_construct_voltage_tables failed\n");
5275                         return ret;
5276                 }
5277         }
5278         if (pi->caps_dynamic_ac_timing) {
5279                 ret = ci_initialize_mc_reg_table(adev);
5280                 if (ret)
5281                         pi->caps_dynamic_ac_timing = false;
5282         }
5283         if (pi->dynamic_ss)
5284                 ci_enable_spread_spectrum(adev, true);
5285         if (pi->thermal_protection)
5286                 ci_enable_thermal_protection(adev, true);
5287         ci_program_sstp(adev);
5288         ci_enable_display_gap(adev);
5289         ci_program_vc(adev);
5290         ret = ci_upload_firmware(adev);
5291         if (ret) {
5292                 DRM_ERROR("ci_upload_firmware failed\n");
5293                 return ret;
5294         }
5295         ret = ci_process_firmware_header(adev);
5296         if (ret) {
5297                 DRM_ERROR("ci_process_firmware_header failed\n");
5298                 return ret;
5299         }
5300         ret = ci_initial_switch_from_arb_f0_to_f1(adev);
5301         if (ret) {
5302                 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5303                 return ret;
5304         }
5305         ret = ci_init_smc_table(adev);
5306         if (ret) {
5307                 DRM_ERROR("ci_init_smc_table failed\n");
5308                 return ret;
5309         }
5310         ret = ci_init_arb_table_index(adev);
5311         if (ret) {
5312                 DRM_ERROR("ci_init_arb_table_index failed\n");
5313                 return ret;
5314         }
5315         if (pi->caps_dynamic_ac_timing) {
5316                 ret = ci_populate_initial_mc_reg_table(adev);
5317                 if (ret) {
5318                         DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5319                         return ret;
5320                 }
5321         }
5322         ret = ci_populate_pm_base(adev);
5323         if (ret) {
5324                 DRM_ERROR("ci_populate_pm_base failed\n");
5325                 return ret;
5326         }
5327         ci_dpm_start_smc(adev);
5328         ci_enable_vr_hot_gpio_interrupt(adev);
5329         ret = ci_notify_smc_display_change(adev, false);
5330         if (ret) {
5331                 DRM_ERROR("ci_notify_smc_display_change failed\n");
5332                 return ret;
5333         }
5334         ci_enable_sclk_control(adev, true);
5335         ret = ci_enable_ulv(adev, true);
5336         if (ret) {
5337                 DRM_ERROR("ci_enable_ulv failed\n");
5338                 return ret;
5339         }
5340         ret = ci_enable_ds_master_switch(adev, true);
5341         if (ret) {
5342                 DRM_ERROR("ci_enable_ds_master_switch failed\n");
5343                 return ret;
5344         }
5345         ret = ci_start_dpm(adev);
5346         if (ret) {
5347                 DRM_ERROR("ci_start_dpm failed\n");
5348                 return ret;
5349         }
5350         ret = ci_enable_didt(adev, true);
5351         if (ret) {
5352                 DRM_ERROR("ci_enable_didt failed\n");
5353                 return ret;
5354         }
5355         ret = ci_enable_smc_cac(adev, true);
5356         if (ret) {
5357                 DRM_ERROR("ci_enable_smc_cac failed\n");
5358                 return ret;
5359         }
5360         ret = ci_enable_power_containment(adev, true);
5361         if (ret) {
5362                 DRM_ERROR("ci_enable_power_containment failed\n");
5363                 return ret;
5364         }
5365
5366         ret = ci_power_control_set_level(adev);
5367         if (ret) {
5368                 DRM_ERROR("ci_power_control_set_level failed\n");
5369                 return ret;
5370         }
5371
5372         ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5373
5374         ret = ci_enable_thermal_based_sclk_dpm(adev, true);
5375         if (ret) {
5376                 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5377                 return ret;
5378         }
5379
5380         ci_thermal_start_thermal_controller(adev);
5381
5382         ci_update_current_ps(adev, boot_ps);
5383
5384         return 0;
5385 }
5386
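/*
 * Tear down DPM in roughly the reverse order of ci_dpm_enable() and
 * fall back to the boot power state.
 */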
5387 static void ci_dpm_disable(struct amdgpu_device *adev)
5388 {
5389         struct ci_power_info *pi = ci_get_pi(adev);
5390         struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5391
5392         amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5393                        AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5394         amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5395                        AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5396
5397         ci_dpm_powergate_uvd(adev, false);
5398
5399         if (!amdgpu_ci_is_smc_running(adev))
5400                 return;
5401
5402         ci_thermal_stop_thermal_controller(adev);
5403
5404         if (pi->thermal_protection)
5405                 ci_enable_thermal_protection(adev, false);
5406         ci_enable_power_containment(adev, false);
5407         ci_enable_smc_cac(adev, false);
5408         ci_enable_didt(adev, false);
5409         ci_enable_spread_spectrum(adev, false);
5410         ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5411         ci_stop_dpm(adev);
5412         ci_enable_ds_master_switch(adev, false);
5413         ci_enable_ulv(adev, false);
5414         ci_clear_vc(adev);
5415         ci_reset_to_default(adev);
5416         ci_dpm_stop_smc(adev);
5417         ci_force_switch_to_arb_f0(adev);
5418         ci_enable_thermal_based_sclk_dpm(adev, false);
5419
5420         ci_update_current_ps(adev, boot_ps);
5421 }
5422
5423 static int ci_dpm_set_power_state(struct amdgpu_device *adev)
5424 {
5425         struct ci_power_info *pi = ci_get_pi(adev);
5426         struct amdgpu_ps *new_ps = &pi->requested_rps;
5427         struct amdgpu_ps *old_ps = &pi->current_rps;
5428         int ret;
5429
5430         ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
5431         if (pi->pcie_performance_request)
5432                 ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
5433         ret = ci_freeze_sclk_mclk_dpm(adev);
5434         if (ret) {
5435                 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5436                 return ret;
5437         }
5438         ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
5439         if (ret) {
5440                 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5441                 return ret;
5442         }
5443         ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
5444         if (ret) {
5445                 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5446                 return ret;
5447         }
5448
5449         ret = ci_update_vce_dpm(adev, new_ps, old_ps);
5450         if (ret) {
5451                 DRM_ERROR("ci_update_vce_dpm failed\n");
5452                 return ret;
5453         }
5454
5455         ret = ci_update_sclk_t(adev);
5456         if (ret) {
5457                 DRM_ERROR("ci_update_sclk_t failed\n");
5458                 return ret;
5459         }
5460         if (pi->caps_dynamic_ac_timing) {
5461                 ret = ci_update_and_upload_mc_reg_table(adev);
5462                 if (ret) {
5463                         DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5464                         return ret;
5465                 }
5466         }
5467         ret = ci_program_memory_timing_parameters(adev);
5468         if (ret) {
5469                 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5470                 return ret;
5471         }
5472         ret = ci_unfreeze_sclk_mclk_dpm(adev);
5473         if (ret) {
5474                 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5475                 return ret;
5476         }
5477         ret = ci_upload_dpm_level_enable_mask(adev);
5478         if (ret) {
5479                 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5480                 return ret;
5481         }
5482         if (pi->pcie_performance_request)
5483                 ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
5484
5485         return 0;
5486 }
5487
5488 #if 0
5489 static void ci_dpm_reset_asic(struct amdgpu_device *adev)
5490 {
5491         ci_set_boot_state(adev);
5492 }
5493 #endif
5494
5495 static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
5496 {
5497         ci_program_display_gap(adev);
5498 }
5499
5500 union power_info {
5501         struct _ATOM_POWERPLAY_INFO info;
5502         struct _ATOM_POWERPLAY_INFO_V2 info_2;
5503         struct _ATOM_POWERPLAY_INFO_V3 info_3;
5504         struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5505         struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5506         struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5507 };
5508
5509 union pplib_clock_info {
5510         struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5511         struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5512         struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5513         struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5514         struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5515         struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5516 };
5517
5518 union pplib_power_state {
5519         struct _ATOM_PPLIB_STATE v1;
5520         struct _ATOM_PPLIB_STATE_V2 v2;
5521 };
5522
5523 static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
5524                                           struct amdgpu_ps *rps,
5525                                           struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5526                                           u8 table_rev)
5527 {
5528         rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5529         rps->class = le16_to_cpu(non_clock_info->usClassification);
5530         rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5531
5532         if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5533                 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5534                 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5535         } else {
5536                 rps->vclk = 0;
5537                 rps->dclk = 0;
5538         }
5539
5540         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5541                 adev->pm.dpm.boot_ps = rps;
5542         if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5543                 adev->pm.dpm.uvd_ps = rps;
5544 }
5545
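/*
 * Each pplib clock is split across a 16-bit low word and an 8-bit high
 * byte, giving a 24-bit value in (presumably) 10 kHz units; e.g. high
 * 0x01, low 0x86A0 reassembles to 0x186A0 = 100000, i.e. a 1 GHz clock.
 */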
5546 static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
5547                                       struct amdgpu_ps *rps, int index,
5548                                       union pplib_clock_info *clock_info)
5549 {
5550         struct ci_power_info *pi = ci_get_pi(adev);
5551         struct ci_ps *ps = ci_get_ps(rps);
5552         struct ci_pl *pl = &ps->performance_levels[index];
5553
5554         ps->performance_level_count = index + 1;
5555
5556         pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5557         pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5558         pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5559         pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5560
5561         pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
5562                                                    pi->sys_pcie_mask,
5563                                                    pi->vbios_boot_state.pcie_gen_bootup_value,
5564                                                    clock_info->ci.ucPCIEGen);
5565         pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
5566                                                      pi->vbios_boot_state.pcie_lane_bootup_value,
5567                                                      le16_to_cpu(clock_info->ci.usPCIELane));
5568
5569         if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5570                 pi->acpi_pcie_gen = pl->pcie_gen;
5571         }
5572
5573         if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5574                 pi->ulv.supported = true;
5575                 pi->ulv.pl = *pl;
5576                 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5577         }
5578
5579         /* patch up boot state */
5580         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5581                 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5582                 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5583                 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5584                 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5585         }
5586
5587         switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5588         case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5589                 pi->use_pcie_powersaving_levels = true;
5590                 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5591                         pi->pcie_gen_powersaving.max = pl->pcie_gen;
5592                 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5593                         pi->pcie_gen_powersaving.min = pl->pcie_gen;
5594                 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5595                         pi->pcie_lane_powersaving.max = pl->pcie_lane;
5596                 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5597                         pi->pcie_lane_powersaving.min = pl->pcie_lane;
5598                 break;
5599         case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5600                 pi->use_pcie_performance_levels = true;
5601                 if (pi->pcie_gen_performance.max < pl->pcie_gen)
5602                         pi->pcie_gen_performance.max = pl->pcie_gen;
5603                 if (pi->pcie_gen_performance.min > pl->pcie_gen)
5604                         pi->pcie_gen_performance.min = pl->pcie_gen;
5605                 if (pi->pcie_lane_performance.max < pl->pcie_lane)
5606                         pi->pcie_lane_performance.max = pl->pcie_lane;
5607                 if (pi->pcie_lane_performance.min > pl->pcie_lane)
5608                         pi->pcie_lane_performance.min = pl->pcie_lane;
5609                 break;
5610         default:
5611                 break;
5612         }
5613 }
5614
5615 static int ci_parse_power_table(struct amdgpu_device *adev)
5616 {
5617         struct amdgpu_mode_info *mode_info = &adev->mode_info;
5618         struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5619         union pplib_power_state *power_state;
5620         int i, j, k, non_clock_array_index, clock_array_index;
5621         union pplib_clock_info *clock_info;
5622         struct _StateArray *state_array;
5623         struct _ClockInfoArray *clock_info_array;
5624         struct _NonClockInfoArray *non_clock_info_array;
5625         union power_info *power_info;
5626         int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5627         u16 data_offset;
5628         u8 frev, crev;
5629         u8 *power_state_offset;
5630         struct ci_ps *ps;
5631
5632         if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5633                                    &frev, &crev, &data_offset))
5634                 return -EINVAL;
5635         power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5636
5637         amdgpu_add_thermal_controller(adev);
5638
5639         state_array = (struct _StateArray *)
5640                 (mode_info->atom_context->bios + data_offset +
5641                  le16_to_cpu(power_info->pplib.usStateArrayOffset));
5642         clock_info_array = (struct _ClockInfoArray *)
5643                 (mode_info->atom_context->bios + data_offset +
5644                  le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5645         non_clock_info_array = (struct _NonClockInfoArray *)
5646                 (mode_info->atom_context->bios + data_offset +
5647                  le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5648
	adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct amdgpu_ps), GFP_KERNEL);
5651         if (!adev->pm.dpm.ps)
5652                 return -ENOMEM;
5653         power_state_offset = (u8 *)state_array->states;
5654         for (i = 0; i < state_array->ucNumEntries; i++) {
5655                 u8 *idx;
5656                 power_state = (union pplib_power_state *)power_state_offset;
5657                 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5658                 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5659                         &non_clock_info_array->nonClockInfo[non_clock_array_index];
5660                 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
		if (ps == NULL) {
			/*
			 * Free the states allocated so far; leaving them
			 * would leak them and let ci_dpm_fini() double-free
			 * adev->pm.dpm.ps (num_ps is still 0 here, so fini
			 * cannot clean them up itself).
			 */
			for (j = 0; j < i; j++)
				kfree(adev->pm.dpm.ps[j].ps_priv);
			kfree(adev->pm.dpm.ps);
			adev->pm.dpm.ps = NULL;
			return -ENOMEM;
		}
5665                 adev->pm.dpm.ps[i].ps_priv = ps;
5666                 ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
5667                                               non_clock_info,
5668                                               non_clock_info_array->ucEntrySize);
5669                 k = 0;
5670                 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5671                 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5672                         clock_array_index = idx[j];
5673                         if (clock_array_index >= clock_info_array->ucNumEntries)
5674                                 continue;
5675                         if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5676                                 break;
5677                         clock_info = (union pplib_clock_info *)
5678                                 ((u8 *)&clock_info_array->clockInfo[0] +
5679                                  (clock_array_index * clock_info_array->ucEntrySize));
5680                         ci_parse_pplib_clock_info(adev,
5681                                                   &adev->pm.dpm.ps[i], k,
5682                                                   clock_info);
5683                         k++;
5684                 }
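		/*
		 * A v2 state entry is two header bytes (ucNumDPMLevels and
		 * nonClockInfoIndex) followed by one clock-info index byte
		 * per DPM level, hence the 2 + ucNumDPMLevels stride.
		 */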
5685                 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5686         }
5687         adev->pm.dpm.num_ps = state_array->ucNumEntries;
5688
5689         /* fill in the vce power states */
5690         for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
5691                 u32 sclk, mclk;
5692                 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
5693                 clock_info = (union pplib_clock_info *)
5694                         &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5695                 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5696                 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5697                 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5698                 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5699                 adev->pm.dpm.vce_states[i].sclk = sclk;
5700                 adev->pm.dpm.vce_states[i].mclk = mclk;
5701         }
5702
5703         return 0;
5704 }
5705
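/*
 * Pull the bootup voltages and default clocks from the ATOM
 * FirmwareInfo table; the boot PCIe gen and lane count are read back
 * from the current hardware state instead.
 */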
5706 static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
5707                                     struct ci_vbios_boot_state *boot_state)
5708 {
5709         struct amdgpu_mode_info *mode_info = &adev->mode_info;
5710         int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5711         ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5712         u8 frev, crev;
5713         u16 data_offset;
5714
5715         if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5716                                    &frev, &crev, &data_offset)) {
5717                 firmware_info =
5718                         (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5719                                                     data_offset);
5720                 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5721                 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5722                 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5723                 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
5724                 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
5725                 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5726                 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5727
5728                 return 0;
5729         }
5730         return -EINVAL;
5731 }
5732
5733 static void ci_dpm_fini(struct amdgpu_device *adev)
5734 {
5735         int i;
5736
5737         for (i = 0; i < adev->pm.dpm.num_ps; i++) {
5738                 kfree(adev->pm.dpm.ps[i].ps_priv);
5739         }
5740         kfree(adev->pm.dpm.ps);
5741         kfree(adev->pm.dpm.priv);
5742         kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5743         amdgpu_free_extended_power_table(adev);
5744 }
5745
5746 /**
5747  * ci_dpm_init_microcode - load ucode images from disk
5748  *
5749  * @adev: amdgpu_device pointer
5750  *
5751  * Use the firmware interface to load the ucode images into
5752  * the driver (not loaded into hw).
5753  * Returns 0 on success, error on failure.
5754  */
5755 static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5756 {
5757         const char *chip_name;
5758         char fw_name[30];
5759         int err;
5760
5761         DRM_DEBUG("\n");
5762
5763         switch (adev->asic_type) {
5764         case CHIP_BONAIRE:
5765                 if ((adev->pdev->revision == 0x80) ||
5766                     (adev->pdev->revision == 0x81) ||
5767                     (adev->pdev->device == 0x665f))
5768                         chip_name = "bonaire_k";
5769                 else
5770                         chip_name = "bonaire";
5771                 break;
5772         case CHIP_HAWAII:
5773                 if (adev->pdev->revision == 0x80)
5774                         chip_name = "hawaii_k";
5775                 else
5776                         chip_name = "hawaii";
5777                 break;
5778         case CHIP_KAVERI:
5779         case CHIP_KABINI:
	default:
		BUG();
5781         }
5782
5783         snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
5784         err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
5785         if (err)
5786                 goto out;
5787         err = amdgpu_ucode_validate(adev->pm.fw);
5788
5789 out:
5790         if (err) {
5791                 printk(KERN_ERR
5792                        "cik_smc: Failed to load firmware \"%s\"\n",
5793                        fw_name);
5794                 release_firmware(adev->pm.fw);
5795                 adev->pm.fw = NULL;
5796         }
5797         return err;
5798 }
5799
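/*
 * One-time software-side DPM setup: allocate ci_power_info, parse the
 * vbios power-play tables, fill in driver defaults and thermal limits,
 * and probe the voltage-control and GPIO capabilities.
 */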
5800 static int ci_dpm_init(struct amdgpu_device *adev)
5801 {
5802         int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5803         SMU7_Discrete_DpmTable *dpm_table;
5804         struct amdgpu_gpio_rec gpio;
5805         u16 data_offset, size;
5806         u8 frev, crev;
5807         struct ci_power_info *pi;
5808         int ret;
5809
5810         pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5811         if (pi == NULL)
5812                 return -ENOMEM;
5813         adev->pm.dpm.priv = pi;
5814
5815         pi->sys_pcie_mask =
5816                 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
5817                 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
5818
5819         pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5820
5821         pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
5822         pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
5823         pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
5824         pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
5825
5826         pi->pcie_lane_performance.max = 0;
5827         pi->pcie_lane_performance.min = 16;
5828         pi->pcie_lane_powersaving.max = 0;
5829         pi->pcie_lane_powersaving.min = 16;
5830
5831         ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
5832         if (ret) {
5833                 ci_dpm_fini(adev);
5834                 return ret;
5835         }
5836
5837         ret = amdgpu_get_platform_caps(adev);
5838         if (ret) {
5839                 ci_dpm_fini(adev);
5840                 return ret;
5841         }
5842
5843         ret = amdgpu_parse_extended_power_table(adev);
5844         if (ret) {
5845                 ci_dpm_fini(adev);
5846                 return ret;
5847         }
5848
5849         ret = ci_parse_power_table(adev);
5850         if (ret) {
5851                 ci_dpm_fini(adev);
5852                 return ret;
5853         }
5854
5855         pi->dll_default_on = false;
5856         pi->sram_end = SMC_RAM_END;
5857
5858         pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5859         pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5860         pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5861         pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5862         pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5863         pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5864         pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5865         pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5866
5867         pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5868
5869         pi->sclk_dpm_key_disabled = 0;
5870         pi->mclk_dpm_key_disabled = 0;
5871         pi->pcie_dpm_key_disabled = 0;
5872         pi->thermal_sclk_dpm_enabled = 0;
5873
5874         pi->caps_sclk_ds = true;
5875
5876         pi->mclk_strobe_mode_threshold = 40000;
5877         pi->mclk_stutter_mode_threshold = 40000;
5878         pi->mclk_edc_enable_threshold = 40000;
5879         pi->mclk_edc_wr_enable_threshold = 40000;
5880
5881         ci_initialize_powertune_defaults(adev);
5882
5883         pi->caps_fps = false;
5884
5885         pi->caps_sclk_throttle_low_notification = false;
5886
5887         pi->caps_uvd_dpm = true;
5888         pi->caps_vce_dpm = true;
5889
5890         ci_get_leakage_voltages(adev);
5891         ci_patch_dependency_tables_with_leakage(adev);
5892         ci_set_private_data_variables_based_on_pptable(adev);
5893
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kcalloc(4, sizeof(struct amdgpu_clock_voltage_dependency_entry),
			GFP_KERNEL);
5896         if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5897                 ci_dpm_fini(adev);
5898                 return -ENOMEM;
5899         }
5900         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5901         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5902         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5903         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5904         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5905         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5906         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5907         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5908         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5909
5910         adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5911         adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5912         adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5913
5914         adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5915         adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5916         adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5917         adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5918
5919         if (adev->asic_type == CHIP_HAWAII) {
5920                 pi->thermal_temp_setting.temperature_low = 94500;
5921                 pi->thermal_temp_setting.temperature_high = 95000;
5922                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5923         } else {
5924                 pi->thermal_temp_setting.temperature_low = 99500;
5925                 pi->thermal_temp_setting.temperature_high = 100000;
5926                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5927         }
5928
5929         pi->uvd_enabled = false;
5930
5931         dpm_table = &pi->smc_state_table;
5932
5933         gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
5934         if (gpio.valid) {
5935                 dpm_table->VRHotGpio = gpio.shift;
5936                 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5937         } else {
5938                 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5939                 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5940         }
5941
5942         gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
5943         if (gpio.valid) {
5944                 dpm_table->AcDcGpio = gpio.shift;
5945                 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5946         } else {
5947                 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5948                 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5949         }
5950
5951         gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
5952         if (gpio.valid) {
5953                 u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
5954
5955                 switch (gpio.shift) {
5956                 case 0:
5957                         tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5958                         tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5959                         break;
5960                 case 1:
5961                         tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5962                         tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5963                         break;
5964                 case 2:
5965                         tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
5966                         break;
5967                 case 3:
5968                         tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
5969                         break;
5970                 case 4:
5971                         tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
5972                         break;
5973                 default:
5974                         DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
5975                         break;
5976                 }
5977                 WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
5978         }
5979
5980         pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5981         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5982         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5983         if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5984                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5985         else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5986                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5987
5988         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5989                 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5990                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5991                 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5992                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5993                 else
5994                         adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5995         }
5996
5997         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
5998                 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
5999                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6000                 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
6001                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6002                 else
6003                         adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
6004         }
6005
6006         pi->vddc_phase_shed_control = true;
6007
6008 #if defined(CONFIG_ACPI)
6009         pi->pcie_performance_request =
6010                 amdgpu_acpi_is_pcie_performance_request_supported(adev);
6011 #else
6012         pi->pcie_performance_request = false;
6013 #endif
6014
6015         if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
6016                                    &frev, &crev, &data_offset)) {
6017                 pi->caps_sclk_ss_support = true;
6018                 pi->caps_mclk_ss_support = true;
6019                 pi->dynamic_ss = true;
6020         } else {
6021                 pi->caps_sclk_ss_support = false;
6022                 pi->caps_mclk_ss_support = false;
6023                 pi->dynamic_ss = true;
6024         }
6025
6026         if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
6027                 pi->thermal_protection = true;
6028         else
6029                 pi->thermal_protection = false;
6030
6031         pi->caps_dynamic_ac_timing = true;
6032
6033         pi->uvd_power_gated = false;
6034
6035         /* make sure dc limits are valid */
6036         if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
6037             (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
6038                 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
6039                         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
6040
6041         pi->fan_ctrl_is_in_default_mode = true;
6042
6043         return 0;
6044 }
6045
6046 static void
6047 ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
6048                                                struct seq_file *m)
6049 {
6050         struct ci_power_info *pi = ci_get_pi(adev);
6051         struct amdgpu_ps *rps = &pi->current_rps;
6052         u32 sclk = ci_get_average_sclk_freq(adev);
6053         u32 mclk = ci_get_average_mclk_freq(adev);
6054         u32 activity_percent = 50;
6055         int ret;
6056
6057         ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
6058                                         &activity_percent);
6059
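	/*
	 * AverageGraphicsA appears to be an 8.8 fixed-point percentage:
	 * adding 0x80 rounds to nearest before the shift, so e.g.
	 * 0x3280 (50.5%) becomes (0x3280 + 0x80) >> 8 = 51.
	 */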
6060         if (ret == 0) {
6061                 activity_percent += 0x80;
6062                 activity_percent >>= 8;
6063                 activity_percent = activity_percent > 100 ? 100 : activity_percent;
6064         }
6065
6066         seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
6067         seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
6068         seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
6069                    sclk, mclk);
6070         seq_printf(m, "GPU load: %u %%\n", activity_percent);
6071 }
6072
6073 static void ci_dpm_print_power_state(struct amdgpu_device *adev,
6074                                      struct amdgpu_ps *rps)
6075 {
6076         struct ci_ps *ps = ci_get_ps(rps);
6077         struct ci_pl *pl;
6078         int i;
6079
6080         amdgpu_dpm_print_class_info(rps->class, rps->class2);
6081         amdgpu_dpm_print_cap_info(rps->caps);
6082         printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
6083         for (i = 0; i < ps->performance_level_count; i++) {
6084                 pl = &ps->performance_levels[i];
6085                 printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
6086                        i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
6087         }
6088         amdgpu_dpm_print_ps_status(adev, rps);
6089 }
6090
6091 static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
6092 {
6093         struct ci_power_info *pi = ci_get_pi(adev);
6094         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6095
6096         if (low)
6097                 return requested_state->performance_levels[0].sclk;
6098         else
6099                 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
6100 }
6101
6102 static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
6103 {
6104         struct ci_power_info *pi = ci_get_pi(adev);
6105         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6106
6107         if (low)
6108                 return requested_state->performance_levels[0].mclk;
6109         else
6110                 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6111 }
6112
6113 /* get temperature in millidegrees */
6114 static int ci_dpm_get_temp(struct amdgpu_device *adev)
6115 {
6116         u32 temp;
6117         int actual_temp = 0;
6118
6119         temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
6120                 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
6121
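	/*
	 * CTF_TEMP is a 9-bit value; bit 0x200 presumably flags an
	 * out-of-range reading, which is clamped to the 255 C maximum.
	 */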
6122         if (temp & 0x200)
6123                 actual_temp = 255;
6124         else
6125                 actual_temp = temp & 0x1ff;
6126
6127         actual_temp = actual_temp * 1000;
6128
6129         return actual_temp;
6130 }
6131
6132 static int ci_set_temperature_range(struct amdgpu_device *adev)
6133 {
6134         int ret;
6135
6136         ret = ci_thermal_enable_alert(adev, false);
6137         if (ret)
6138                 return ret;
6139         ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
6140                                                CISLANDS_TEMP_RANGE_MAX);
6141         if (ret)
6142                 return ret;
6143         ret = ci_thermal_enable_alert(adev, true);
6144         if (ret)
6145                 return ret;
	return 0;
6147 }
6148
6149 static int ci_dpm_early_init(void *handle)
6150 {
6151         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6152
6153         ci_dpm_set_dpm_funcs(adev);
6154         ci_dpm_set_irq_funcs(adev);
6155
6156         return 0;
6157 }
6158
6159 static int ci_dpm_late_init(void *handle)
6160 {
6161         int ret;
6162         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6163
6164         if (!amdgpu_dpm)
6165                 return 0;
6166
6167         /* init the sysfs and debugfs files late */
6168         ret = amdgpu_pm_sysfs_init(adev);
6169         if (ret)
6170                 return ret;
6171
6172         ret = ci_set_temperature_range(adev);
6173         if (ret)
6174                 return ret;
6175
6176         ci_dpm_powergate_uvd(adev, true);
6177
6178         return 0;
6179 }
6180
6181 static int ci_dpm_sw_init(void *handle)
6182 {
6183         int ret;
6184         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6185
6186         ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
6187         if (ret)
6188                 return ret;
6189
6190         ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
6191         if (ret)
6192                 return ret;
6193
6194         /* default to balanced state */
6195         adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
6196         adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
6197         adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
6198         adev->pm.default_sclk = adev->clock.default_sclk;
6199         adev->pm.default_mclk = adev->clock.default_mclk;
6200         adev->pm.current_sclk = adev->clock.default_sclk;
6201         adev->pm.current_mclk = adev->clock.default_mclk;
6202         adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
6203
6204         if (amdgpu_dpm == 0)
6205                 return 0;
6206
6207         ret = ci_dpm_init_microcode(adev);
6208         if (ret)
6209                 return ret;
6210
6211         INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
6212         mutex_lock(&adev->pm.mutex);
6213         ret = ci_dpm_init(adev);
6214         if (ret)
6215                 goto dpm_failed;
6216         adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6217         if (amdgpu_dpm == 1)
6218                 amdgpu_pm_print_power_states(adev);
6219         mutex_unlock(&adev->pm.mutex);
6220         DRM_INFO("amdgpu: dpm initialized\n");
6221
6222         return 0;
6223
6224 dpm_failed:
6225         ci_dpm_fini(adev);
6226         mutex_unlock(&adev->pm.mutex);
6227         DRM_ERROR("amdgpu: dpm initialization failed\n");
6228         return ret;
6229 }
6230
6231 static int ci_dpm_sw_fini(void *handle)
6232 {
6233         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6234
6235         mutex_lock(&adev->pm.mutex);
6236         amdgpu_pm_sysfs_fini(adev);
6237         ci_dpm_fini(adev);
6238         mutex_unlock(&adev->pm.mutex);
6239
6240         release_firmware(adev->pm.fw);
6241         adev->pm.fw = NULL;
6242
6243         return 0;
6244 }
6245
6246 static int ci_dpm_hw_init(void *handle)
6247 {
6248         int ret;
6249
6250         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6251
6252         if (!amdgpu_dpm)
6253                 return 0;
6254
6255         mutex_lock(&adev->pm.mutex);
6256         ci_dpm_setup_asic(adev);
6257         ret = ci_dpm_enable(adev);
6258         if (ret)
6259                 adev->pm.dpm_enabled = false;
6260         else
6261                 adev->pm.dpm_enabled = true;
6262         mutex_unlock(&adev->pm.mutex);
6263
6264         return ret;
6265 }
6266
6267 static int ci_dpm_hw_fini(void *handle)
6268 {
6269         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6270
6271         if (adev->pm.dpm_enabled) {
6272                 mutex_lock(&adev->pm.mutex);
6273                 ci_dpm_disable(adev);
6274                 mutex_unlock(&adev->pm.mutex);
6275         }
6276
6277         return 0;
6278 }
6279
6280 static int ci_dpm_suspend(void *handle)
6281 {
6282         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6283
6284         if (adev->pm.dpm_enabled) {
6285                 mutex_lock(&adev->pm.mutex);
6286                 /* disable dpm */
6287                 ci_dpm_disable(adev);
6288                 /* reset the power state */
6289                 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6290                 mutex_unlock(&adev->pm.mutex);
6291         }
6292         return 0;
6293 }
6294
static int ci_dpm_resume(void *handle)
{
        int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->pm.dpm_enabled) {
                /* asic init will reset to the boot state */
                mutex_lock(&adev->pm.mutex);
                ci_dpm_setup_asic(adev);
                ret = ci_dpm_enable(adev);
                if (ret)
                        adev->pm.dpm_enabled = false;
                else
                        adev->pm.dpm_enabled = true;
                mutex_unlock(&adev->pm.mutex);
                if (adev->pm.dpm_enabled)
                        amdgpu_pm_compute_clocks(adev);
        }
        return 0;
}

static bool ci_dpm_is_idle(void *handle)
{
        /* XXX */
        return true;
}

static int ci_dpm_wait_for_idle(void *handle)
{
        /* XXX */
        return 0;
}

static int ci_dpm_soft_reset(void *handle)
{
        return 0;
}

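/*
 * Mask or unmask the two thermal threshold interrupts (low-to-high
 * and high-to-low temperature crossings) via the THERM_INTH and
 * THERM_INTL mask bits of the CG_THERMAL_INT SMC register.
 */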
static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      unsigned type,
                                      enum amdgpu_interrupt_state state)
{
        u32 cg_thermal_int;

        switch (type) {
        case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
                        cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
                        cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                default:
                        break;
                }
                break;

        case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
                        cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
                        cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                default:
                        break;
                }
                break;

        default:
                break;
        }
        return 0;
}

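/*
 * IH callback for the thermal interrupt sources above.  src_id 230
 * and 231 are the CIK low-to-high and high-to-low thermal threshold
 * crossings; the real handling is deferred to the thermal worker
 * because re-evaluating the power state may sleep and the IH callback
 * runs in a context that must not.
 */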
static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
                                    struct amdgpu_irq_src *source,
                                    struct amdgpu_iv_entry *entry)
{
        bool queue_thermal = false;

        if (entry == NULL)
                return -EINVAL;

        switch (entry->src_id) {
        case 230: /* thermal low to high */
                DRM_DEBUG("IH: thermal low to high\n");
                adev->pm.dpm.thermal.high_to_low = false;
                queue_thermal = true;
                break;
        case 231: /* thermal high to low */
                DRM_DEBUG("IH: thermal high to low\n");
                adev->pm.dpm.thermal.high_to_low = true;
                queue_thermal = true;
                break;
        default:
                break;
        }

        if (queue_thermal)
                schedule_work(&adev->pm.dpm.thermal.work);

        return 0;
}

static int ci_dpm_set_clockgating_state(void *handle,
                                        enum amd_clockgating_state state)
{
        return 0;
}

static int ci_dpm_set_powergating_state(void *handle,
                                        enum amd_powergating_state state)
{
        return 0;
}

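/*
 * Back-end for the pp_dpm_sclk/pp_dpm_mclk/pp_dpm_pcie sysfs files:
 * query the SMC for the current clock, then print one line per DPM
 * level and mark the active one with '*'.  Clock values are stored in
 * 10 kHz units, hence the division by 100 to print MHz.
 *
 * Illustrative pp_dpm_sclk output (the exact levels depend on the
 * board's DPM tables):
 *
 *   0: 300MHz
 *   1: 600MHz *
 *   2: 1000MHz
 */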
static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
                enum pp_clock_type type, char *buf)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
        struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
        struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
        int i, now, size = 0;
        uint32_t clock, pcie_speed;

        switch (type) {
        case PP_SCLK:
                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
                clock = RREG32(mmSMC_MSG_ARG_0);

                for (i = 0; i < sclk_table->count; i++) {
                        if (clock > sclk_table->dpm_levels[i].value)
                                continue;
                        break;
                }
                now = i;

                for (i = 0; i < sclk_table->count; i++)
                        size += sprintf(buf + size, "%d: %uMHz %s\n",
                                        i, sclk_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
        case PP_MCLK:
                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
                clock = RREG32(mmSMC_MSG_ARG_0);

                for (i = 0; i < mclk_table->count; i++) {
                        if (clock > mclk_table->dpm_levels[i].value)
                                continue;
                        break;
                }
                now = i;

                for (i = 0; i < mclk_table->count; i++)
                        size += sprintf(buf + size, "%d: %uMHz %s\n",
                                        i, mclk_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
        case PP_PCIE:
                pcie_speed = ci_get_current_pcie_speed(adev);
                for (i = 0; i < pcie_table->count; i++) {
                        if (pcie_speed != pcie_table->dpm_levels[i].value)
                                continue;
                        break;
                }
                now = i;

                for (i = 0; i < pcie_table->count; i++)
                        size += sprintf(buf + size, "%d: %s %s\n", i,
                                        (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" :
                                        (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
                                        (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
                                        (i == now) ? "*" : "");
                break;
        default:
                break;
        }

        return size;
}

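/*
 * Restrict the enabled DPM levels to those set in 'mask'.  Only valid
 * in manual performance-level mode.  For sclk/mclk the SMC takes an
 * enable mask directly; for PCIe it takes a single forced level, so
 * the highest set bit of the mask is used.
 */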
static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
                enum pp_clock_type type, uint32_t mask)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if (adev->pm.dpm.forced_level != AMDGPU_DPM_FORCED_LEVEL_MANUAL)
                return -EINVAL;

        switch (type) {
        case PP_SCLK:
                if (!pi->sclk_dpm_key_disabled)
                        amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                        PPSMC_MSG_SCLKDPM_SetEnabledMask,
                                        pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
                break;

        case PP_MCLK:
                if (!pi->mclk_dpm_key_disabled)
                        amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                        PPSMC_MSG_MCLKDPM_SetEnabledMask,
                                        pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
                break;

        case PP_PCIE:
        {
                uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
                uint32_t level = 0;

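                /*
                 * The SMC takes a level index rather than a mask:
                 * reduce the mask to the index of its highest set bit
                 * (equivalent to fls(tmp) - 1 for non-zero tmp).
                 */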
                while (tmp >>= 1)
                        level++;

                if (!pi->pcie_dpm_key_disabled)
                        amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                        PPSMC_MSG_PCIeDPM_ForceLevel,
                                        level);
                break;
        }
        default:
                break;
        }

        return 0;
}

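/*
 * Report the sclk overdrive as a percentage over the golden (default)
 * top level: (current_max - golden_max) * 100 / golden_max.  For
 * example (values illustrative), a golden top sclk of 100000
 * (1000 MHz) raised to 110000 reads back as 10.
 */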
static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
        struct ci_single_dpm_table *golden_sclk_table =
                        &(pi->golden_dpm_table.sclk_table);
        int value;

        value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
                        golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
                        100 /
                        golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

        return value;
}

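/*
 * Apply an sclk overdrive percentage (clamped to 20%) by scaling the
 * top performance level of the requested state up from the golden
 * table's maximum.  The mclk variants below follow the same pattern.
 */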
static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
        struct ci_single_dpm_table *golden_sclk_table =
                        &(pi->golden_dpm_table.sclk_table);

        if (value > 20)
                value = 20;

        ps->performance_levels[ps->performance_level_count - 1].sclk =
                        golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
                        value / 100 +
                        golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

        return 0;
}

static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
        struct ci_single_dpm_table *golden_mclk_table =
                        &(pi->golden_dpm_table.mclk_table);
        int value;

        value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
                        golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
                        100 /
                        golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

        return value;
}

static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
        struct ci_single_dpm_table *golden_mclk_table =
                        &(pi->golden_dpm_table.mclk_table);

        if (value > 20)
                value = 20;

        ps->performance_levels[ps->performance_level_count - 1].mclk =
                        golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
                        value / 100 +
                        golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

        return 0;
}

const struct amd_ip_funcs ci_dpm_ip_funcs = {
        .name = "ci_dpm",
        .early_init = ci_dpm_early_init,
        .late_init = ci_dpm_late_init,
        .sw_init = ci_dpm_sw_init,
        .sw_fini = ci_dpm_sw_fini,
        .hw_init = ci_dpm_hw_init,
        .hw_fini = ci_dpm_hw_fini,
        .suspend = ci_dpm_suspend,
        .resume = ci_dpm_resume,
        .is_idle = ci_dpm_is_idle,
        .wait_for_idle = ci_dpm_wait_for_idle,
        .soft_reset = ci_dpm_soft_reset,
        .set_clockgating_state = ci_dpm_set_clockgating_state,
        .set_powergating_state = ci_dpm_set_powergating_state,
};

static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
        .get_temperature = &ci_dpm_get_temp,
        .pre_set_power_state = &ci_dpm_pre_set_power_state,
        .set_power_state = &ci_dpm_set_power_state,
        .post_set_power_state = &ci_dpm_post_set_power_state,
        .display_configuration_changed = &ci_dpm_display_configuration_changed,
        .get_sclk = &ci_dpm_get_sclk,
        .get_mclk = &ci_dpm_get_mclk,
        .print_power_state = &ci_dpm_print_power_state,
        .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
        .force_performance_level = &ci_dpm_force_performance_level,
        .vblank_too_short = &ci_dpm_vblank_too_short,
        .powergate_uvd = &ci_dpm_powergate_uvd,
        .set_fan_control_mode = &ci_dpm_set_fan_control_mode,
        .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
        .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
        .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
        .print_clock_levels = &ci_dpm_print_clock_levels,
        .force_clock_level = &ci_dpm_force_clock_level,
        .get_sclk_od = &ci_dpm_get_sclk_od,
        .set_sclk_od = &ci_dpm_set_sclk_od,
        .get_mclk_od = &ci_dpm_get_mclk_od,
        .set_mclk_od = &ci_dpm_set_mclk_od,
};

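/* Install the CI dpm callbacks, but never clobber an already-set table. */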
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
{
        if (adev->pm.funcs == NULL)
                adev->pm.funcs = &ci_dpm_funcs;
}

static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
        .set = ci_dpm_set_interrupt_state,
        .process = ci_dpm_process_interrupt,
};

static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
        adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;