drivers/gpu/drm/amd/pm/amdgpu_dpm.c
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>

#define WIDTH_4K 3840

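/**
 * amdgpu_dpm_print_class_info - dump a power state's UI and internal class
 * @class: ATOM_PPLIB classification flags of the power state
 * @class2: ATOM_PPLIB_CLASSIFICATION2 flags of the power state
 *
 * Decodes the classification bitfields from the ATOM PowerPlay tables and
 * prints them to the kernel log, for debugging power state selection.
 */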
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
        const char *s;

        switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
        case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
        default:
                s = "none";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
                s = "battery";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
                s = "balanced";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
                s = "performance";
                break;
        }
        printk("\tui class: %s\n", s);
        printk("\tinternal class:");
        if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
            (class2 == 0))
                pr_cont(" none");
        else {
                if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
                        pr_cont(" boot");
                if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
                        pr_cont(" thermal");
                if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
                        pr_cont(" limited_pwr");
                if (class & ATOM_PPLIB_CLASSIFICATION_REST)
                        pr_cont(" rest");
                if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
                        pr_cont(" forced");
                if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
                        pr_cont(" 3d_perf");
                if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
                        pr_cont(" ovrdrv");
                if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
                        pr_cont(" uvd");
                if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
                        pr_cont(" 3d_low");
                if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
                        pr_cont(" acpi");
                if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
                        pr_cont(" uvd_hd2");
                if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
                        pr_cont(" uvd_hd");
                if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
                        pr_cont(" uvd_sd");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
                        pr_cont(" limited_pwr2");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
                        pr_cont(" ulv");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
                        pr_cont(" uvd_mvc");
        }
        pr_cont("\n");
}

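/**
 * amdgpu_dpm_print_cap_info - dump a power state's capability flags
 * @caps: ATOM_PPLIB capability flags of the power state
 */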
void amdgpu_dpm_print_cap_info(u32 caps)
{
        printk("\tcaps:");
        if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
                pr_cont(" single_disp");
        if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
                pr_cont(" video");
        if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
                pr_cont(" no_dc");
        pr_cont("\n");
}

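/**
 * amdgpu_dpm_print_ps_status - dump a power state's role
 * @adev: amdgpu device pointer
 * @rps: power state to query
 *
 * Marks the state as (c)urrent, (r)equested and/or (b)oot in the log.
 */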
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
                                struct amdgpu_ps *rps)
{
        printk("\tstatus:");
        if (rps == adev->pm.dpm.current_ps)
                pr_cont(" c");
        if (rps == adev->pm.dpm.requested_ps)
                pr_cont(" r");
        if (rps == adev->pm.dpm.boot_ps)
                pr_cont(" b");
        pr_cont("\n");
}

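/**
 * amdgpu_dpm_get_active_displays - update the active CRTC bookkeeping
 * @adev: amdgpu device pointer
 *
 * Walks the mode configuration and records the bitmask and count of the
 * currently enabled CRTCs in adev->pm.dpm for later DPM decisions.
 */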
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        adev->pm.dpm.new_active_crtcs = 0;
        adev->pm.dpm.new_active_crtc_count = 0;
        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc,
                                    &ddev->mode_config.crtc_list, head) {
                        amdgpu_crtc = to_amdgpu_crtc(crtc);
                        if (amdgpu_crtc->enabled) {
                                adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
                                adev->pm.dpm.new_active_crtc_count++;
                        }
                }
        }
}

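/**
 * amdgpu_dpm_get_vblank_time - vblank duration of the first active display
 * @adev: amdgpu device pointer
 *
 * Returns the vblank time in microseconds of the first enabled CRTC, or
 * 0xffffffff (i.e. "maximum") when no display is active.
 */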
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
        u32 vblank_in_pixels;
        u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        amdgpu_crtc = to_amdgpu_crtc(crtc);
                        if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
                                vblank_in_pixels =
                                        amdgpu_crtc->hw_mode.crtc_htotal *
                                        (amdgpu_crtc->hw_mode.crtc_vblank_end -
                                        amdgpu_crtc->hw_mode.crtc_vdisplay +
                                        (amdgpu_crtc->v_border * 2));

                                vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
                                break;
                        }
                }
        }

        return vblank_time_us;
}

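/**
 * amdgpu_dpm_get_vrefresh - refresh rate of the first active display
 * @adev: amdgpu device pointer
 *
 * Returns the vertical refresh rate in Hz of the first enabled CRTC,
 * or 0 when no display is active.
 */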
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
        u32 vrefresh = 0;

        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        amdgpu_crtc = to_amdgpu_crtc(crtc);
                        if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
                                vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
                                break;
                        }
                }
        }

        return vrefresh;
}

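/**
 * amdgpu_is_internal_thermal_sensor - check for an on-die thermal sensor
 * @sensor: thermal sensor type parsed from the PowerPlay table
 *
 * Returns true for sensor types the driver can read directly. Combination
 * controllers (ADT7473/EMC2103 with internal sensor) need special handling
 * and report false here, as do external and GPIO-based sensors.
 */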
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
        switch (sensor) {
        case THERMAL_TYPE_RV6XX:
        case THERMAL_TYPE_RV770:
        case THERMAL_TYPE_EVERGREEN:
        case THERMAL_TYPE_SUMO:
        case THERMAL_TYPE_NI:
        case THERMAL_TYPE_SI:
        case THERMAL_TYPE_CI:
        case THERMAL_TYPE_KV:
                return true;
        case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
        case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
                return false; /* need special handling */
        case THERMAL_TYPE_NONE:
        case THERMAL_TYPE_EXTERNAL:
        case THERMAL_TYPE_EXTERNAL_GPIO:
        default:
                return false;
        }
}

union power_info {
        struct _ATOM_POWERPLAY_INFO info;
        struct _ATOM_POWERPLAY_INFO_V2 info_2;
        struct _ATOM_POWERPLAY_INFO_V3 info_3;
        struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
        struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
        struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
        struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
        struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
        struct _ATOM_PPLIB_FANTABLE fan;
        struct _ATOM_PPLIB_FANTABLE2 fan2;
        struct _ATOM_PPLIB_FANTABLE3 fan3;
};

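/**
 * amdgpu_parse_clk_voltage_dep_table - copy a clock/voltage dependency table
 * @amdgpu_table: driver-side table to fill
 * @atom_table: packed table from the ATOM PowerPlay data
 *
 * Allocates @amdgpu_table->entries and converts each little-endian record
 * (16+8 bit split clock, 16 bit voltage) into the driver's native format.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */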
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
                                              ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
        u32 size = atom_table->ucNumEntries *
                sizeof(struct amdgpu_clock_voltage_dependency_entry);
        int i;
        ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

        amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
        if (!amdgpu_table->entries)
                return -ENOMEM;

        entry = &atom_table->entries[0];
        for (i = 0; i < atom_table->ucNumEntries; i++) {
                amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
                        (entry->ucClockHigh << 16);
                amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
                entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
                        ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
        }
        amdgpu_table->count = atom_table->ucNumEntries;

        return 0;
}

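/**
 * amdgpu_get_platform_caps - read platform capabilities from the VBIOS
 * @adev: amdgpu device pointer
 *
 * Parses the PowerPlayInfo data table and caches the platform capability
 * flags plus the backbias/voltage response times. Returns 0 on success or
 * -EINVAL when the table header cannot be parsed.
 */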
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        union power_info *power_info;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        u16 data_offset;
        u8 frev, crev;

        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset))
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

        adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
        adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
        adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

        return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

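/**
 * amdgpu_parse_extended_power_table - parse the extended PowerPlay tables
 * @adev: amdgpu device pointer
 *
 * Walks the optional sub-tables of the PowerPlayInfo data (fan table,
 * clock/voltage dependency tables, phase shedding limits, CAC leakage,
 * VCE/UVD/SAMU/ACP limits, PPM and PowerTune data) and copies everything
 * the driver needs into adev->pm.dpm. On any allocation failure the
 * already-parsed tables are freed again and an error is returned.
 */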
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        union power_info *power_info;
        union fan_info *fan_info;
        ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        u16 data_offset;
        u8 frev, crev;
        int ret, i;

        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset))
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

        /* fan table */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
                if (power_info->pplib3.usFanTableOffset) {
                        fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
                                                      le16_to_cpu(power_info->pplib3.usFanTableOffset));
                        adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
                        adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
                        adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
                        adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
                        adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
                        adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
                        adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
                        if (fan_info->fan.ucFanTableFormat >= 2)
                                adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
                        else
                                adev->pm.dpm.fan.t_max = 10900;
                        adev->pm.dpm.fan.cycle_delay = 100000;
                        if (fan_info->fan.ucFanTableFormat >= 3) {
                                adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
                                adev->pm.dpm.fan.default_max_fan_pwm =
                                        le16_to_cpu(fan_info->fan3.usFanPWMMax);
                                adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
                                adev->pm.dpm.fan.fan_output_sensitivity =
                                        le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
                        }
                        adev->pm.dpm.fan.ucode_fan_control = true;
                }
        }

        /* clock dependency tables, shedding tables */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
                if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
                        ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
                                (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
                        if (clk_v->ucNumEntries) {
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
                                        le16_to_cpu(clk_v->entries[0].usSclkLow) |
                                        (clk_v->entries[0].ucSclkHigh << 16);
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
                                        le16_to_cpu(clk_v->entries[0].usMclkLow) |
                                        (clk_v->entries[0].ucMclkHigh << 16);
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
                                        le16_to_cpu(clk_v->entries[0].usVddc);
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
                                        le16_to_cpu(clk_v->entries[0].usVddci);
                        }
                }
                if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
                        ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
                                (ATOM_PPLIB_PhaseSheddingLimits_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
                        ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

                        adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
                                kcalloc(psl->ucNumEntries,
                                        sizeof(struct amdgpu_phase_shedding_limits_entry),
                                        GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }

                        entry = &psl->entries[0];
                        for (i = 0; i < psl->ucNumEntries; i++) {
                                adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
                                        le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
                                adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
                                        le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
                                adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
                        }
                        adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
                                psl->ucNumEntries;
                }
        }

        /* cac data */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
                adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
                adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
                adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
                adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
                if (adev->pm.dpm.tdp_od_limit)
                        adev->pm.dpm.power_control = true;
                else
                        adev->pm.dpm.power_control = false;
                adev->pm.dpm.tdp_adjustment = 0;
                adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
                adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
                adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
                if (power_info->pplib5.usCACLeakageTableOffset) {
                        ATOM_PPLIB_CAC_Leakage_Table *cac_table =
                                (ATOM_PPLIB_CAC_Leakage_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
                        ATOM_PPLIB_CAC_Leakage_Record *entry;
                        u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
                        adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        entry = &cac_table->entries[0];
                        for (i = 0; i < cac_table->ucNumEntries; i++) {
                                if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
                                                le16_to_cpu(entry->usVddc1);
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
                                                le16_to_cpu(entry->usVddc2);
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
                                                le16_to_cpu(entry->usVddc3);
                                } else {
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
                                                le16_to_cpu(entry->usVddc);
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
                                                le32_to_cpu(entry->ulLeakageValue);
                                }
                                entry = (ATOM_PPLIB_CAC_Leakage_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
                        }
                        adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
                }
        }

        /* ext tables */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
                ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
                        (mode_info->atom_context->bios + data_offset +
                         le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
                        ext_hdr->usVCETableOffset) {
                        VCEClockInfoArray *array = (VCEClockInfoArray *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
                        ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
                                 1 + array->ucNumEntries * sizeof(VCEClockInfo));
                        ATOM_PPLIB_VCE_State_Table *states =
                                (ATOM_PPLIB_VCE_State_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
                                 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
                                 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
                        ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
                        ATOM_PPLIB_VCE_State_Record *state_entry;
                        VCEClockInfo *vce_clk;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        state_entry = &states->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                vce_clk = (VCEClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
                                adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
                                        le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
                                adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
                                        le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
                                adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
                        }
                        adev->pm.dpm.num_of_vce_states =
                                        states->numEntries > AMD_MAX_VCE_LEVELS ?
                                        AMD_MAX_VCE_LEVELS : states->numEntries;
                        for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
                                vce_clk = (VCEClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
                                adev->pm.dpm.vce_states[i].evclk =
                                        le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
                                adev->pm.dpm.vce_states[i].ecclk =
                                        le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
                                adev->pm.dpm.vce_states[i].clk_idx =
                                        state_entry->ucClockInfoIndex & 0x3f;
                                adev->pm.dpm.vce_states[i].pstate =
                                        (state_entry->ucClockInfoIndex & 0xc0) >> 6;
                                state_entry = (ATOM_PPLIB_VCE_State_Record *)
                                        ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
                        ext_hdr->usUVDTableOffset) {
                        UVDClockInfoArray *array = (UVDClockInfoArray *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
                        ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
                                 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
                        ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                UVDClockInfo *uvd_clk = (UVDClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
                                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
                                        le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
                                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
                                        le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
                                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
                        ext_hdr->usSAMUTableOffset) {
                        ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
                        ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
                                        le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
                                adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
                    ext_hdr->usPPMTableOffset) {
                        ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usPPMTableOffset));
                        adev->pm.dpm.dyn_state.ppm_table =
                                kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.ppm_table) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
                        adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
                                le16_to_cpu(ppm->usCpuCoreNumber);
                        adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
                                le32_to_cpu(ppm->ulPlatformTDP);
                        adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
                                le32_to_cpu(ppm->ulSmallACPlatformTDP);
                        adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
                                le32_to_cpu(ppm->ulPlatformTDC);
                        adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
                                le32_to_cpu(ppm->ulSmallACPlatformTDC);
                        adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
                                le32_to_cpu(ppm->ulApuTDP);
                        adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
                                le32_to_cpu(ppm->ulDGpuTDP);
                        adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
                                le32_to_cpu(ppm->ulDGpuUlvPower);
                        adev->pm.dpm.dyn_state.ppm_table->tj_max =
                                le32_to_cpu(ppm->ulTjmax);
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
                        ext_hdr->usACPTableOffset) {
                        ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
                        ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
                                        le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
                                adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
                        ext_hdr->usPowerTuneTableOffset) {
                        u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                        ATOM_PowerTune_Table *pt;
                        adev->pm.dpm.dyn_state.cac_tdp_table =
                                kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        if (rev > 0) {
                                ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
                                        (mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                                adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
                                        ppt->usMaximumPowerDeliveryLimit;
                                pt = &ppt->power_tune_table;
                        } else {
                                ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
                                        (mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                                adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
                                pt = &ppt->power_tune_table;
                        }
                        adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
                        adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
                                le16_to_cpu(pt->usConfigurableTDP);
                        adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
                        adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
                                le16_to_cpu(pt->usBatteryPowerLimit);
                        adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
                                le16_to_cpu(pt->usSmallPowerLimit);
                        adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
                                le16_to_cpu(pt->usLowCACLeakage);
                        adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
                                le16_to_cpu(pt->usHighCACLeakage);
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
                                ext_hdr->usSclkVddgfxTableOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(
                                        &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
                                        dep_table);
                        if (ret) {
                                kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
                                return ret;
                        }
                }
        }

        return 0;
}

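/**
 * amdgpu_free_extended_power_table - free all parsed PowerPlay tables
 * @adev: amdgpu device pointer
 *
 * Releases every table allocated by amdgpu_parse_extended_power_table().
 * kfree() tolerates NULL pointers, so this is safe to call on a partially
 * parsed state.
 */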
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
        struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

        kfree(dyn_state->vddc_dependency_on_sclk.entries);
        kfree(dyn_state->vddci_dependency_on_mclk.entries);
        kfree(dyn_state->vddc_dependency_on_mclk.entries);
        kfree(dyn_state->mvdd_dependency_on_mclk.entries);
        kfree(dyn_state->cac_leakage_table.entries);
        kfree(dyn_state->phase_shedding_limits_table.entries);
        kfree(dyn_state->ppm_table);
        kfree(dyn_state->cac_tdp_table);
        kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
        kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
        kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
        kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
        kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
        "NONE",
        "lm63",
        "adm1032",
        "adm1030",
        "max6649",
        "lm64",
        "f75375",
        "RV6xx",
        "RV770",
        "adt7473",
        "NONE",
        "External GPIO",
        "Evergreen",
        "emc2103",
        "Sumo",
        "Northern Islands",
        "Southern Islands",
        "lm96163",
        "Sea Islands",
        "Kaveri/Kabini",
};

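/**
 * amdgpu_add_thermal_controller - register the board's thermal controller
 * @adev: amdgpu device pointer
 *
 * Reads the thermal controller description from the PowerPlay table,
 * records the fan parameters and internal sensor type, and, for external
 * I2C controllers, instantiates the matching i2c client device.
 */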
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        ATOM_PPLIB_POWERPLAYTABLE *power_table;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        ATOM_PPLIB_THERMALCONTROLLER *controller;
        struct amdgpu_i2c_bus_rec i2c_bus;
        u16 data_offset;
        u8 frev, crev;

        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset))
                return;
        power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
                (mode_info->atom_context->bios + data_offset);
        controller = &power_table->sThermalController;

        /* add the i2c bus for thermal/fan chip */
        if (controller->ucType > 0) {
                if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
                        adev->pm.no_fan = true;
                adev->pm.fan_pulses_per_revolution =
                        controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
                if (adev->pm.fan_pulses_per_revolution) {
                        adev->pm.fan_min_rpm = controller->ucFanMinRPM;
                        adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
                }
                if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_NI;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_SI;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_CI;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_KV;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
                        DRM_INFO("External GPIO thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
                } else if (controller->ucType ==
                           ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
                        DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
                } else if (controller->ucType ==
                           ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
                        DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
                } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
                        DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
                                 pp_lib_thermal_controller_names[controller->ucType],
                                 controller->ucI2cAddress >> 1,
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
                        i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
                        adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
                        if (adev->pm.i2c_bus) {
                                struct i2c_board_info info = { };
                                const char *name = pp_lib_thermal_controller_names[controller->ucType];
                                info.addr = controller->ucI2cAddress >> 1;
                                strlcpy(info.type, name, sizeof(info.type));
                                i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
                        }
                } else {
                        DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
                                 controller->ucType,
                                 controller->ucI2cAddress >> 1,
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                }
        }
}

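/**
 * amdgpu_get_pcie_gen_support - pick a supported PCIe generation
 * @adev: amdgpu device pointer
 * @sys_mask: CAIL mask of link speeds supported by the system
 * @asic_gen: generation currently configured on the ASIC, if any
 * @default_gen: generation the caller would prefer
 *
 * When @asic_gen already names a concrete generation it is returned
 * unchanged; otherwise @default_gen is returned if @sys_mask allows it,
 * falling back to gen1.
 */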
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
                                                 u32 sys_mask,
                                                 enum amdgpu_pcie_gen asic_gen,
                                                 enum amdgpu_pcie_gen default_gen)
{
        switch (asic_gen) {
        case AMDGPU_PCIE_GEN1:
                return AMDGPU_PCIE_GEN1;
        case AMDGPU_PCIE_GEN2:
                return AMDGPU_PCIE_GEN2;
        case AMDGPU_PCIE_GEN3:
                return AMDGPU_PCIE_GEN3;
        default:
                if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
                    (default_gen == AMDGPU_PCIE_GEN3))
                        return AMDGPU_PCIE_GEN3;
                else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
                         (default_gen == AMDGPU_PCIE_GEN2))
                        return AMDGPU_PCIE_GEN2;
                else
                        return AMDGPU_PCIE_GEN1;
        }
        return AMDGPU_PCIE_GEN1;
}

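/**
 * amdgpu_get_vce_clock_state - look up a cached VCE clock state
 * @handle: amdgpu device pointer (opaque powerplay handle)
 * @idx: index into the VCE state table
 *
 * Returns the VCE state parsed from the PowerPlay tables, or NULL when
 * @idx is out of range.
 */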
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (idx < adev->pm.dpm.num_of_vce_states)
                return &adev->pm.dpm.vce_states[idx];

        return NULL;
}

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
}

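/**
 * amdgpu_dpm_set_powergating_by_smu - (un)gate an IP block through the SMU
 * @adev: amdgpu device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* identifier of the block
 * @gate: true to gate (power down), false to ungate
 *
 * UVD and VCE requests are serialized with adev->pm.mutex; see the
 * deadlock note in the function body. Returns the powerplay callback's
 * result, or 0 when the block type is not handled.
 */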
926 int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
927 {
928         int ret = 0;
929         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
930
931         switch (block_type) {
932         case AMD_IP_BLOCK_TYPE_UVD:
933         case AMD_IP_BLOCK_TYPE_VCE:
934                 if (pp_funcs && pp_funcs->set_powergating_by_smu) {
			/*
			 * TODO: need a better lock mechanism
			 *
			 * Here adev->pm.mutex lock protection is enforced on
			 * the UVD and VCE cases only, since the other cases
			 * may already be covered by locks taken in amdgpu_pm.c.
			 * This is a quick fix for the deadlock issue below:
			 *     INFO: task ocltst:2028 blocked for more than 120 seconds.
			 *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
			 *     "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
			 *     ocltst          D    0  2028   2026 0x00000000
			 *     Call Trace:
			 *     __schedule+0x2c0/0x870
			 *     schedule+0x2c/0x70
			 *     schedule_preempt_disabled+0xe/0x10
			 *     __mutex_lock.isra.9+0x26d/0x4e0
			 *     __mutex_lock_slowpath+0x13/0x20
			 *     ? __mutex_lock_slowpath+0x13/0x20
			 *     mutex_lock+0x2f/0x40
			 *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
			 *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
			 *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
			 *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
			 *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
			 *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
			 */
			mutex_lock(&adev->pm.mutex);
			ret = pp_funcs->set_powergating_by_smu(
				adev->powerplay.pp_handle, block_type, gate);
964                         mutex_unlock(&adev->pm.mutex);
965                 }
966                 break;
967         case AMD_IP_BLOCK_TYPE_GFX:
968         case AMD_IP_BLOCK_TYPE_VCN:
969         case AMD_IP_BLOCK_TYPE_SDMA:
970         case AMD_IP_BLOCK_TYPE_JPEG:
971         case AMD_IP_BLOCK_TYPE_GMC:
972         case AMD_IP_BLOCK_TYPE_ACP:
973                 if (pp_funcs && pp_funcs->set_powergating_by_smu) {
			ret = pp_funcs->set_powergating_by_smu(
				adev->powerplay.pp_handle, block_type, gate);
976                 }
977                 break;
978         default:
979                 break;
980         }
981
982         return ret;
983 }
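/*
 * This wrapper is what the enable_uvd/enable_vce/enable_jpeg helpers
 * further down route through on non-SI parts. Example (illustrative
 * sketch) of gating an idle VCN instance off directly:
 *
 *	ret = amdgpu_dpm_set_powergating_by_smu(adev,
 *						AMD_IP_BLOCK_TYPE_VCN, true);
 */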
984
985 int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
986 {
987         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
988         void *pp_handle = adev->powerplay.pp_handle;
989         int ret = 0;
990
991         if (!pp_funcs || !pp_funcs->set_asic_baco_state)
992                 return -ENOENT;
993
994         /* enter BACO state */
995         ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
996
997         return ret;
998 }
999
1000 int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
1001 {
1002         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1003         void *pp_handle = adev->powerplay.pp_handle;
1004         int ret = 0;
1005
1006         if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1007                 return -ENOENT;
1008
1009         /* exit BACO state */
1010         ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1011
1012         return ret;
1013 }
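/*
 * Example (illustrative sketch): a runtime-suspend style caller pairs the
 * two transitions and propagates either failure:
 *
 *	ret = amdgpu_dpm_baco_enter(adev);
 *	if (ret)
 *		return ret;
 *	... device sits in BACO while idle ...
 *	ret = amdgpu_dpm_baco_exit(adev);
 */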
1014
1015 int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
1016                              enum pp_mp1_state mp1_state)
1017 {
1018         int ret = 0;
1019         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1020
1021         if (pp_funcs && pp_funcs->set_mp1_state) {
1022                 ret = pp_funcs->set_mp1_state(
1023                                 adev->powerplay.pp_handle,
1024                                 mp1_state);
1025         }
1026
1027         return ret;
1028 }
1029
1030 bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
1031 {
1032         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1033         void *pp_handle = adev->powerplay.pp_handle;
1034         bool baco_cap;
1035
1036         if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
1037                 return false;
1038
1039         if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
1040                 return false;
1041
1042         return baco_cap;
1043 }
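/*
 * Example (illustrative sketch): reset code checks the capability first
 * and only then commits to a BACO-based reset; the mode2 fallback here is
 * just one possible alternative:
 *
 *	if (amdgpu_dpm_is_baco_supported(adev))
 *		return amdgpu_dpm_baco_reset(adev);
 *	return amdgpu_dpm_mode2_reset(adev);
 */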
1044
1045 int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
1046 {
1047         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1048         void *pp_handle = adev->powerplay.pp_handle;
1049
1050         if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
1051                 return -ENOENT;
1052
1053         return pp_funcs->asic_reset_mode_2(pp_handle);
1054 }
1055
1056 int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
1057 {
1058         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1059         void *pp_handle = adev->powerplay.pp_handle;
1060         int ret = 0;
1061
1062         if (!pp_funcs || !pp_funcs->set_asic_baco_state)
1063                 return -ENOENT;
1064
1065         /* enter BACO state */
1066         ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
1067         if (ret)
1068                 return ret;
1069
1070         /* exit BACO state */
1071         ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
1072         if (ret)
1073                 return ret;
1074
1075         return 0;
1076 }
1077
1078 bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
1079 {
1080         struct smu_context *smu = &adev->smu;
1081
1082         if (is_support_sw_smu(adev))
1083                 return smu_mode1_reset_is_support(smu);
1084
1085         return false;
1086 }
1087
1088 int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
1089 {
1090         struct smu_context *smu = &adev->smu;
1091
1092         if (is_support_sw_smu(adev))
1093                 return smu_mode1_reset(smu);
1094
1095         return -EOPNOTSUPP;
1096 }
1097
1098 int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
1099                                     enum PP_SMC_POWER_PROFILE type,
1100                                     bool en)
1101 {
1102         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1103         int ret = 0;
1104
1105         if (amdgpu_sriov_vf(adev))
1106                 return 0;
1107
1108         if (pp_funcs && pp_funcs->switch_power_profile)
1109                 ret = pp_funcs->switch_power_profile(
1110                         adev->powerplay.pp_handle, type, en);
1111
1112         return ret;
1113 }
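/*
 * Example (illustrative sketch): compute offload code raises the COMPUTE
 * profile hint while work is in flight and drops it again afterwards:
 *
 *	amdgpu_dpm_switch_power_profile(adev,
 *					PP_SMC_POWER_PROFILE_COMPUTE, true);
 *	... submit and fence compute work ...
 *	amdgpu_dpm_switch_power_profile(adev,
 *					PP_SMC_POWER_PROFILE_COMPUTE, false);
 */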
1114
1115 int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
1116                                uint32_t pstate)
1117 {
1118         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1119         int ret = 0;
1120
1121         if (pp_funcs && pp_funcs->set_xgmi_pstate)
1122                 ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
1123                                                                 pstate);
1124
1125         return ret;
1126 }
1127
1128 int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
1129                              uint32_t cstate)
1130 {
1131         int ret = 0;
1132         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1133         void *pp_handle = adev->powerplay.pp_handle;
1134
1135         if (pp_funcs && pp_funcs->set_df_cstate)
1136                 ret = pp_funcs->set_df_cstate(pp_handle, cstate);
1137
1138         return ret;
1139 }
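/*
 * Example (illustrative sketch): RAS error injection wants the data
 * fabric out of deep C-states first, then re-allows them when done:
 *
 *	amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW);
 *	... inject and service the error ...
 *	amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW);
 */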
1140
1141 int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
1142 {
1143         struct smu_context *smu = &adev->smu;
1144
1145         if (is_support_sw_smu(adev))
1146                 return smu_allow_xgmi_power_down(smu, en);
1147
1148         return 0;
1149 }
1150
1151 int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
1152 {
1153         void *pp_handle = adev->powerplay.pp_handle;
1154         const struct amd_pm_funcs *pp_funcs =
1155                         adev->powerplay.pp_funcs;
1156         int ret = 0;
1157
1158         if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
1159                 ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
1160
1161         return ret;
1162 }
1163
1164 int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
1165                                       uint32_t msg_id)
1166 {
1167         void *pp_handle = adev->powerplay.pp_handle;
1168         const struct amd_pm_funcs *pp_funcs =
1169                         adev->powerplay.pp_funcs;
1170         int ret = 0;
1171
1172         if (pp_funcs && pp_funcs->set_clockgating_by_smu)
1173                 ret = pp_funcs->set_clockgating_by_smu(pp_handle,
1174                                                        msg_id);
1175
1176         return ret;
1177 }
1178
1179 int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
1180                                   bool acquire)
1181 {
1182         void *pp_handle = adev->powerplay.pp_handle;
1183         const struct amd_pm_funcs *pp_funcs =
1184                         adev->powerplay.pp_funcs;
1185         int ret = -EOPNOTSUPP;
1186
1187         if (pp_funcs && pp_funcs->smu_i2c_bus_access)
1188                 ret = pp_funcs->smu_i2c_bus_access(pp_handle,
1189                                                    acquire);
1190
1191         return ret;
1192 }
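/*
 * Example (illustrative sketch): raw I2C traffic to the SMU-owned bus is
 * bracketed by an acquire/release pair so firmware arbitration stays
 * consistent:
 *
 *	ret = amdgpu_dpm_smu_i2c_bus_access(adev, true);
 *	if (!ret) {
 *		... i2c_transfer() against the SMU bus ...
 *		amdgpu_dpm_smu_i2c_bus_access(adev, false);
 *	}
 */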
1193
1194 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
1195 {
1196         if (adev->pm.dpm_enabled) {
1197                 mutex_lock(&adev->pm.mutex);
		adev->pm.ac_power = power_supply_is_system_supplied() > 0;
1202                 if (adev->powerplay.pp_funcs &&
1203                     adev->powerplay.pp_funcs->enable_bapm)
1204                         amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
1205                 mutex_unlock(&adev->pm.mutex);
1206
1207                 if (is_support_sw_smu(adev))
1208                         smu_set_ac_dc(&adev->smu);
1209         }
1210 }
1211
1212 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
1213                            void *data, uint32_t *size)
1214 {
1215         const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
1216         int ret = 0;
1217
1218         if (!data || !size)
1219                 return -EINVAL;
1220
1221         if (pp_funcs && pp_funcs->read_sensor)
		ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
					    sensor, data, size);
1224         else
1225                 ret = -EINVAL;
1226
1227         return ret;
1228 }
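/*
 * Example (illustrative sketch): callers pass a typed buffer and its
 * size, e.g. querying the current GFX load the way the sysfs/hwmon
 * code does:
 *
 *	uint32_t value;
 *	uint32_t size = sizeof(value);
 *	int r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
 *				       (void *)&value, &size);
 */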
1229
1230 void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
1231 {
1232         struct amdgpu_device *adev =
1233                 container_of(work, struct amdgpu_device,
1234                              pm.dpm.thermal.work);
1235         /* switch to the thermal state */
1236         enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
1237         int temp, size = sizeof(temp);
1238
1239         if (!adev->pm.dpm_enabled)
1240                 return;
1241
1242         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
1243                                     (void *)&temp, &size)) {
1244                 if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back to the user state */
1246                         dpm_state = adev->pm.dpm.user_state;
1247         } else {
1248                 if (adev->pm.dpm.thermal.high_to_low)
			/* switch back to the user state */
1250                         dpm_state = adev->pm.dpm.user_state;
1251         }
1252         mutex_lock(&adev->pm.mutex);
	adev->pm.dpm.thermal_active =
		(dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL);
1257         adev->pm.dpm.state = dpm_state;
1258         mutex_unlock(&adev->pm.mutex);
1259
1260         amdgpu_pm_compute_clocks(adev);
1261 }
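/*
 * Example (illustrative sketch): the thermal interrupt path merely queues
 * this handler, deferring the actual state switch to process context:
 *
 *	schedule_work(&adev->pm.dpm.thermal.work);
 */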
1262
1263 static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
1264                                                      enum amd_pm_state_type dpm_state)
1265 {
1266         int i;
1267         struct amdgpu_ps *ps;
1268         u32 ui_class;
	bool single_display = adev->pm.dpm.new_active_crtc_count < 2;
1271
1272         /* check if the vblank period is too short to adjust the mclk */
1273         if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
1274                 if (amdgpu_dpm_vblank_too_short(adev))
1275                         single_display = false;
1276         }
1277
	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
1281         if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
1282                 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
1283         /* balanced states don't exist at the moment */
1284         if (dpm_state == POWER_STATE_TYPE_BALANCED)
1285                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1286
1287 restart_search:
1288         /* Pick the best power state based on current conditions */
1289         for (i = 0; i < adev->pm.dpm.num_ps; i++) {
1290                 ps = &adev->pm.dpm.ps[i];
1291                 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
1292                 switch (dpm_state) {
1293                 /* user states */
1294                 case POWER_STATE_TYPE_BATTERY:
1295                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
1296                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1297                                         if (single_display)
1298                                                 return ps;
1299                                 } else
1300                                         return ps;
1301                         }
1302                         break;
1303                 case POWER_STATE_TYPE_BALANCED:
1304                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
1305                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1306                                         if (single_display)
1307                                                 return ps;
1308                                 } else
1309                                         return ps;
1310                         }
1311                         break;
1312                 case POWER_STATE_TYPE_PERFORMANCE:
1313                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
1314                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
1315                                         if (single_display)
1316                                                 return ps;
1317                                 } else
1318                                         return ps;
1319                         }
1320                         break;
1321                 /* internal states */
1322                 case POWER_STATE_TYPE_INTERNAL_UVD:
1323                         if (adev->pm.dpm.uvd_ps)
1324                                 return adev->pm.dpm.uvd_ps;
1325                         else
1326                                 break;
1327                 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1328                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
1329                                 return ps;
1330                         break;
1331                 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1332                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
1333                                 return ps;
1334                         break;
1335                 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1336                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
1337                                 return ps;
1338                         break;
1339                 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1340                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
1341                                 return ps;
1342                         break;
1343                 case POWER_STATE_TYPE_INTERNAL_BOOT:
1344                         return adev->pm.dpm.boot_ps;
1345                 case POWER_STATE_TYPE_INTERNAL_THERMAL:
1346                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
1347                                 return ps;
1348                         break;
1349                 case POWER_STATE_TYPE_INTERNAL_ACPI:
1350                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
1351                                 return ps;
1352                         break;
1353                 case POWER_STATE_TYPE_INTERNAL_ULV:
1354                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
1355                                 return ps;
1356                         break;
1357                 case POWER_STATE_TYPE_INTERNAL_3DPERF:
1358                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
1359                                 return ps;
1360                         break;
1361                 default:
1362                         break;
1363                 }
1364         }
1365         /* use a fallback state if we didn't match */
1366         switch (dpm_state) {
1367         case POWER_STATE_TYPE_INTERNAL_UVD_SD:
1368                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
1369                 goto restart_search;
1370         case POWER_STATE_TYPE_INTERNAL_UVD_HD:
1371         case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
1372         case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
1373                 if (adev->pm.dpm.uvd_ps) {
1374                         return adev->pm.dpm.uvd_ps;
1375                 } else {
1376                         dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1377                         goto restart_search;
1378                 }
1379         case POWER_STATE_TYPE_INTERNAL_THERMAL:
1380                 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
1381                 goto restart_search;
1382         case POWER_STATE_TYPE_INTERNAL_ACPI:
1383                 dpm_state = POWER_STATE_TYPE_BATTERY;
1384                 goto restart_search;
1385         case POWER_STATE_TYPE_BATTERY:
1386         case POWER_STATE_TYPE_BALANCED:
1387         case POWER_STATE_TYPE_INTERNAL_3DPERF:
1388                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
1389                 goto restart_search;
1390         default:
1391                 break;
1392         }
1393
1394         return NULL;
1395 }
1396
1397 static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
1398 {
1399         struct amdgpu_ps *ps;
1400         enum amd_pm_state_type dpm_state;
1401         int ret;
1402         bool equal = false;
1403
1404         /* if dpm init failed */
1405         if (!adev->pm.dpm_enabled)
1406                 return;
1407
1408         if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
1409                 /* add other state override checks here */
		if (!adev->pm.dpm.thermal_active &&
		    !adev->pm.dpm.uvd_active)
1412                         adev->pm.dpm.state = adev->pm.dpm.user_state;
1413         }
1414         dpm_state = adev->pm.dpm.state;
1415
1416         ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
1417         if (ps)
1418                 adev->pm.dpm.requested_ps = ps;
1419         else
1420                 return;
1421
1422         if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
1423                 printk("switching from power state:\n");
1424                 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
1425                 printk("switching to power state:\n");
1426                 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
1427         }
1428
1429         /* update whether vce is active */
1430         ps->vce_active = adev->pm.dpm.vce_active;
1431         if (adev->powerplay.pp_funcs->display_configuration_changed)
1432                 amdgpu_dpm_display_configuration_changed(adev);
1433
1434         ret = amdgpu_dpm_pre_set_power_state(adev);
1435         if (ret)
1436                 return;
1437
	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps,
						 adev->pm.dpm.requested_ps,
						 &equal))
			equal = false;
	}
1442
1443         if (equal)
1444                 return;
1445
1446         amdgpu_dpm_set_power_state(adev);
1447         amdgpu_dpm_post_set_power_state(adev);
1448
1449         adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
1450         adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
1451
1452         if (adev->powerplay.pp_funcs->force_performance_level) {
1453                 if (adev->pm.dpm.thermal_active) {
1454                         enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
1455                         /* force low perf level for thermal */
1456                         amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
1457                         /* save the user's level */
1458                         adev->pm.dpm.forced_level = level;
1459                 } else {
1460                         /* otherwise, user selected level */
1461                         amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
1462                 }
1463         }
1464 }
1465
1466 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
1467 {
1468         int i = 0;
1469
1470         if (!adev->pm.dpm_enabled)
1471                 return;
1472
1473         if (adev->mode_info.num_crtc)
1474                 amdgpu_display_bandwidth_update(adev);
1475
1476         for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
1477                 struct amdgpu_ring *ring = adev->rings[i];
1478                 if (ring && ring->sched.ready)
1479                         amdgpu_fence_wait_empty(ring);
1480         }
1481
1482         if (adev->powerplay.pp_funcs->dispatch_tasks) {
1483                 if (!amdgpu_device_has_dc_support(adev)) {
1484                         mutex_lock(&adev->pm.mutex);
1485                         amdgpu_dpm_get_active_displays(adev);
1486                         adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
1487                         adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
1488                         adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
			/* we have issues with mclk switching with
			 * refresh rates over 120 Hz on the non-DC code.
			 */
1492                         if (adev->pm.pm_display_cfg.vrefresh > 120)
1493                                 adev->pm.pm_display_cfg.min_vblank_time = 0;
1494                         if (adev->powerplay.pp_funcs->display_configuration_change)
1495                                 adev->powerplay.pp_funcs->display_configuration_change(
1496                                                         adev->powerplay.pp_handle,
1497                                                         &adev->pm.pm_display_cfg);
1498                         mutex_unlock(&adev->pm.mutex);
1499                 }
1500                 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
1501         } else {
1502                 mutex_lock(&adev->pm.mutex);
1503                 amdgpu_dpm_get_active_displays(adev);
1504                 amdgpu_dpm_change_power_state_locked(adev);
1505                 mutex_unlock(&adev->pm.mutex);
1506         }
1507 }
1508
1509 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
1510 {
1511         int ret = 0;
1512
1513         if (adev->family == AMDGPU_FAMILY_SI) {
1514                 mutex_lock(&adev->pm.mutex);
1515                 if (enable) {
1516                         adev->pm.dpm.uvd_active = true;
1517                         adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
1518                 } else {
1519                         adev->pm.dpm.uvd_active = false;
1520                 }
1521                 mutex_unlock(&adev->pm.mutex);
1522
1523                 amdgpu_pm_compute_clocks(adev);
1524         } else {
1525                 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
1526                 if (ret)
			DRM_ERROR("Dpm %s uvd failed, ret = %d\n",
				  enable ? "enable" : "disable", ret);
1529
1530                 /* enable/disable Low Memory PState for UVD (4k videos) */
1531                 if (adev->asic_type == CHIP_STONEY &&
1532                         adev->uvd.decode_image_width >= WIDTH_4K) {
1533                         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
1534
1535                         if (hwmgr && hwmgr->hwmgr_func &&
1536                             hwmgr->hwmgr_func->update_nbdpm_pstate)
1537                                 hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
1538                                                                        !enable,
1539                                                                        true);
1540                 }
1541         }
1542 }
1543
1544 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
1545 {
1546         int ret = 0;
1547
1548         if (adev->family == AMDGPU_FAMILY_SI) {
1549                 mutex_lock(&adev->pm.mutex);
1550                 if (enable) {
1551                         adev->pm.dpm.vce_active = true;
1552                         /* XXX select vce level based on ring/task */
1553                         adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
1554                 } else {
1555                         adev->pm.dpm.vce_active = false;
1556                 }
1557                 mutex_unlock(&adev->pm.mutex);
1558
1559                 amdgpu_pm_compute_clocks(adev);
1560         } else {
1561                 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
1562                 if (ret)
			DRM_ERROR("Dpm %s vce failed, ret = %d\n",
				  enable ? "enable" : "disable", ret);
1565         }
1566 }
1567
1568 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
1569 {
1570         int i;
1571
	if (!adev->powerplay.pp_funcs->print_power_state)
		return;
1574
	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}
1579
1580 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
1581 {
1582         int ret = 0;
1583
1584         ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
1585         if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d\n",
			  enable ? "enable" : "disable", ret);
1588 }
1589
1590 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
1591 {
1592         int r;
1593
1594         if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
1595                 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
1596                 if (r) {
1597                         pr_err("smu firmware loading failed\n");
1598                         return r;
1599                 }
1600
1601                 if (smu_version)
1602                         *smu_version = adev->pm.fw_version;
1603         }
1604
1605         return 0;
1606 }
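/*
 * Example (illustrative sketch): GFX IP init loads the SMU microcode up
 * front and can log the version it reports back:
 *
 *	uint32_t smu_version;
 *
 *	r = amdgpu_pm_load_smu_firmware(adev, &smu_version);
 *	if (!r)
 *		DRM_INFO("smu fw version: 0x%08x\n", smu_version);
 */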