/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>

#define WIDTH_4K 3840

void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
        const char *s;

        switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
        case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
        default:
                s = "none";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
                s = "battery";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
                s = "balanced";
                break;
        case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
                s = "performance";
                break;
        }
        printk("\tui class: %s\n", s);
        printk("\tinternal class:");
        if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
            (class2 == 0))
                pr_cont(" none");
        else {
                if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
                        pr_cont(" boot");
                if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
                        pr_cont(" thermal");
                if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
                        pr_cont(" limited_pwr");
                if (class & ATOM_PPLIB_CLASSIFICATION_REST)
                        pr_cont(" rest");
                if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
                        pr_cont(" forced");
                if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
                        pr_cont(" 3d_perf");
                if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
                        pr_cont(" ovrdrv");
                if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
                        pr_cont(" uvd");
                if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
                        pr_cont(" 3d_low");
                if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
                        pr_cont(" acpi");
                if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
                        pr_cont(" uvd_hd2");
                if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
                        pr_cont(" uvd_hd");
                if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
                        pr_cont(" uvd_sd");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
                        pr_cont(" limited_pwr2");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
                        pr_cont(" ulv");
                if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
                        pr_cont(" uvd_mvc");
        }
        pr_cont("\n");
}

void amdgpu_dpm_print_cap_info(u32 caps)
{
        printk("\tcaps:");
        if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
                pr_cont(" single_disp");
        if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
                pr_cont(" video");
        if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
                pr_cont(" no_dc");
        pr_cont("\n");
}

void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
                                struct amdgpu_ps *rps)
{
        printk("\tstatus:");
        if (rps == adev->pm.dpm.current_ps)
                pr_cont(" c");
        if (rps == adev->pm.dpm.requested_ps)
                pr_cont(" r");
        if (rps == adev->pm.dpm.boot_ps)
                pr_cont(" b");
        pr_cont("\n");
}

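/**
 * amdgpu_dpm_get_active_displays - cache the set of enabled CRTCs
 * @adev: amdgpu device pointer
 *
 * Walks the mode config CRTC list and records a bitmask of the enabled
 * CRTCs plus their count in adev->pm.dpm, for later use by the DPM code
 * when selecting power states.
 */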
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
        struct drm_device *ddev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;

        adev->pm.dpm.new_active_crtcs = 0;
        adev->pm.dpm.new_active_crtc_count = 0;
        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc,
                                    &ddev->mode_config.crtc_list, head) {
                        amdgpu_crtc = to_amdgpu_crtc(crtc);
                        if (amdgpu_crtc->enabled) {
                                adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
                                adev->pm.dpm.new_active_crtc_count++;
                        }
                }
        }
}

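/**
 * amdgpu_dpm_get_vblank_time - length of the current vblank in microseconds
 * @adev: amdgpu device pointer
 *
 * For the first enabled CRTC with a valid mode, computes the vblank size in
 * pixels (htotal * vblank lines, including any vertical border) and divides
 * by the pixel clock in kHz, so vblank_in_pixels * 1000 / clock yields the
 * time in us. As an illustrative example, a typical 1920x1080@60 mode
 * (htotal 2200, 45 blank lines, 148500 kHz pixel clock) gives
 * 2200 * 45 * 1000 / 148500 = 666 us.
 *
 * Returns 0xffffffff when no display is enabled, i.e. an effectively
 * unlimited vblank time.
 */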
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
        u32 vblank_in_pixels;
        u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        amdgpu_crtc = to_amdgpu_crtc(crtc);
                        if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
                                vblank_in_pixels =
                                        amdgpu_crtc->hw_mode.crtc_htotal *
                                        (amdgpu_crtc->hw_mode.crtc_vblank_end -
                                        amdgpu_crtc->hw_mode.crtc_vdisplay +
                                        (amdgpu_crtc->v_border * 2));

                                vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
                                break;
                        }
                }
        }

        return vblank_time_us;
}

u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
        struct drm_device *dev = adev_to_drm(adev);
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
        u32 vrefresh = 0;

        if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        amdgpu_crtc = to_amdgpu_crtc(crtc);
                        if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
                                vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
                                break;
                        }
                }
        }

        return vrefresh;
}

bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
        switch (sensor) {
        case THERMAL_TYPE_RV6XX:
        case THERMAL_TYPE_RV770:
        case THERMAL_TYPE_EVERGREEN:
        case THERMAL_TYPE_SUMO:
        case THERMAL_TYPE_NI:
        case THERMAL_TYPE_SI:
        case THERMAL_TYPE_CI:
        case THERMAL_TYPE_KV:
                return true;
        case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
        case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
                return false; /* need special handling */
        case THERMAL_TYPE_NONE:
        case THERMAL_TYPE_EXTERNAL:
        case THERMAL_TYPE_EXTERNAL_GPIO:
        default:
                return false;
        }
}

union power_info {
        struct _ATOM_POWERPLAY_INFO info;
        struct _ATOM_POWERPLAY_INFO_V2 info_2;
        struct _ATOM_POWERPLAY_INFO_V3 info_3;
        struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
        struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
        struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
        struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
        struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
        struct _ATOM_PPLIB_FANTABLE fan;
        struct _ATOM_PPLIB_FANTABLE2 fan2;
        struct _ATOM_PPLIB_FANTABLE3 fan3;
};

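/**
 * amdgpu_parse_clk_voltage_dep_table - copy an ATOM clock/voltage table
 * @amdgpu_table: driver-side table to fill (entries are kzalloc'd here and
 *                freed by amdgpu_free_extended_power_table())
 * @atom_table: packed dependency table from the VBIOS
 *
 * Each 24-bit clock is stored split in the VBIOS record and is reassembled
 * as le16 usClockLow | (ucClockHigh << 16); the voltage is a plain le16.
 */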
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
                                              ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
        u32 size = atom_table->ucNumEntries *
                sizeof(struct amdgpu_clock_voltage_dependency_entry);
        int i;
        ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

        amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
        if (!amdgpu_table->entries)
                return -ENOMEM;

        entry = &atom_table->entries[0];
        for (i = 0; i < atom_table->ucNumEntries; i++) {
                amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
                        (entry->ucClockHigh << 16);
                amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
                entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
                        ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
        }
        amdgpu_table->count = atom_table->ucNumEntries;

        return 0;
}

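/**
 * amdgpu_get_platform_caps - fetch global PowerPlay capabilities
 * @adev: amdgpu device pointer
 *
 * Reads the platform capability flags and the backbias/voltage response
 * times from the PowerPlayInfo ATOM data table into adev->pm.dpm.
 * Returns 0 on success, -EINVAL if the table header cannot be parsed.
 */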
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        union power_info *power_info;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        u16 data_offset;
        u8 frev, crev;

        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset))
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

        adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
        adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
        adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

        return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

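/**
 * amdgpu_parse_extended_power_table - parse optional PowerPlay sub-tables
 * @adev: amdgpu device pointer
 *
 * Copies the fan table, clock/voltage dependency tables, phase shedding
 * limits, CAC leakage data and the extended-header tables (VCE, UVD, SAMU,
 * PPM, ACP, PowerTune, vddgfx) out of the VBIOS, guarding each group by
 * the table size or extended-header size so that older tables are skipped.
 * On any allocation or parse failure, everything allocated so far is
 * released via amdgpu_free_extended_power_table().
 */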
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        union power_info *power_info;
        union fan_info *fan_info;
        ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        u16 data_offset;
        u8 frev, crev;
        int ret, i;

        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset))
                return -EINVAL;
        power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

        /* fan table */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
                if (power_info->pplib3.usFanTableOffset) {
                        fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
                                                      le16_to_cpu(power_info->pplib3.usFanTableOffset));
                        adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
                        adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
                        adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
                        adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
                        adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
                        adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
                        adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
                        if (fan_info->fan.ucFanTableFormat >= 2)
                                adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
                        else
                                adev->pm.dpm.fan.t_max = 10900;
                        adev->pm.dpm.fan.cycle_delay = 100000;
                        if (fan_info->fan.ucFanTableFormat >= 3) {
                                adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
                                adev->pm.dpm.fan.default_max_fan_pwm =
                                        le16_to_cpu(fan_info->fan3.usFanPWMMax);
                                adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
                                adev->pm.dpm.fan.fan_output_sensitivity =
                                        le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
                        }
                        adev->pm.dpm.fan.ucode_fan_control = true;
                }
        }

        /* clock dependency tables, phase shedding tables */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
                if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
                                                                 dep_table);
                        if (ret) {
                                amdgpu_free_extended_power_table(adev);
                                return ret;
                        }
                }
                if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
                        ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
                                (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
                        if (clk_v->ucNumEntries) {
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
                                        le16_to_cpu(clk_v->entries[0].usSclkLow) |
                                        (clk_v->entries[0].ucSclkHigh << 16);
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
                                        le16_to_cpu(clk_v->entries[0].usMclkLow) |
                                        (clk_v->entries[0].ucMclkHigh << 16);
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
                                        le16_to_cpu(clk_v->entries[0].usVddc);
                                adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
                                        le16_to_cpu(clk_v->entries[0].usVddci);
                        }
                }
                if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
                        ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
                                (ATOM_PPLIB_PhaseSheddingLimits_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
                        ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

                        adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
                                kcalloc(psl->ucNumEntries,
                                        sizeof(struct amdgpu_phase_shedding_limits_entry),
                                        GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }

                        entry = &psl->entries[0];
                        for (i = 0; i < psl->ucNumEntries; i++) {
                                adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
                                        le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
                                adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
                                        le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
                                adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
                        }
                        adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
                                psl->ucNumEntries;
                }
        }

        /* cac data */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
                adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
                adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
                adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
                adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
                if (adev->pm.dpm.tdp_od_limit)
                        adev->pm.dpm.power_control = true;
                else
                        adev->pm.dpm.power_control = false;
                adev->pm.dpm.tdp_adjustment = 0;
                adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
                adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
                adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
                if (power_info->pplib5.usCACLeakageTableOffset) {
                        ATOM_PPLIB_CAC_Leakage_Table *cac_table =
                                (ATOM_PPLIB_CAC_Leakage_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
                        ATOM_PPLIB_CAC_Leakage_Record *entry;
                        u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
                        adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        entry = &cac_table->entries[0];
                        for (i = 0; i < cac_table->ucNumEntries; i++) {
                                if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
                                                le16_to_cpu(entry->usVddc1);
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
                                                le16_to_cpu(entry->usVddc2);
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
                                                le16_to_cpu(entry->usVddc3);
                                } else {
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
                                                le16_to_cpu(entry->usVddc);
                                        adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
                                                le32_to_cpu(entry->ulLeakageValue);
                                }
                                entry = (ATOM_PPLIB_CAC_Leakage_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
                        }
                        adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
                }
        }

        /* ext tables */
        if (le16_to_cpu(power_info->pplib.usTableSize) >=
            sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
                ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
                        (mode_info->atom_context->bios + data_offset +
                         le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
                        ext_hdr->usVCETableOffset) {
                        VCEClockInfoArray *array = (VCEClockInfoArray *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
                        ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
                                 1 + array->ucNumEntries * sizeof(VCEClockInfo));
                        ATOM_PPLIB_VCE_State_Table *states =
                                (ATOM_PPLIB_VCE_State_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
                                 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
                                 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
                        ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
                        ATOM_PPLIB_VCE_State_Record *state_entry;
                        VCEClockInfo *vce_clk;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        state_entry = &states->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                vce_clk = (VCEClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
                                adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
                                        le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
                                adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
                                        le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
                                adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
                        }
                        adev->pm.dpm.num_of_vce_states =
                                        states->numEntries > AMD_MAX_VCE_LEVELS ?
                                        AMD_MAX_VCE_LEVELS : states->numEntries;
                        for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
                                vce_clk = (VCEClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
                                adev->pm.dpm.vce_states[i].evclk =
                                        le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
                                adev->pm.dpm.vce_states[i].ecclk =
                                        le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
                                adev->pm.dpm.vce_states[i].clk_idx =
                                        state_entry->ucClockInfoIndex & 0x3f;
                                adev->pm.dpm.vce_states[i].pstate =
                                        (state_entry->ucClockInfoIndex & 0xc0) >> 6;
                                state_entry = (ATOM_PPLIB_VCE_State_Record *)
                                        ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
                        ext_hdr->usUVDTableOffset) {
                        UVDClockInfoArray *array = (UVDClockInfoArray *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
                        ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
                                 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
                        ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                UVDClockInfo *uvd_clk = (UVDClockInfo *)
                                        ((u8 *)&array->entries[0] +
                                         (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
                                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
                                        le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
                                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
                                        le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
                                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
                        ext_hdr->usSAMUTableOffset) {
                        ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
                        ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
                                        le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
                                adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
                    ext_hdr->usPPMTableOffset) {
                        ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usPPMTableOffset));
                        adev->pm.dpm.dyn_state.ppm_table =
                                kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.ppm_table) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
                        adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
                                le16_to_cpu(ppm->usCpuCoreNumber);
                        adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
                                le32_to_cpu(ppm->ulPlatformTDP);
                        adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
                                le32_to_cpu(ppm->ulSmallACPlatformTDP);
                        adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
                                le32_to_cpu(ppm->ulPlatformTDC);
                        adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
                                le32_to_cpu(ppm->ulSmallACPlatformTDC);
                        adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
                                le32_to_cpu(ppm->ulApuTDP);
                        adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
                                le32_to_cpu(ppm->ulDGpuTDP);
                        adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
                                le32_to_cpu(ppm->ulDGpuUlvPower);
                        adev->pm.dpm.dyn_state.ppm_table->tj_max =
                                le32_to_cpu(ppm->ulTjmax);
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
                        ext_hdr->usACPTableOffset) {
                        ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
                                (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
                        ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
                        u32 size = limits->numEntries *
                                sizeof(struct amdgpu_clock_voltage_dependency_entry);
                        adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
                                kzalloc(size, GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
                                limits->numEntries;
                        entry = &limits->entries[0];
                        for (i = 0; i < limits->numEntries; i++) {
                                adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
                                        le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
                                adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
                                        le16_to_cpu(entry->usVoltage);
                                entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
                                        ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
                        }
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
                        ext_hdr->usPowerTuneTableOffset) {
                        u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                        ATOM_PowerTune_Table *pt;
                        adev->pm.dpm.dyn_state.cac_tdp_table =
                                kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
                        if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
                                amdgpu_free_extended_power_table(adev);
                                return -ENOMEM;
                        }
                        if (rev > 0) {
                                ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
                                        (mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                                adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
                                        ppt->usMaximumPowerDeliveryLimit;
                                pt = &ppt->power_tune_table;
                        } else {
                                ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
                                        (mode_info->atom_context->bios + data_offset +
                                         le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
                                adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
                                pt = &ppt->power_tune_table;
                        }
                        adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
                        adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
                                le16_to_cpu(pt->usConfigurableTDP);
                        adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
                        adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
                                le16_to_cpu(pt->usBatteryPowerLimit);
                        adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
                                le16_to_cpu(pt->usSmallPowerLimit);
                        adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
                                le16_to_cpu(pt->usLowCACLeakage);
                        adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
                                le16_to_cpu(pt->usHighCACLeakage);
                }
                if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
                                ext_hdr->usSclkVddgfxTableOffset) {
                        dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
                                (mode_info->atom_context->bios + data_offset +
                                 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
                        ret = amdgpu_parse_clk_voltage_dep_table(
                                        &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
                                        dep_table);
                        if (ret) {
                                kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
                                return ret;
                        }
                }
        }

        return 0;
}

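/**
 * amdgpu_free_extended_power_table - release all parsed PowerPlay tables
 * @adev: amdgpu device pointer
 *
 * Frees every dynamically allocated dyn_state table. kfree() tolerates
 * NULL, so this is safe to call on a partially parsed state, which is how
 * the error paths in amdgpu_parse_extended_power_table() use it.
 */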
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
        struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

        kfree(dyn_state->vddc_dependency_on_sclk.entries);
        kfree(dyn_state->vddci_dependency_on_mclk.entries);
        kfree(dyn_state->vddc_dependency_on_mclk.entries);
        kfree(dyn_state->mvdd_dependency_on_mclk.entries);
        kfree(dyn_state->cac_leakage_table.entries);
        kfree(dyn_state->phase_shedding_limits_table.entries);
        kfree(dyn_state->ppm_table);
        kfree(dyn_state->cac_tdp_table);
        kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
        kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
        kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
        kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
        kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
        "NONE",
        "lm63",
        "adm1032",
        "adm1030",
        "max6649",
        "lm64",
        "f75375",
        "RV6xx",
        "RV770",
        "adt7473",
        "NONE",
        "External GPIO",
        "Evergreen",
        "emc2103",
        "Sumo",
        "Northern Islands",
        "Southern Islands",
        "lm96163",
        "Sea Islands",
        "Kaveri/Kabini",
};

void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        ATOM_PPLIB_POWERPLAYTABLE *power_table;
        int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
        ATOM_PPLIB_THERMALCONTROLLER *controller;
        struct amdgpu_i2c_bus_rec i2c_bus;
        u16 data_offset;
        u8 frev, crev;

        if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
                                   &frev, &crev, &data_offset))
                return;
        power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
                (mode_info->atom_context->bios + data_offset);
        controller = &power_table->sThermalController;

        /* add the i2c bus for thermal/fan chip */
        if (controller->ucType > 0) {
                if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
                        adev->pm.no_fan = true;
                adev->pm.fan_pulses_per_revolution =
                        controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
                if (adev->pm.fan_pulses_per_revolution) {
                        adev->pm.fan_min_rpm = controller->ucFanMinRPM;
                        adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
                }
                if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_NI;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_SI;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_CI;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
                        DRM_INFO("Internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_KV;
                } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
                        DRM_INFO("External GPIO thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
                } else if (controller->ucType ==
                           ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
                        DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
                } else if (controller->ucType ==
                           ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
                        DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
                } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
                        DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
                                 pp_lib_thermal_controller_names[controller->ucType],
                                 controller->ucI2cAddress >> 1,
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                        adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
                        i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
                        adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
                        if (adev->pm.i2c_bus) {
                                struct i2c_board_info info = { };
                                const char *name = pp_lib_thermal_controller_names[controller->ucType];
                                info.addr = controller->ucI2cAddress >> 1;
                                strlcpy(info.type, name, sizeof(info.type));
                                i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
                        }
                } else {
                        DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
                                 controller->ucType,
                                 controller->ucI2cAddress >> 1,
                                 (controller->ucFanParameters &
                                  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
                }
        }
}

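/**
 * amdgpu_get_pcie_gen_support - pick a supported PCIE gen
 * @adev: amdgpu device pointer
 * @sys_mask: CAIL mask of link speeds the system supports
 * @asic_gen: gen already configured on the ASIC, if any
 * @default_gen: gen requested by the caller
 *
 * If the ASIC already reports a specific gen, that wins; otherwise the
 * requested default is granted only when the system mask allows it,
 * falling back to GEN1.
 */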
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
                                                 u32 sys_mask,
                                                 enum amdgpu_pcie_gen asic_gen,
                                                 enum amdgpu_pcie_gen default_gen)
{
        switch (asic_gen) {
        case AMDGPU_PCIE_GEN1:
                return AMDGPU_PCIE_GEN1;
        case AMDGPU_PCIE_GEN2:
                return AMDGPU_PCIE_GEN2;
        case AMDGPU_PCIE_GEN3:
                return AMDGPU_PCIE_GEN3;
        default:
                if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
                    (default_gen == AMDGPU_PCIE_GEN3))
                        return AMDGPU_PCIE_GEN3;
                else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
                         (default_gen == AMDGPU_PCIE_GEN2))
                        return AMDGPU_PCIE_GEN2;
                else
                        return AMDGPU_PCIE_GEN1;
        }
        return AMDGPU_PCIE_GEN1;
}

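/**
 * amdgpu_get_vce_clock_state - look up a cached VCE clock state
 * @handle: amdgpu device pointer (as void *)
 * @idx: index into the VCE state table
 *
 * Returns the state parsed out of the VBIOS earlier, or NULL if @idx is
 * out of range.
 */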
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (idx < adev->pm.dpm.num_of_vce_states)
                return &adev->pm.dpm.vce_states[idx];

        return NULL;
}

int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
}

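/**
 * amdgpu_dpm_set_powergating_by_smu - gate or ungate an IP block via the SMU
 * @adev: amdgpu device pointer
 * @block_type: AMD_IP_BLOCK_TYPE_* to act on
 * @gate: true to power gate, false to ungate
 *
 * Routes the request to the powerplay callback (or the SW SMU path for
 * JPEG). Only the UVD/VCE cases take adev->pm.mutex here; see the deadlock
 * note in the function body.
 */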
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        bool swsmu = is_support_sw_smu(adev);

        switch (block_type) {
        case AMD_IP_BLOCK_TYPE_UVD:
        case AMD_IP_BLOCK_TYPE_VCE:
                if (pp_funcs && pp_funcs->set_powergating_by_smu) {
                        /*
                         * TODO: need a better lock mechanism
                         *
                         * Here adev->pm.mutex lock protection is enforced on
                         * the UVD and VCE cases only, since the other cases
                         * may already be protected in amdgpu_pm.c. This is a
                         * quick fix for the deadlock issue below:
                         *     INFO: task ocltst:2028 blocked for more than 120 seconds.
                         *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
                         *     "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
                         *     ocltst          D    0  2028   2026 0x00000000
                         *     Call Trace:
                         *     __schedule+0x2c0/0x870
                         *     schedule+0x2c/0x70
                         *     schedule_preempt_disabled+0xe/0x10
                         *     __mutex_lock.isra.9+0x26d/0x4e0
                         *     __mutex_lock_slowpath+0x13/0x20
                         *     ? __mutex_lock_slowpath+0x13/0x20
                         *     mutex_lock+0x2f/0x40
                         *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
                         *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
                         *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
                         *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
                         *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
                         *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
                         */
                        mutex_lock(&adev->pm.mutex);
                        ret = pp_funcs->set_powergating_by_smu(
                                adev->powerplay.pp_handle, block_type, gate);
                        mutex_unlock(&adev->pm.mutex);
                }
                break;
        case AMD_IP_BLOCK_TYPE_GFX:
        case AMD_IP_BLOCK_TYPE_VCN:
        case AMD_IP_BLOCK_TYPE_SDMA:
        case AMD_IP_BLOCK_TYPE_GMC:
        case AMD_IP_BLOCK_TYPE_ACP:
                if (pp_funcs && pp_funcs->set_powergating_by_smu)
                        ret = pp_funcs->set_powergating_by_smu(
                                adev->powerplay.pp_handle, block_type, gate);
                break;
        case AMD_IP_BLOCK_TYPE_JPEG:
                if (swsmu)
                        ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
                break;
        default:
                break;
        }

        return ret;
}

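/**
 * amdgpu_dpm_baco_enter - enter BACO (Bus Active, Chip Off)
 * @adev: amdgpu device pointer
 *
 * Return: 0 on success, -ENOENT if the ASIC does not implement BACO state
 * control, other negative error code on failure.
 */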
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;

        if (!pp_funcs || !pp_funcs->set_asic_baco_state)
                return -ENOENT;

        /* enter BACO state */
        return pp_funcs->set_asic_baco_state(pp_handle, 1);
}

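/**
 * amdgpu_dpm_baco_exit - exit BACO (Bus Active, Chip Off)
 * @adev: amdgpu device pointer
 *
 * Return: 0 on success, -ENOENT if the ASIC does not implement BACO state
 * control, other negative error code on failure.
 */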
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;

        if (!pp_funcs || !pp_funcs->set_asic_baco_state)
                return -ENOENT;

        /* exit BACO state */
        return pp_funcs->set_asic_baco_state(pp_handle, 0);
}

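/**
 * amdgpu_dpm_set_mp1_state - notify the SMU (MP1) firmware of a state change
 * @adev: amdgpu device pointer
 * @mp1_state: target MP1 state (e.g. prepare for driver unload or reset)
 *
 * Return: 0 on success or if unsupported, negative error code on failure.
 */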
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
                             enum pp_mp1_state mp1_state)
{
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (pp_funcs && pp_funcs->set_mp1_state)
                ret = pp_funcs->set_mp1_state(adev->powerplay.pp_handle,
                                              mp1_state);

        return ret;
}

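/**
 * amdgpu_dpm_is_baco_supported - query BACO support
 * @adev: amdgpu device pointer
 *
 * Return: true if the ASIC reports BACO capability, false otherwise or if
 * the capability query itself fails.
 */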
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        bool baco_cap;

        if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
                return false;

        if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
                return false;

        return baco_cap;
}

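/**
 * amdgpu_dpm_mode2_reset - perform a mode 2 ASIC reset
 * @adev: amdgpu device pointer
 *
 * Return: 0 on success, -ENOENT if mode 2 reset is not implemented for
 * this ASIC, other negative error code on failure.
 */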
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;

        if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
                return -ENOENT;

        return pp_funcs->asic_reset_mode_2(pp_handle);
}

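/**
 * amdgpu_dpm_baco_reset - reset the ASIC by cycling through BACO
 * @adev: amdgpu device pointer
 *
 * Enters and then immediately exits BACO; the chip is reset as a side
 * effect of the power cycle.
 *
 * Return: 0 on success, negative error code on failure.
 */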
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;
        int ret = 0;

        if (!pp_funcs || !pp_funcs->set_asic_baco_state)
                return -ENOENT;

        /* enter BACO state */
        ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
        if (ret)
                return ret;

        /* exit BACO state */
        return pp_funcs->set_asic_baco_state(pp_handle, 0);
}

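/**
 * amdgpu_dpm_is_mode1_reset_supported - query mode 1 reset support
 * @adev: amdgpu device pointer
 *
 * Only implemented for the software SMU (swSMU) path.
 *
 * Return: true if mode 1 reset is supported, false otherwise.
 */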
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;

        if (is_support_sw_smu(adev))
                return smu_mode1_reset_is_support(smu);

        return false;
}

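/**
 * amdgpu_dpm_mode1_reset - perform a mode 1 ASIC reset via the SMU
 * @adev: amdgpu device pointer
 *
 * Return: 0 on success, -EOPNOTSUPP on non-swSMU ASICs, other negative
 * error code on failure.
 */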
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
        struct smu_context *smu = &adev->smu;

        if (is_support_sw_smu(adev))
                return smu_mode1_reset(smu);

        return -EOPNOTSUPP;
}

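/**
 * amdgpu_dpm_switch_power_profile - enable or disable a power profile
 * @adev: amdgpu device pointer
 * @type: profile to switch (e.g. PP_SMC_POWER_PROFILE_COMPUTE)
 * @en: true to enable the profile, false to disable it
 *
 * A no-op under SR-IOV, where the host owns power management.
 *
 * Return: 0 on success, negative error code on failure.
 */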
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
                                    enum PP_SMC_POWER_PROFILE type,
                                    bool en)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (amdgpu_sriov_vf(adev))
                return 0;

        if (pp_funcs && pp_funcs->switch_power_profile)
                ret = pp_funcs->switch_power_profile(
                        adev->powerplay.pp_handle, type, en);

        return ret;
}

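/**
 * amdgpu_dpm_set_xgmi_pstate - set the XGMI link performance state
 * @adev: amdgpu device pointer
 * @pstate: requested XGMI pstate
 *
 * Return: 0 on success or if unsupported, negative error code on failure.
 */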
int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
                               uint32_t pstate)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (pp_funcs && pp_funcs->set_xgmi_pstate)
                ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
                                                pstate);

        return ret;
}

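/**
 * amdgpu_dpm_set_df_cstate - allow or disallow data fabric C-states
 * @adev: amdgpu device pointer
 * @cstate: requested DF C-state policy (allow/disallow)
 *
 * Return: 0 on success or if unsupported, negative error code on failure.
 */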
int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
                             uint32_t cstate)
{
        int ret = 0;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        void *pp_handle = adev->powerplay.pp_handle;

        if (pp_funcs && pp_funcs->set_df_cstate)
                ret = pp_funcs->set_df_cstate(pp_handle, cstate);

        return ret;
}

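/**
 * amdgpu_dpm_allow_xgmi_power_down - allow or deny XGMI power down
 * @adev: amdgpu device pointer
 * @en: true to allow XGMI power down, false to deny it
 *
 * Only implemented for the swSMU path; a no-op elsewhere.
 *
 * Return: 0 on success, negative error code on failure.
 */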
int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
        struct smu_context *smu = &adev->smu;

        if (is_support_sw_smu(adev))
                return smu_allow_xgmi_power_down(smu, en);

        return 0;
}

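/**
 * amdgpu_dpm_enable_mgpu_fan_boost - enable fan boost on multi-GPU boards
 * @adev: amdgpu device pointer
 *
 * Return: 0 on success or if unsupported, negative error code on failure.
 */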
int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
{
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
                ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);

        return ret;
}

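/**
 * amdgpu_dpm_set_clockgating_by_smu - request a clockgating change via the SMU
 * @adev: amdgpu device pointer
 * @msg_id: encoded message identifying the target block and the requested
 *	clockgating state
 *
 * Return: 0 on success or if unsupported, negative error code on failure.
 */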
int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
                                      uint32_t msg_id)
{
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (pp_funcs && pp_funcs->set_clockgating_by_smu)
                ret = pp_funcs->set_clockgating_by_smu(pp_handle,
                                                       msg_id);

        return ret;
}

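/**
 * amdgpu_dpm_smu_i2c_bus_access - arbitrate access to the SMU-owned I2C bus
 * @adev: amdgpu device pointer
 * @acquire: true to take the bus from the SMU, false to hand it back
 *
 * Return: 0 on success, -EOPNOTSUPP if the ASIC provides no such control,
 * other negative error code on failure.
 */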
int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
                                  bool acquire)
{
        void *pp_handle = adev->powerplay.pp_handle;
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = -EOPNOTSUPP;

        if (pp_funcs && pp_funcs->smu_i2c_bus_access)
                ret = pp_funcs->smu_i2c_bus_access(pp_handle,
                                                   acquire);

        return ret;
}

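/**
 * amdgpu_pm_acpi_event_handler - react to ACPI power source changes
 * @adev: amdgpu device pointer
 *
 * Updates adev->pm.ac_power from the system power supply state and
 * notifies BAPM and/or the SMU so power limits can follow the AC/DC
 * switch.
 */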
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
        if (adev->pm.dpm_enabled) {
                mutex_lock(&adev->pm.mutex);
                adev->pm.ac_power = power_supply_is_system_supplied() > 0;
                if (adev->powerplay.pp_funcs &&
                    adev->powerplay.pp_funcs->enable_bapm)
                        amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
                mutex_unlock(&adev->pm.mutex);

                if (is_support_sw_smu(adev))
                        smu_set_ac_dc(&adev->smu);
        }
}

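/**
 * amdgpu_dpm_read_sensor - read a power management sensor
 * @adev: amdgpu device pointer
 * @sensor: sensor to read (AMDGPU_PP_SENSOR_*)
 * @data: buffer receiving the sensor value
 * @size: in: size of @data in bytes; out: number of bytes written
 *
 * Example (as used by the thermal worker below):
 *
 *	int temp, size = sizeof(temp);
 *
 *	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
 *				    (void *)&temp, &size))
 *		// temp now holds the GPU temperature
 *
 * Return: 0 on success, -EINVAL on bad arguments or a missing read_sensor
 * callback, other negative error code on failure.
 */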
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
                           void *data, uint32_t *size)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret = 0;

        if (!data || !size)
                return -EINVAL;

        if (pp_funcs && pp_funcs->read_sensor)
                ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
                                            sensor, data, size);
        else
                ret = -EINVAL;

        return ret;
}

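/**
 * amdgpu_dpm_thermal_work_handler - thermal throttling worker
 * @work: embedded work struct (pm.dpm.thermal.work)
 *
 * Reads the GPU temperature and switches between the internal thermal
 * power state and the user-selected state, then recomputes the clocks.
 */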
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device,
                             pm.dpm.thermal.work);
        /* switch to the thermal state */
        enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
        int temp, size = sizeof(temp);

        if (!adev->pm.dpm_enabled)
                return;

        if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
                                    (void *)&temp, &size)) {
                if (temp < adev->pm.dpm.thermal.min_temp)
                        /* switch back the user state */
                        dpm_state = adev->pm.dpm.user_state;
        } else {
                if (adev->pm.dpm.thermal.high_to_low)
                        /* switch back the user state */
                        dpm_state = adev->pm.dpm.user_state;
        }
        mutex_lock(&adev->pm.mutex);
        adev->pm.dpm.thermal_active =
                (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL);
        adev->pm.dpm.state = dpm_state;
        mutex_unlock(&adev->pm.mutex);

        amdgpu_pm_compute_clocks(adev);
}

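/**
 * amdgpu_dpm_pick_power_state - select a power state matching a request
 * @adev: amdgpu device pointer
 * @dpm_state: requested power state type
 *
 * Scans the power state table for the best match, honoring
 * single-display-only states, and walks a chain of fallbacks
 * (e.g. thermal -> ACPI -> battery -> performance) when no exact
 * match exists.
 *
 * Return: the chosen power state, or NULL if none could be found.
 */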
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
                                                     enum amd_pm_state_type dpm_state)
{
        int i;
        struct amdgpu_ps *ps;
        u32 ui_class;
        bool single_display = adev->pm.dpm.new_active_crtc_count < 2;

        /* check if the vblank period is too short to adjust the mclk */
        if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
                if (amdgpu_dpm_vblank_too_short(adev))
                        single_display = false;
        }

        /* certain older ASICs have a separate 3D performance state,
         * so try that first if the user selected performance
         */
        if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
                dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
        /* balanced states don't exist at the moment */
        if (dpm_state == POWER_STATE_TYPE_BALANCED)
                dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
        /* Pick the best power state based on current conditions */
        for (i = 0; i < adev->pm.dpm.num_ps; i++) {
                ps = &adev->pm.dpm.ps[i];
                ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
                switch (dpm_state) {
                /* user states */
                case POWER_STATE_TYPE_BATTERY:
                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
                                        if (single_display)
                                                return ps;
                                } else
                                        return ps;
                        }
                        break;
                case POWER_STATE_TYPE_BALANCED:
                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
                                        if (single_display)
                                                return ps;
                                } else
                                        return ps;
                        }
                        break;
                case POWER_STATE_TYPE_PERFORMANCE:
                        if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
                                if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
                                        if (single_display)
                                                return ps;
                                } else
                                        return ps;
                        }
                        break;
                /* internal states */
                case POWER_STATE_TYPE_INTERNAL_UVD:
                        if (adev->pm.dpm.uvd_ps)
                                return adev->pm.dpm.uvd_ps;
                        else
                                break;
                case POWER_STATE_TYPE_INTERNAL_UVD_SD:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_UVD_HD:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
                        if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_BOOT:
                        return adev->pm.dpm.boot_ps;
                case POWER_STATE_TYPE_INTERNAL_THERMAL:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_ACPI:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_ULV:
                        if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
                                return ps;
                        break;
                case POWER_STATE_TYPE_INTERNAL_3DPERF:
                        if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
                                return ps;
                        break;
                default:
                        break;
                }
        }
        /* use a fallback state if we didn't match */
        switch (dpm_state) {
        case POWER_STATE_TYPE_INTERNAL_UVD_SD:
                dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
                goto restart_search;
        case POWER_STATE_TYPE_INTERNAL_UVD_HD:
        case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
        case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
                if (adev->pm.dpm.uvd_ps) {
                        return adev->pm.dpm.uvd_ps;
                } else {
                        dpm_state = POWER_STATE_TYPE_PERFORMANCE;
                        goto restart_search;
                }
        case POWER_STATE_TYPE_INTERNAL_THERMAL:
                dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
                goto restart_search;
        case POWER_STATE_TYPE_INTERNAL_ACPI:
                dpm_state = POWER_STATE_TYPE_BATTERY;
                goto restart_search;
        case POWER_STATE_TYPE_BATTERY:
        case POWER_STATE_TYPE_BALANCED:
        case POWER_STATE_TYPE_INTERNAL_3DPERF:
                dpm_state = POWER_STATE_TYPE_PERFORMANCE;
                goto restart_search;
        default:
                break;
        }

        return NULL;
}

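/**
 * amdgpu_dpm_change_power_state_locked - program a new power state
 * @adev: amdgpu device pointer
 *
 * Picks the power state for the current conditions and, if it differs
 * from the current one, programs it into the hardware. Must be called
 * with adev->pm.mutex held (see amdgpu_pm_compute_clocks()).
 */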
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
        struct amdgpu_ps *ps;
        enum amd_pm_state_type dpm_state;
        int ret;
        bool equal = false;

        /* if dpm init failed */
        if (!adev->pm.dpm_enabled)
                return;

        if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
                /* add other state override checks here */
                if ((!adev->pm.dpm.thermal_active) &&
                    (!adev->pm.dpm.uvd_active))
                        adev->pm.dpm.state = adev->pm.dpm.user_state;
        }
        dpm_state = adev->pm.dpm.state;

        ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
        if (ps)
                adev->pm.dpm.requested_ps = ps;
        else
                return;

        if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
                printk("switching from power state:\n");
                amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
                printk("switching to power state:\n");
                amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
        }

        /* update whether vce is active */
        ps->vce_active = adev->pm.dpm.vce_active;
        if (adev->powerplay.pp_funcs->display_configuration_changed)
                amdgpu_dpm_display_configuration_changed(adev);

        ret = amdgpu_dpm_pre_set_power_state(adev);
        if (ret)
                return;

        if (adev->powerplay.pp_funcs->check_state_equal) {
                if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps,
                                                 adev->pm.dpm.requested_ps,
                                                 &equal))
                        equal = false;
        }

        if (equal)
                return;

        amdgpu_dpm_set_power_state(adev);
        amdgpu_dpm_post_set_power_state(adev);

        adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
        adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

        if (adev->powerplay.pp_funcs->force_performance_level) {
                if (adev->pm.dpm.thermal_active) {
                        enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;

                        /* force low perf level for thermal */
                        amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
                        /* save the user's level */
                        adev->pm.dpm.forced_level = level;
                } else {
                        /* otherwise, user selected level */
                        amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
                }
        }
}

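/**
 * amdgpu_pm_compute_clocks - recompute clocks after a configuration change
 * @adev: amdgpu device pointer
 *
 * Updates display bandwidth, waits for all ready rings to go idle, then
 * either dispatches a display-config-change task to powerplay or
 * re-evaluates the legacy power state selection.
 */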
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
        int i = 0;

        if (!adev->pm.dpm_enabled)
                return;

        if (adev->mode_info.num_crtc)
                amdgpu_display_bandwidth_update(adev);

        for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
                struct amdgpu_ring *ring = adev->rings[i];

                if (ring && ring->sched.ready)
                        amdgpu_fence_wait_empty(ring);
        }

        if (adev->powerplay.pp_funcs->dispatch_tasks) {
                if (!amdgpu_device_has_dc_support(adev)) {
                        mutex_lock(&adev->pm.mutex);
                        amdgpu_dpm_get_active_displays(adev);
                        adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
                        adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
                        adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
                        /* we have issues with mclk switching with
                         * refresh rates over 120 Hz on the non-DC code.
                         */
                        if (adev->pm.pm_display_cfg.vrefresh > 120)
                                adev->pm.pm_display_cfg.min_vblank_time = 0;
                        if (adev->powerplay.pp_funcs->display_configuration_change)
                                adev->powerplay.pp_funcs->display_configuration_change(
                                                        adev->powerplay.pp_handle,
                                                        &adev->pm.pm_display_cfg);
                        mutex_unlock(&adev->pm.mutex);
                }
                amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
        } else {
                mutex_lock(&adev->pm.mutex);
                amdgpu_dpm_get_active_displays(adev);
                amdgpu_dpm_change_power_state_locked(adev);
                mutex_unlock(&adev->pm.mutex);
        }
}

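/**
 * amdgpu_dpm_enable_uvd - power up/down UVD
 * @adev: amdgpu device pointer
 * @enable: true to power up UVD, false to power it down
 *
 * On SI parts this selects a UVD power state; everywhere else it asks the
 * SMU to (un)gate the block. Also toggles the low memory pstate for 4K
 * decode on Stoney.
 */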
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        if (adev->family == AMDGPU_FAMILY_SI) {
                mutex_lock(&adev->pm.mutex);
                if (enable) {
                        adev->pm.dpm.uvd_active = true;
                        adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
                } else {
                        adev->pm.dpm.uvd_active = false;
                }
                mutex_unlock(&adev->pm.mutex);

                amdgpu_pm_compute_clocks(adev);
        } else {
                ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
                if (ret)
                        DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
                                  enable ? "enable" : "disable", ret);

                /* enable/disable Low Memory PState for UVD (4k videos) */
                if (adev->asic_type == CHIP_STONEY &&
                    adev->uvd.decode_image_width >= WIDTH_4K) {
                        struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

                        if (hwmgr && hwmgr->hwmgr_func &&
                            hwmgr->hwmgr_func->update_nbdpm_pstate)
                                hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
                                                                       !enable,
                                                                       true);
                }
        }
}

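/**
 * amdgpu_dpm_enable_vce - power up/down VCE
 * @adev: amdgpu device pointer
 * @enable: true to power up VCE, false to power it down
 *
 * On SI parts this toggles the VCE power state; everywhere else it asks
 * the SMU to (un)gate the block.
 */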
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        if (adev->family == AMDGPU_FAMILY_SI) {
                mutex_lock(&adev->pm.mutex);
                if (enable) {
                        adev->pm.dpm.vce_active = true;
                        /* XXX select vce level based on ring/task */
                        adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
                } else {
                        adev->pm.dpm.vce_active = false;
                }
                mutex_unlock(&adev->pm.mutex);

                amdgpu_pm_compute_clocks(adev);
        } else {
                ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
                if (ret)
                        DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
                                  enable ? "enable" : "disable", ret);
        }
}

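/**
 * amdgpu_pm_print_power_states - dump all power states to the kernel log
 * @adev: amdgpu device pointer
 */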
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
        int i;

        if (adev->powerplay.pp_funcs->print_power_state == NULL)
                return;

        for (i = 0; i < adev->pm.dpm.num_ps; i++)
                amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}

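/**
 * amdgpu_dpm_enable_jpeg - power up/down the JPEG block via the SMU
 * @adev: amdgpu device pointer
 * @enable: true to power up JPEG, false to power it down
 */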
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
        int ret = 0;

        ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
        if (ret)
                DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
                          enable ? "enable" : "disable", ret);
}

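/**
 * amdgpu_pm_load_smu_firmware - load the SMU firmware
 * @adev: amdgpu device pointer
 * @smu_version: output; the SMU firmware version, written only when a
 *	load_firmware callback exists and succeeds
 *
 * Return: 0 on success or if no load_firmware callback is present,
 * negative error code on failure.
 */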
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
        int r;

        if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
                r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
                if (r) {
                        pr_err("smu firmware loading failed\n");
                        return r;
                }

                *smu_version = adev->pm.fw_version;
        }

        return 0;
}