Merge tag 'iommu-updates-v5.2' of ssh://gitolite.kernel.org/pub/scm/linux/kernel...
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_pm.c
1 /*
2  * Copyright 2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: Rafał Miłecki <zajec5@gmail.com>
23  *          Alex Deucher <alexdeucher@gmail.com>
24  */
25 #include <drm/drmP.h>
26 #include "amdgpu.h"
27 #include "amdgpu_drv.h"
28 #include "amdgpu_pm.h"
29 #include "amdgpu_dpm.h"
30 #include "amdgpu_display.h"
31 #include "amdgpu_smu.h"
32 #include "atom.h"
33 #include <linux/power_supply.h>
34 #include <linux/hwmon.h>
35 #include <linux/hwmon-sysfs.h>
36 #include <linux/nospec.h>
37 #include "hwmgr.h"
38 #define WIDTH_4K 3840
39
40 static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
41
/*
 * Human-readable names for the AMD_CG_SUPPORT_* clock-gating feature flags.
 * Used when reporting which clock/power-gating features a chip supports.
 * The table is terminated by a {0, NULL} sentinel entry.
 */
static const struct cg_flag_name clocks[] = {
        {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
        {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
        {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
        {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
        {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
        {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
        {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
        {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
        {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
        {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
        {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
        {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
        {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
        {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_DRM_MGCG, "Digital Right Management Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"},
        {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
        {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
        {0, NULL},
};
69
70 void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
71 {
72         if (adev->pm.dpm_enabled) {
73                 mutex_lock(&adev->pm.mutex);
74                 if (power_supply_is_system_supplied() > 0)
75                         adev->pm.ac_power = true;
76                 else
77                         adev->pm.ac_power = false;
78                 if (adev->powerplay.pp_funcs->enable_bapm)
79                         amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
80                 mutex_unlock(&adev->pm.mutex);
81         }
82 }
83
84 int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
85                            void *data, uint32_t *size)
86 {
87         int ret = 0;
88
89         if (!data || !size)
90                 return -EINVAL;
91
92         if (is_support_sw_smu(adev))
93                 ret = smu_read_sensor(&adev->smu, sensor, data, size);
94         else {
95                 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
96                         ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
97                                                                     sensor, data, size);
98                 else
99                         ret = -EINVAL;
100         }
101
102         return ret;
103 }
104
105 /**
106  * DOC: power_dpm_state
107  *
108  * The power_dpm_state file is a legacy interface and is only provided for
109  * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
110  * certain power related parameters.  The file power_dpm_state is used for this.
111  * It accepts the following arguments:
112  *
113  * - battery
114  *
115  * - balanced
116  *
117  * - performance
118  *
119  * battery
120  *
121  * On older GPUs, the vbios provided a special power state for battery
122  * operation.  Selecting battery switched to this state.  This is no
123  * longer provided on newer GPUs so the option does nothing in that case.
124  *
125  * balanced
126  *
127  * On older GPUs, the vbios provided a special power state for balanced
128  * operation.  Selecting balanced switched to this state.  This is no
129  * longer provided on newer GPUs so the option does nothing in that case.
130  *
131  * performance
132  *
133  * On older GPUs, the vbios provided a special power state for performance
134  * operation.  Selecting performance switched to this state.  This is no
135  * longer provided on newer GPUs so the option does nothing in that case.
136  *
137  */
138
139 static ssize_t amdgpu_get_dpm_state(struct device *dev,
140                                     struct device_attribute *attr,
141                                     char *buf)
142 {
143         struct drm_device *ddev = dev_get_drvdata(dev);
144         struct amdgpu_device *adev = ddev->dev_private;
145         enum amd_pm_state_type pm;
146
147         if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
148                 pm = amdgpu_smu_get_current_power_state(adev);
149         else if (adev->powerplay.pp_funcs->get_current_power_state)
150                 pm = amdgpu_dpm_get_current_power_state(adev);
151         else
152                 pm = adev->pm.dpm.user_state;
153
154         return snprintf(buf, PAGE_SIZE, "%s\n",
155                         (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
156                         (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
157 }
158
159 static ssize_t amdgpu_set_dpm_state(struct device *dev,
160                                     struct device_attribute *attr,
161                                     const char *buf,
162                                     size_t count)
163 {
164         struct drm_device *ddev = dev_get_drvdata(dev);
165         struct amdgpu_device *adev = ddev->dev_private;
166         enum amd_pm_state_type  state;
167
168         if (strncmp("battery", buf, strlen("battery")) == 0)
169                 state = POWER_STATE_TYPE_BATTERY;
170         else if (strncmp("balanced", buf, strlen("balanced")) == 0)
171                 state = POWER_STATE_TYPE_BALANCED;
172         else if (strncmp("performance", buf, strlen("performance")) == 0)
173                 state = POWER_STATE_TYPE_PERFORMANCE;
174         else {
175                 count = -EINVAL;
176                 goto fail;
177         }
178
179         if (adev->powerplay.pp_funcs->dispatch_tasks) {
180                 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
181         } else {
182                 mutex_lock(&adev->pm.mutex);
183                 adev->pm.dpm.user_state = state;
184                 mutex_unlock(&adev->pm.mutex);
185
186                 /* Can't set dpm state when the card is off */
187                 if (!(adev->flags & AMD_IS_PX) ||
188                     (ddev->switch_power_state == DRM_SWITCH_POWER_ON))
189                         amdgpu_pm_compute_clocks(adev);
190         }
191 fail:
192         return count;
193 }
194
195
196 /**
197  * DOC: power_dpm_force_performance_level
198  *
199  * The amdgpu driver provides a sysfs API for adjusting certain power
200  * related parameters.  The file power_dpm_force_performance_level is
201  * used for this.  It accepts the following arguments:
202  *
203  * - auto
204  *
205  * - low
206  *
207  * - high
208  *
209  * - manual
210  *
211  * - profile_standard
212  *
213  * - profile_min_sclk
214  *
215  * - profile_min_mclk
216  *
217  * - profile_peak
218  *
219  * auto
220  *
221  * When auto is selected, the driver will attempt to dynamically select
222  * the optimal power profile for current conditions in the driver.
223  *
224  * low
225  *
226  * When low is selected, the clocks are forced to the lowest power state.
227  *
228  * high
229  *
230  * When high is selected, the clocks are forced to the highest power state.
231  *
232  * manual
233  *
234  * When manual is selected, the user can manually adjust which power states
235  * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
236  * and pp_dpm_pcie files and adjust the power state transition heuristics
237  * via the pp_power_profile_mode sysfs file.
238  *
239  * profile_standard
240  * profile_min_sclk
241  * profile_min_mclk
242  * profile_peak
243  *
244  * When the profiling modes are selected, clock and power gating are
245  * disabled and the clocks are set for different profiling cases. This
246  * mode is recommended for profiling specific work loads where you do
247  * not want clock or power gating for clock fluctuation to interfere
248  * with your results. profile_standard sets the clocks to a fixed clock
249  * level which varies from asic to asic.  profile_min_sclk forces the sclk
250  * to the lowest level.  profile_min_mclk forces the mclk to the lowest level.
251  * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
252  *
253  */
254
255 static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
256                                                 struct device_attribute *attr,
257                                                                 char *buf)
258 {
259         struct drm_device *ddev = dev_get_drvdata(dev);
260         struct amdgpu_device *adev = ddev->dev_private;
261         enum amd_dpm_forced_level level = 0xff;
262
263         if  ((adev->flags & AMD_IS_PX) &&
264              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
265                 return snprintf(buf, PAGE_SIZE, "off\n");
266
267         if (is_support_sw_smu(adev))
268                 level = smu_get_performance_level(&adev->smu);
269         else if (adev->powerplay.pp_funcs->get_performance_level)
270                 level = amdgpu_dpm_get_performance_level(adev);
271         else
272                 level = adev->pm.dpm.forced_level;
273
274         return snprintf(buf, PAGE_SIZE, "%s\n",
275                         (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
276                         (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
277                         (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
278                         (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
279                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
280                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
281                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
282                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
283                         "unknown");
284 }
285
286 static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
287                                                        struct device_attribute *attr,
288                                                        const char *buf,
289                                                        size_t count)
290 {
291         struct drm_device *ddev = dev_get_drvdata(dev);
292         struct amdgpu_device *adev = ddev->dev_private;
293         enum amd_dpm_forced_level level;
294         enum amd_dpm_forced_level current_level = 0xff;
295         int ret = 0;
296
297         /* Can't force performance level when the card is off */
298         if  ((adev->flags & AMD_IS_PX) &&
299              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
300                 return -EINVAL;
301
302         if (is_support_sw_smu(adev))
303                 current_level = smu_get_performance_level(&adev->smu);
304         else if (adev->powerplay.pp_funcs->get_performance_level)
305                 current_level = amdgpu_dpm_get_performance_level(adev);
306
307         if (strncmp("low", buf, strlen("low")) == 0) {
308                 level = AMD_DPM_FORCED_LEVEL_LOW;
309         } else if (strncmp("high", buf, strlen("high")) == 0) {
310                 level = AMD_DPM_FORCED_LEVEL_HIGH;
311         } else if (strncmp("auto", buf, strlen("auto")) == 0) {
312                 level = AMD_DPM_FORCED_LEVEL_AUTO;
313         } else if (strncmp("manual", buf, strlen("manual")) == 0) {
314                 level = AMD_DPM_FORCED_LEVEL_MANUAL;
315         } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
316                 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
317         } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
318                 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
319         } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
320                 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
321         } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
322                 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
323         } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
324                 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
325         }  else {
326                 count = -EINVAL;
327                 goto fail;
328         }
329
330         if (amdgpu_sriov_vf(adev)) {
331                 if (amdgim_is_hwperf(adev) &&
332                     adev->virt.ops->force_dpm_level) {
333                         mutex_lock(&adev->pm.mutex);
334                         adev->virt.ops->force_dpm_level(adev, level);
335                         mutex_unlock(&adev->pm.mutex);
336                         return count;
337                 } else {
338                         return -EINVAL;
339                 }
340         }
341
342         if (current_level == level)
343                 return count;
344
345         if (is_support_sw_smu(adev)) {
346                 mutex_lock(&adev->pm.mutex);
347                 if (adev->pm.dpm.thermal_active) {
348                         count = -EINVAL;
349                         mutex_unlock(&adev->pm.mutex);
350                         goto fail;
351                 }
352                 ret = smu_force_performance_level(&adev->smu, level);
353                 if (ret)
354                         count = -EINVAL;
355                 else
356                         adev->pm.dpm.forced_level = level;
357                 mutex_unlock(&adev->pm.mutex);
358         } else if (adev->powerplay.pp_funcs->force_performance_level) {
359                 mutex_lock(&adev->pm.mutex);
360                 if (adev->pm.dpm.thermal_active) {
361                         count = -EINVAL;
362                         mutex_unlock(&adev->pm.mutex);
363                         goto fail;
364                 }
365                 ret = amdgpu_dpm_force_performance_level(adev, level);
366                 if (ret)
367                         count = -EINVAL;
368                 else
369                         adev->pm.dpm.forced_level = level;
370                 mutex_unlock(&adev->pm.mutex);
371         }
372
373 fail:
374         return count;
375 }
376
377 static ssize_t amdgpu_get_pp_num_states(struct device *dev,
378                 struct device_attribute *attr,
379                 char *buf)
380 {
381         struct drm_device *ddev = dev_get_drvdata(dev);
382         struct amdgpu_device *adev = ddev->dev_private;
383         struct pp_states_info data;
384         int i, buf_len, ret;
385
386         if (is_support_sw_smu(adev)) {
387                 ret = smu_get_power_num_states(&adev->smu, &data);
388                 if (ret)
389                         return ret;
390         } else if (adev->powerplay.pp_funcs->get_pp_num_states)
391                 amdgpu_dpm_get_pp_num_states(adev, &data);
392
393         buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
394         for (i = 0; i < data.nums; i++)
395                 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
396                                 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
397                                 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
398                                 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
399                                 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
400
401         return buf_len;
402 }
403
404 static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
405                 struct device_attribute *attr,
406                 char *buf)
407 {
408         struct drm_device *ddev = dev_get_drvdata(dev);
409         struct amdgpu_device *adev = ddev->dev_private;
410         struct pp_states_info data;
411         struct smu_context *smu = &adev->smu;
412         enum amd_pm_state_type pm = 0;
413         int i = 0, ret = 0;
414
415         if (is_support_sw_smu(adev)) {
416                 pm = smu_get_current_power_state(smu);
417                 ret = smu_get_power_num_states(smu, &data);
418                 if (ret)
419                         return ret;
420         } else if (adev->powerplay.pp_funcs->get_current_power_state
421                  && adev->powerplay.pp_funcs->get_pp_num_states) {
422                 pm = amdgpu_dpm_get_current_power_state(adev);
423                 amdgpu_dpm_get_pp_num_states(adev, &data);
424         }
425
426         for (i = 0; i < data.nums; i++) {
427                 if (pm == data.states[i])
428                         break;
429         }
430
431         if (i == data.nums)
432                 i = -EINVAL;
433
434         return snprintf(buf, PAGE_SIZE, "%d\n", i);
435 }
436
437 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
438                 struct device_attribute *attr,
439                 char *buf)
440 {
441         struct drm_device *ddev = dev_get_drvdata(dev);
442         struct amdgpu_device *adev = ddev->dev_private;
443
444         if (adev->pp_force_state_enabled)
445                 return amdgpu_get_pp_cur_state(dev, attr, buf);
446         else
447                 return snprintf(buf, PAGE_SIZE, "\n");
448 }
449
450 static ssize_t amdgpu_set_pp_force_state(struct device *dev,
451                 struct device_attribute *attr,
452                 const char *buf,
453                 size_t count)
454 {
455         struct drm_device *ddev = dev_get_drvdata(dev);
456         struct amdgpu_device *adev = ddev->dev_private;
457         enum amd_pm_state_type state = 0;
458         unsigned long idx;
459         int ret;
460
461         if (strlen(buf) == 1)
462                 adev->pp_force_state_enabled = false;
463         else if (is_support_sw_smu(adev))
464                 adev->pp_force_state_enabled = false;
465         else if (adev->powerplay.pp_funcs->dispatch_tasks &&
466                         adev->powerplay.pp_funcs->get_pp_num_states) {
467                 struct pp_states_info data;
468
469                 ret = kstrtoul(buf, 0, &idx);
470                 if (ret || idx >= ARRAY_SIZE(data.states)) {
471                         count = -EINVAL;
472                         goto fail;
473                 }
474                 idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
475
476                 amdgpu_dpm_get_pp_num_states(adev, &data);
477                 state = data.states[idx];
478                 /* only set user selected power states */
479                 if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
480                     state != POWER_STATE_TYPE_DEFAULT) {
481                         amdgpu_dpm_dispatch_task(adev,
482                                         AMD_PP_TASK_ENABLE_USER_STATE, &state);
483                         adev->pp_force_state_enabled = true;
484                 }
485         }
486 fail:
487         return count;
488 }
489
490 /**
491  * DOC: pp_table
492  *
493  * The amdgpu driver provides a sysfs API for uploading new powerplay
494  * tables.  The file pp_table is used for this.  Reading the file
495  * will dump the current power play table.  Writing to the file
496  * will attempt to upload a new powerplay table and re-initialize
497  * powerplay using that new table.
498  *
499  */
500
501 static ssize_t amdgpu_get_pp_table(struct device *dev,
502                 struct device_attribute *attr,
503                 char *buf)
504 {
505         struct drm_device *ddev = dev_get_drvdata(dev);
506         struct amdgpu_device *adev = ddev->dev_private;
507         char *table = NULL;
508         int size;
509
510         if (is_support_sw_smu(adev)) {
511                 size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
512                 if (size < 0)
513                         return size;
514         }
515         else if (adev->powerplay.pp_funcs->get_pp_table)
516                 size = amdgpu_dpm_get_pp_table(adev, &table);
517         else
518                 return 0;
519
520         if (size >= PAGE_SIZE)
521                 size = PAGE_SIZE - 1;
522
523         memcpy(buf, table, size);
524
525         return size;
526 }
527
528 static ssize_t amdgpu_set_pp_table(struct device *dev,
529                 struct device_attribute *attr,
530                 const char *buf,
531                 size_t count)
532 {
533         struct drm_device *ddev = dev_get_drvdata(dev);
534         struct amdgpu_device *adev = ddev->dev_private;
535         int ret = 0;
536
537         if (is_support_sw_smu(adev)) {
538                 ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
539                 if (ret)
540                         return ret;
541         } else if (adev->powerplay.pp_funcs->set_pp_table)
542                 amdgpu_dpm_set_pp_table(adev, buf, count);
543
544         return count;
545 }
546
547 /**
548  * DOC: pp_od_clk_voltage
549  *
550  * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
551  * in each power level within a power state.  The pp_od_clk_voltage is used for
552  * this.
553  *
554  * < For Vega10 and previous ASICs >
555  *
556  * Reading the file will display:
557  *
558  * - a list of engine clock levels and voltages labeled OD_SCLK
559  *
560  * - a list of memory clock levels and voltages labeled OD_MCLK
561  *
562  * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
563  *
564  * To manually adjust these settings, first select manual using
565  * power_dpm_force_performance_level. Enter a new value for each
566  * level by writing a string that contains "s/m level clock voltage" to
567  * the file.  E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
568  * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
569  * 810 mV.  When you have edited all of the states as needed, write
570  * "c" (commit) to the file to commit your changes.  If you want to reset to the
571  * default power levels, write "r" (reset) to the file to reset them.
572  *
573  *
574  * < For Vega20 >
575  *
576  * Reading the file will display:
577  *
578  * - minimum and maximum engine clock labeled OD_SCLK
579  *
580  * - maximum memory clock labeled OD_MCLK
581  *
582  * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
583  *   They can be used to calibrate the sclk voltage curve.
584  *
585  * - a list of valid ranges for sclk, mclk, and voltage curve points
586  *   labeled OD_RANGE
587  *
588  * To manually adjust these settings:
589  *
590  * - First select manual using power_dpm_force_performance_level
591  *
592  * - For clock frequency setting, enter a new value by writing a
593  *   string that contains "s/m index clock" to the file. The index
594  *   should be 0 if to set minimum clock. And 1 if to set maximum
595  *   clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz.
596  *   "m 1 800" will update maximum mclk to be 800Mhz.
597  *
598  *   For sclk voltage curve, enter the new values by writing a
599  *   string that contains "vc point clock voltage" to the file. The
600  *   points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
601  *   update point1 with clock set as 300Mhz and voltage as
602  *   600mV. "vc 2 1000 1000" will update point3 with clock set
603  *   as 1000Mhz and voltage 1000mV.
604  *
605  * - When you have edited all of the states as needed, write "c" (commit)
606  *   to the file to commit your changes
607  *
608  * - If you want to reset to the default power levels, write "r" (reset)
609  *   to the file to reset them
610  *
611  */
612
613 static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
614                 struct device_attribute *attr,
615                 const char *buf,
616                 size_t count)
617 {
618         struct drm_device *ddev = dev_get_drvdata(dev);
619         struct amdgpu_device *adev = ddev->dev_private;
620         int ret;
621         uint32_t parameter_size = 0;
622         long parameter[64];
623         char buf_cpy[128];
624         char *tmp_str;
625         char *sub_str;
626         const char delimiter[3] = {' ', '\n', '\0'};
627         uint32_t type;
628
629         if (count > 127)
630                 return -EINVAL;
631
632         if (*buf == 's')
633                 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
634         else if (*buf == 'm')
635                 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
636         else if(*buf == 'r')
637                 type = PP_OD_RESTORE_DEFAULT_TABLE;
638         else if (*buf == 'c')
639                 type = PP_OD_COMMIT_DPM_TABLE;
640         else if (!strncmp(buf, "vc", 2))
641                 type = PP_OD_EDIT_VDDC_CURVE;
642         else
643                 return -EINVAL;
644
645         memcpy(buf_cpy, buf, count+1);
646
647         tmp_str = buf_cpy;
648
649         if (type == PP_OD_EDIT_VDDC_CURVE)
650                 tmp_str++;
651         while (isspace(*++tmp_str));
652
653         while (tmp_str[0]) {
654                 sub_str = strsep(&tmp_str, delimiter);
655                 ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
656                 if (ret)
657                         return -EINVAL;
658                 parameter_size++;
659
660                 while (isspace(*tmp_str))
661                         tmp_str++;
662         }
663
664         if (is_support_sw_smu(adev)) {
665                 ret = smu_od_edit_dpm_table(&adev->smu, type,
666                                             parameter, parameter_size);
667
668                 if (ret)
669                         return -EINVAL;
670         } else {
671                 if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
672                         ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
673                                                 parameter, parameter_size);
674
675                 if (ret)
676                         return -EINVAL;
677
678                 if (type == PP_OD_COMMIT_DPM_TABLE) {
679                         if (adev->powerplay.pp_funcs->dispatch_tasks) {
680                                 amdgpu_dpm_dispatch_task(adev,
681                                                 AMD_PP_TASK_READJUST_POWER_STATE,
682                                                 NULL);
683                                 return count;
684                         } else {
685                                 return -EINVAL;
686                         }
687                 }
688         }
689
690         return count;
691 }
692
693 static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
694                 struct device_attribute *attr,
695                 char *buf)
696 {
697         struct drm_device *ddev = dev_get_drvdata(dev);
698         struct amdgpu_device *adev = ddev->dev_private;
699         uint32_t size = 0;
700
701         if (is_support_sw_smu(adev)) {
702                 size = smu_print_clk_levels(&adev->smu, OD_SCLK, buf);
703                 size += smu_print_clk_levels(&adev->smu, OD_MCLK, buf+size);
704                 size += smu_print_clk_levels(&adev->smu, OD_VDDC_CURVE, buf+size);
705                 size += smu_print_clk_levels(&adev->smu, OD_RANGE, buf+size);
706                 return size;
707         } else if (adev->powerplay.pp_funcs->print_clock_levels) {
708                 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
709                 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
710                 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
711                 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
712                 return size;
713         } else {
714                 return snprintf(buf, PAGE_SIZE, "\n");
715         }
716
717 }
718
719 /**
720  * DOC: ppfeatures
721  *
722  * The amdgpu driver provides a sysfs API for adjusting what powerplay
723  * features to be enabled. The file ppfeatures is used for this. And
724  * this is only available for Vega10 and later dGPUs.
725  *
 * Reading back the file will show you the following:
727  * - Current ppfeature masks
728  * - List of the all supported powerplay features with their naming,
729  *   bitmasks and enablement status('Y'/'N' means "enabled"/"disabled").
730  *
731  * To manually enable or disable a specific feature, just set or clear
732  * the corresponding bit from original ppfeature masks and input the
733  * new ppfeature masks.
734  */
735 static ssize_t amdgpu_set_ppfeature_status(struct device *dev,
736                 struct device_attribute *attr,
737                 const char *buf,
738                 size_t count)
739 {
740         struct drm_device *ddev = dev_get_drvdata(dev);
741         struct amdgpu_device *adev = ddev->dev_private;
742         uint64_t featuremask;
743         int ret;
744
745         ret = kstrtou64(buf, 0, &featuremask);
746         if (ret)
747                 return -EINVAL;
748
749         pr_debug("featuremask = 0x%llx\n", featuremask);
750
751         if (adev->powerplay.pp_funcs->set_ppfeature_status) {
752                 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
753                 if (ret)
754                         return -EINVAL;
755         }
756
757         return count;
758 }
759
760 static ssize_t amdgpu_get_ppfeature_status(struct device *dev,
761                 struct device_attribute *attr,
762                 char *buf)
763 {
764         struct drm_device *ddev = dev_get_drvdata(dev);
765         struct amdgpu_device *adev = ddev->dev_private;
766
767         if (adev->powerplay.pp_funcs->get_ppfeature_status)
768                 return amdgpu_dpm_get_ppfeature_status(adev, buf);
769
770         return snprintf(buf, PAGE_SIZE, "\n");
771 }
772
773 /**
774  * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk
775  * pp_dpm_pcie
776  *
777  * The amdgpu driver provides a sysfs API for adjusting what power levels
778  * are enabled for a given power state.  The files pp_dpm_sclk, pp_dpm_mclk,
779  * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
780  * this.
781  *
782  * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
783  * Vega10 and later ASICs.
784  * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
785  *
786  * Reading back the files will show you the available power levels within
787  * the power state and the clock information for those levels.
788  *
789  * To manually adjust these states, first select manual using
790  * power_dpm_force_performance_level.
 791  * Secondly, enter a new value for each level by inputting a string that
 792  * contains "echo xx xx xx > pp_dpm_sclk/mclk/pcie",
 793  * e.g., "echo 4 5 6 > pp_dpm_sclk" will enable sclk levels 4, 5, and 6.
794  *
795  * NOTE: change to the dcefclk max dpm level is not supported now
796  */
797
798 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
799                 struct device_attribute *attr,
800                 char *buf)
801 {
802         struct drm_device *ddev = dev_get_drvdata(dev);
803         struct amdgpu_device *adev = ddev->dev_private;
804
805         if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
806             adev->virt.ops->get_pp_clk)
807                 return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
808
809         if (is_support_sw_smu(adev))
810                 return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
811         else if (adev->powerplay.pp_funcs->print_clock_levels)
812                 return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
813         else
814                 return snprintf(buf, PAGE_SIZE, "\n");
815 }
816
817 /*
818  * Worst case: 32 bits individually specified, in octal at 12 characters
819  * per line (+1 for \n).
820  */
821 #define AMDGPU_MASK_BUF_MAX     (32 * 13)
822
823 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
824 {
825         int ret;
826         long level;
827         char *sub_str = NULL;
828         char *tmp;
829         char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
830         const char delimiter[3] = {' ', '\n', '\0'};
831         size_t bytes;
832
833         *mask = 0;
834
835         bytes = min(count, sizeof(buf_cpy) - 1);
836         memcpy(buf_cpy, buf, bytes);
837         buf_cpy[bytes] = '\0';
838         tmp = buf_cpy;
839         while (tmp[0]) {
840                 sub_str = strsep(&tmp, delimiter);
841                 if (strlen(sub_str)) {
842                         ret = kstrtol(sub_str, 0, &level);
843                         if (ret)
844                                 return -EINVAL;
845                         *mask |= 1 << level;
846                 } else
847                         break;
848         }
849
850         return 0;
851 }
852
853 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
854                 struct device_attribute *attr,
855                 const char *buf,
856                 size_t count)
857 {
858         struct drm_device *ddev = dev_get_drvdata(dev);
859         struct amdgpu_device *adev = ddev->dev_private;
860         int ret;
861         uint32_t mask = 0;
862
863         ret = amdgpu_read_mask(buf, count, &mask);
864         if (ret)
865                 return ret;
866
867         if (is_support_sw_smu(adev))
868                 ret = smu_force_clk_levels(&adev->smu, PP_SCLK, mask);
869         else if (adev->powerplay.pp_funcs->force_clock_level)
870                 ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
871
872         if (ret)
873                 return -EINVAL;
874
875         return count;
876 }
877
878 static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
879                 struct device_attribute *attr,
880                 char *buf)
881 {
882         struct drm_device *ddev = dev_get_drvdata(dev);
883         struct amdgpu_device *adev = ddev->dev_private;
884
885         if (is_support_sw_smu(adev))
886                 return smu_print_clk_levels(&adev->smu, PP_MCLK, buf);
887         else if (adev->powerplay.pp_funcs->print_clock_levels)
888                 return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
889         else
890                 return snprintf(buf, PAGE_SIZE, "\n");
891 }
892
893 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
894                 struct device_attribute *attr,
895                 const char *buf,
896                 size_t count)
897 {
898         struct drm_device *ddev = dev_get_drvdata(dev);
899         struct amdgpu_device *adev = ddev->dev_private;
900         int ret;
901         uint32_t mask = 0;
902
903         ret = amdgpu_read_mask(buf, count, &mask);
904         if (ret)
905                 return ret;
906
907         if (is_support_sw_smu(adev))
908                 ret = smu_force_clk_levels(&adev->smu, PP_MCLK, mask);
909         else if (adev->powerplay.pp_funcs->force_clock_level)
910                 ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
911
912         if (ret)
913                 return -EINVAL;
914
915         return count;
916 }
917
918 static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
919                 struct device_attribute *attr,
920                 char *buf)
921 {
922         struct drm_device *ddev = dev_get_drvdata(dev);
923         struct amdgpu_device *adev = ddev->dev_private;
924
925         if (is_support_sw_smu(adev))
926                 return smu_print_clk_levels(&adev->smu, PP_SOCCLK, buf);
927         else if (adev->powerplay.pp_funcs->print_clock_levels)
928                 return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
929         else
930                 return snprintf(buf, PAGE_SIZE, "\n");
931 }
932
933 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
934                 struct device_attribute *attr,
935                 const char *buf,
936                 size_t count)
937 {
938         struct drm_device *ddev = dev_get_drvdata(dev);
939         struct amdgpu_device *adev = ddev->dev_private;
940         int ret;
941         uint32_t mask = 0;
942
943         ret = amdgpu_read_mask(buf, count, &mask);
944         if (ret)
945                 return ret;
946
947         if (is_support_sw_smu(adev))
948                 ret = smu_force_clk_levels(&adev->smu, PP_SOCCLK, mask);
949         else if (adev->powerplay.pp_funcs->force_clock_level)
950                 ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
951
952         if (ret)
953                 return -EINVAL;
954
955         return count;
956 }
957
958 static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
959                 struct device_attribute *attr,
960                 char *buf)
961 {
962         struct drm_device *ddev = dev_get_drvdata(dev);
963         struct amdgpu_device *adev = ddev->dev_private;
964
965         if (is_support_sw_smu(adev))
966                 return smu_print_clk_levels(&adev->smu, PP_FCLK, buf);
967         else if (adev->powerplay.pp_funcs->print_clock_levels)
968                 return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
969         else
970                 return snprintf(buf, PAGE_SIZE, "\n");
971 }
972
973 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
974                 struct device_attribute *attr,
975                 const char *buf,
976                 size_t count)
977 {
978         struct drm_device *ddev = dev_get_drvdata(dev);
979         struct amdgpu_device *adev = ddev->dev_private;
980         int ret;
981         uint32_t mask = 0;
982
983         ret = amdgpu_read_mask(buf, count, &mask);
984         if (ret)
985                 return ret;
986
987         if (is_support_sw_smu(adev))
988                 ret = smu_force_clk_levels(&adev->smu, PP_FCLK, mask);
989         else if (adev->powerplay.pp_funcs->force_clock_level)
990                 ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
991
992         if (ret)
993                 return -EINVAL;
994
995         return count;
996 }
997
998 static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
999                 struct device_attribute *attr,
1000                 char *buf)
1001 {
1002         struct drm_device *ddev = dev_get_drvdata(dev);
1003         struct amdgpu_device *adev = ddev->dev_private;
1004
1005         if (is_support_sw_smu(adev))
1006                 return smu_print_clk_levels(&adev->smu, PP_DCEFCLK, buf);
1007         else if (adev->powerplay.pp_funcs->print_clock_levels)
1008                 return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
1009         else
1010                 return snprintf(buf, PAGE_SIZE, "\n");
1011 }
1012
1013 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
1014                 struct device_attribute *attr,
1015                 const char *buf,
1016                 size_t count)
1017 {
1018         struct drm_device *ddev = dev_get_drvdata(dev);
1019         struct amdgpu_device *adev = ddev->dev_private;
1020         int ret;
1021         uint32_t mask = 0;
1022
1023         ret = amdgpu_read_mask(buf, count, &mask);
1024         if (ret)
1025                 return ret;
1026
1027         if (is_support_sw_smu(adev))
1028                 ret = smu_force_clk_levels(&adev->smu, PP_DCEFCLK, mask);
1029         else if (adev->powerplay.pp_funcs->force_clock_level)
1030                 ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
1031
1032         if (ret)
1033                 return -EINVAL;
1034
1035         return count;
1036 }
1037
1038 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
1039                 struct device_attribute *attr,
1040                 char *buf)
1041 {
1042         struct drm_device *ddev = dev_get_drvdata(dev);
1043         struct amdgpu_device *adev = ddev->dev_private;
1044
1045         if (is_support_sw_smu(adev))
1046                 return smu_print_clk_levels(&adev->smu, PP_PCIE, buf);
1047         else if (adev->powerplay.pp_funcs->print_clock_levels)
1048                 return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
1049         else
1050                 return snprintf(buf, PAGE_SIZE, "\n");
1051 }
1052
1053 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
1054                 struct device_attribute *attr,
1055                 const char *buf,
1056                 size_t count)
1057 {
1058         struct drm_device *ddev = dev_get_drvdata(dev);
1059         struct amdgpu_device *adev = ddev->dev_private;
1060         int ret;
1061         uint32_t mask = 0;
1062
1063         ret = amdgpu_read_mask(buf, count, &mask);
1064         if (ret)
1065                 return ret;
1066
1067         if (is_support_sw_smu(adev))
1068                 ret = smu_force_clk_levels(&adev->smu, PP_PCIE, mask);
1069         else if (adev->powerplay.pp_funcs->force_clock_level)
1070                 ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
1071
1072         if (ret)
1073                 return -EINVAL;
1074
1075         return count;
1076 }
1077
1078 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
1079                 struct device_attribute *attr,
1080                 char *buf)
1081 {
1082         struct drm_device *ddev = dev_get_drvdata(dev);
1083         struct amdgpu_device *adev = ddev->dev_private;
1084         uint32_t value = 0;
1085
1086         if (is_support_sw_smu(adev))
1087                 value = smu_get_od_percentage(&(adev->smu), OD_SCLK);
1088         else if (adev->powerplay.pp_funcs->get_sclk_od)
1089                 value = amdgpu_dpm_get_sclk_od(adev);
1090
1091         return snprintf(buf, PAGE_SIZE, "%d\n", value);
1092 }
1093
1094 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
1095                 struct device_attribute *attr,
1096                 const char *buf,
1097                 size_t count)
1098 {
1099         struct drm_device *ddev = dev_get_drvdata(dev);
1100         struct amdgpu_device *adev = ddev->dev_private;
1101         int ret;
1102         long int value;
1103
1104         ret = kstrtol(buf, 0, &value);
1105
1106         if (ret) {
1107                 count = -EINVAL;
1108                 goto fail;
1109         }
1110
1111         if (is_support_sw_smu(adev)) {
1112                 value = smu_set_od_percentage(&(adev->smu), OD_SCLK, (uint32_t)value);
1113         } else {
1114                 if (adev->powerplay.pp_funcs->set_sclk_od)
1115                         amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
1116
1117                 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1118                         amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
1119                 } else {
1120                         adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1121                         amdgpu_pm_compute_clocks(adev);
1122                 }
1123         }
1124
1125 fail:
1126         return count;
1127 }
1128
1129 static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1130                 struct device_attribute *attr,
1131                 char *buf)
1132 {
1133         struct drm_device *ddev = dev_get_drvdata(dev);
1134         struct amdgpu_device *adev = ddev->dev_private;
1135         uint32_t value = 0;
1136
1137         if (is_support_sw_smu(adev))
1138                 value = smu_get_od_percentage(&(adev->smu), OD_MCLK);
1139         else if (adev->powerplay.pp_funcs->get_mclk_od)
1140                 value = amdgpu_dpm_get_mclk_od(adev);
1141
1142         return snprintf(buf, PAGE_SIZE, "%d\n", value);
1143 }
1144
1145 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1146                 struct device_attribute *attr,
1147                 const char *buf,
1148                 size_t count)
1149 {
1150         struct drm_device *ddev = dev_get_drvdata(dev);
1151         struct amdgpu_device *adev = ddev->dev_private;
1152         int ret;
1153         long int value;
1154
1155         ret = kstrtol(buf, 0, &value);
1156
1157         if (ret) {
1158                 count = -EINVAL;
1159                 goto fail;
1160         }
1161
1162         if (is_support_sw_smu(adev)) {
1163                 value = smu_set_od_percentage(&(adev->smu), OD_MCLK, (uint32_t)value);
1164         } else {
1165                 if (adev->powerplay.pp_funcs->set_mclk_od)
1166                         amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1167
1168                 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1169                         amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
1170                 } else {
1171                         adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1172                         amdgpu_pm_compute_clocks(adev);
1173                 }
1174         }
1175
1176 fail:
1177         return count;
1178 }
1179
1180 /**
1181  * DOC: pp_power_profile_mode
1182  *
1183  * The amdgpu driver provides a sysfs API for adjusting the heuristics
1184  * related to switching between power levels in a power state.  The file
1185  * pp_power_profile_mode is used for this.
1186  *
1187  * Reading this file outputs a list of all of the predefined power profiles
1188  * and the relevant heuristics settings for that profile.
1189  *
1190  * To select a profile or create a custom profile, first select manual using
1191  * power_dpm_force_performance_level.  Writing the number of a predefined
1192  * profile to pp_power_profile_mode will enable those heuristics.  To
1193  * create a custom set of heuristics, write a string of numbers to the file
1194  * starting with the number of the custom profile along with a setting
1195  * for each heuristic parameter.  Due to differences across asic families
1196  * the heuristic parameters vary from family to family.
1197  *
1198  */
1199
1200 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1201                 struct device_attribute *attr,
1202                 char *buf)
1203 {
1204         struct drm_device *ddev = dev_get_drvdata(dev);
1205         struct amdgpu_device *adev = ddev->dev_private;
1206
1207         if (is_support_sw_smu(adev))
1208                 return smu_get_power_profile_mode(&adev->smu, buf);
1209         else if (adev->powerplay.pp_funcs->get_power_profile_mode)
1210                 return amdgpu_dpm_get_power_profile_mode(adev, buf);
1211
1212         return snprintf(buf, PAGE_SIZE, "\n");
1213 }
1214
1215
1216 static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1217                 struct device_attribute *attr,
1218                 const char *buf,
1219                 size_t count)
1220 {
1221         int ret = 0xff;
1222         struct drm_device *ddev = dev_get_drvdata(dev);
1223         struct amdgpu_device *adev = ddev->dev_private;
1224         uint32_t parameter_size = 0;
1225         long parameter[64];
1226         char *sub_str, buf_cpy[128];
1227         char *tmp_str;
1228         uint32_t i = 0;
1229         char tmp[2];
1230         long int profile_mode = 0;
1231         const char delimiter[3] = {' ', '\n', '\0'};
1232
1233         tmp[0] = *(buf);
1234         tmp[1] = '\0';
1235         ret = kstrtol(tmp, 0, &profile_mode);
1236         if (ret)
1237                 goto fail;
1238
1239         if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1240                 if (count < 2 || count > 127)
1241                         return -EINVAL;
1242                 while (isspace(*++buf))
1243                         i++;
1244                 memcpy(buf_cpy, buf, count-i);
1245                 tmp_str = buf_cpy;
1246                 while (tmp_str[0]) {
1247                         sub_str = strsep(&tmp_str, delimiter);
1248                         ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
1249                         if (ret) {
1250                                 count = -EINVAL;
1251                                 goto fail;
1252                         }
1253                         parameter_size++;
1254                         while (isspace(*tmp_str))
1255                                 tmp_str++;
1256                 }
1257         }
1258         parameter[parameter_size] = profile_mode;
1259         if (is_support_sw_smu(adev))
1260                 ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
1261         else if (adev->powerplay.pp_funcs->set_power_profile_mode)
1262                 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1263         if (!ret)
1264                 return count;
1265 fail:
1266         return -EINVAL;
1267 }
1268
1269 /**
1270  * DOC: busy_percent
1271  *
1272  * The amdgpu driver provides a sysfs API for reading how busy the GPU
1273  * is as a percentage.  The file gpu_busy_percent is used for this.
1274  * The SMU firmware computes a percentage of load based on the
1275  * aggregate activity level in the IP cores.
1276  */
1277 static ssize_t amdgpu_get_busy_percent(struct device *dev,
1278                 struct device_attribute *attr,
1279                 char *buf)
1280 {
1281         struct drm_device *ddev = dev_get_drvdata(dev);
1282         struct amdgpu_device *adev = ddev->dev_private;
1283         int r, value, size = sizeof(value);
1284
1285         /* read the IP busy sensor */
1286         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
1287                                    (void *)&value, &size);
1288
1289         if (r)
1290                 return r;
1291
1292         return snprintf(buf, PAGE_SIZE, "%d\n", value);
1293 }
1294
1295 /**
1296  * DOC: pcie_bw
1297  *
1298  * The amdgpu driver provides a sysfs API for estimating how much data
1299  * has been received and sent by the GPU in the last second through PCIe.
1300  * The file pcie_bw is used for this.
1301  * The Perf counters count the number of received and sent messages and return
1302  * those values, as well as the maximum payload size of a PCIe packet (mps).
1303  * Note that it is not possible to easily and quickly obtain the size of each
1304  * packet transmitted, so we output the max payload size (mps) to allow for
1305  * quick estimation of the PCIe bandwidth usage
1306  */
1307 static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1308                 struct device_attribute *attr,
1309                 char *buf)
1310 {
1311         struct drm_device *ddev = dev_get_drvdata(dev);
1312         struct amdgpu_device *adev = ddev->dev_private;
1313         uint64_t count0, count1;
1314
1315         amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
1316         return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
1317                         count0, count1, pcie_get_mps(adev->pdev));
1318 }
1319
1320 static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state);
1321 static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR,
1322                    amdgpu_get_dpm_forced_performance_level,
1323                    amdgpu_set_dpm_forced_performance_level);
1324 static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL);
1325 static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL);
1326 static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR,
1327                 amdgpu_get_pp_force_state,
1328                 amdgpu_set_pp_force_state);
1329 static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR,
1330                 amdgpu_get_pp_table,
1331                 amdgpu_set_pp_table);
1332 static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR,
1333                 amdgpu_get_pp_dpm_sclk,
1334                 amdgpu_set_pp_dpm_sclk);
1335 static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
1336                 amdgpu_get_pp_dpm_mclk,
1337                 amdgpu_set_pp_dpm_mclk);
1338 static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR,
1339                 amdgpu_get_pp_dpm_socclk,
1340                 amdgpu_set_pp_dpm_socclk);
1341 static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR,
1342                 amdgpu_get_pp_dpm_fclk,
1343                 amdgpu_set_pp_dpm_fclk);
1344 static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR,
1345                 amdgpu_get_pp_dpm_dcefclk,
1346                 amdgpu_set_pp_dpm_dcefclk);
1347 static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
1348                 amdgpu_get_pp_dpm_pcie,
1349                 amdgpu_set_pp_dpm_pcie);
1350 static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
1351                 amdgpu_get_pp_sclk_od,
1352                 amdgpu_set_pp_sclk_od);
1353 static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
1354                 amdgpu_get_pp_mclk_od,
1355                 amdgpu_set_pp_mclk_od);
1356 static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR,
1357                 amdgpu_get_pp_power_profile_mode,
1358                 amdgpu_set_pp_power_profile_mode);
1359 static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR,
1360                 amdgpu_get_pp_od_clk_voltage,
1361                 amdgpu_set_pp_od_clk_voltage);
1362 static DEVICE_ATTR(gpu_busy_percent, S_IRUGO,
1363                 amdgpu_get_busy_percent, NULL);
1364 static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL);
1365 static DEVICE_ATTR(ppfeatures, S_IRUGO | S_IWUSR,
1366                 amdgpu_get_ppfeature_status,
1367                 amdgpu_set_ppfeature_status);
1368
1369 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
1370                                       struct device_attribute *attr,
1371                                       char *buf)
1372 {
1373         struct amdgpu_device *adev = dev_get_drvdata(dev);
1374         struct drm_device *ddev = adev->ddev;
1375         int r, temp, size = sizeof(temp);
1376
1377         /* Can't get temperature when the card is off */
1378         if  ((adev->flags & AMD_IS_PX) &&
1379              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1380                 return -EINVAL;
1381
1382         /* get the temperature */
1383         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
1384                                    (void *)&temp, &size);
1385         if (r)
1386                 return r;
1387
1388         return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1389 }
1390
1391 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
1392                                              struct device_attribute *attr,
1393                                              char *buf)
1394 {
1395         struct amdgpu_device *adev = dev_get_drvdata(dev);
1396         int hyst = to_sensor_dev_attr(attr)->index;
1397         int temp;
1398
1399         if (hyst)
1400                 temp = adev->pm.dpm.thermal.min_temp;
1401         else
1402                 temp = adev->pm.dpm.thermal.max_temp;
1403
1404         return snprintf(buf, PAGE_SIZE, "%d\n", temp);
1405 }
1406
1407 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
1408                                             struct device_attribute *attr,
1409                                             char *buf)
1410 {
1411         struct amdgpu_device *adev = dev_get_drvdata(dev);
1412         u32 pwm_mode = 0;
1413         if (is_support_sw_smu(adev)) {
1414                 pwm_mode = smu_get_fan_control_mode(&adev->smu);
1415         } else {
1416                 if (!adev->powerplay.pp_funcs->get_fan_control_mode)
1417                         return -EINVAL;
1418
1419                 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
1420         }
1421
1422         return sprintf(buf, "%i\n", pwm_mode);
1423 }
1424
1425 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
1426                                             struct device_attribute *attr,
1427                                             const char *buf,
1428                                             size_t count)
1429 {
1430         struct amdgpu_device *adev = dev_get_drvdata(dev);
1431         int err;
1432         int value;
1433
1434         /* Can't adjust fan when the card is off */
1435         if  ((adev->flags & AMD_IS_PX) &&
1436              (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1437                 return -EINVAL;
1438
1439         if (is_support_sw_smu(adev)) {
1440                 err = kstrtoint(buf, 10, &value);
1441                 if (err)
1442                         return err;
1443
1444                 smu_set_fan_control_mode(&adev->smu, value);
1445         } else {
1446                 if (!adev->powerplay.pp_funcs->set_fan_control_mode)
1447                         return -EINVAL;
1448
1449                 err = kstrtoint(buf, 10, &value);
1450                 if (err)
1451                         return err;
1452
1453                 amdgpu_dpm_set_fan_control_mode(adev, value);
1454         }
1455
1456         return count;
1457 }
1458
1459 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
1460                                          struct device_attribute *attr,
1461                                          char *buf)
1462 {
1463         return sprintf(buf, "%i\n", 0);
1464 }
1465
1466 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
1467                                          struct device_attribute *attr,
1468                                          char *buf)
1469 {
1470         return sprintf(buf, "%i\n", 255);
1471 }
1472
1473 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
1474                                      struct device_attribute *attr,
1475                                      const char *buf, size_t count)
1476 {
1477         struct amdgpu_device *adev = dev_get_drvdata(dev);
1478         int err;
1479         u32 value;
1480         u32 pwm_mode;
1481
1482         /* Can't adjust fan when the card is off */
1483         if  ((adev->flags & AMD_IS_PX) &&
1484              (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1485                 return -EINVAL;
1486         if (is_support_sw_smu(adev))
1487                 pwm_mode = smu_get_fan_control_mode(&adev->smu);
1488         else
1489                 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
1490         if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
1491                 pr_info("manual fan speed control should be enabled first\n");
1492                 return -EINVAL;
1493         }
1494
1495         err = kstrtou32(buf, 10, &value);
1496         if (err)
1497                 return err;
1498
1499         value = (value * 100) / 255;
1500
1501         if (is_support_sw_smu(adev)) {
1502                 err = smu_set_fan_speed_percent(&adev->smu, value);
1503                 if (err)
1504                         return err;
1505         } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
1506                 err = amdgpu_dpm_set_fan_speed_percent(adev, value);
1507                 if (err)
1508                         return err;
1509         }
1510
1511         return count;
1512 }
1513
1514 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
1515                                      struct device_attribute *attr,
1516                                      char *buf)
1517 {
1518         struct amdgpu_device *adev = dev_get_drvdata(dev);
1519         int err;
1520         u32 speed = 0;
1521
1522         /* Can't adjust fan when the card is off */
1523         if  ((adev->flags & AMD_IS_PX) &&
1524              (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1525                 return -EINVAL;
1526
1527         if (is_support_sw_smu(adev)) {
1528                 err = smu_get_fan_speed_percent(&adev->smu, &speed);
1529                 if (err)
1530                         return err;
1531         } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
1532                 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
1533                 if (err)
1534                         return err;
1535         }
1536
1537         speed = (speed * 255) / 100;
1538
1539         return sprintf(buf, "%i\n", speed);
1540 }
1541
1542 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
1543                                            struct device_attribute *attr,
1544                                            char *buf)
1545 {
1546         struct amdgpu_device *adev = dev_get_drvdata(dev);
1547         int err;
1548         u32 speed = 0;
1549
1550         /* Can't adjust fan when the card is off */
1551         if  ((adev->flags & AMD_IS_PX) &&
1552              (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1553                 return -EINVAL;
1554
1555         if (is_support_sw_smu(adev)) {
1556                 err = smu_get_current_rpm(&adev->smu, &speed);
1557                 if (err)
1558                         return err;
1559         } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
1560                 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
1561                 if (err)
1562                         return err;
1563         }
1564
1565         return sprintf(buf, "%i\n", speed);
1566 }
1567
1568 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
1569                                          struct device_attribute *attr,
1570                                          char *buf)
1571 {
1572         struct amdgpu_device *adev = dev_get_drvdata(dev);
1573         u32 min_rpm = 0;
1574         u32 size = sizeof(min_rpm);
1575         int r;
1576
1577         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
1578                                    (void *)&min_rpm, &size);
1579         if (r)
1580                 return r;
1581
1582         return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
1583 }
1584
1585 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
1586                                          struct device_attribute *attr,
1587                                          char *buf)
1588 {
1589         struct amdgpu_device *adev = dev_get_drvdata(dev);
1590         u32 max_rpm = 0;
1591         u32 size = sizeof(max_rpm);
1592         int r;
1593
1594         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
1595                                    (void *)&max_rpm, &size);
1596         if (r)
1597                 return r;
1598
1599         return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
1600 }
1601
1602 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
1603                                            struct device_attribute *attr,
1604                                            char *buf)
1605 {
1606         struct amdgpu_device *adev = dev_get_drvdata(dev);
1607         int err;
1608         u32 rpm = 0;
1609
1610         /* Can't adjust fan when the card is off */
1611         if  ((adev->flags & AMD_IS_PX) &&
1612              (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1613                 return -EINVAL;
1614
1615         if (is_support_sw_smu(adev)) {
1616                 err = smu_get_current_rpm(&adev->smu, &rpm);
1617                 if (err)
1618                         return err;
1619         } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
1620                 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
1621                 if (err)
1622                         return err;
1623         }
1624
1625         return sprintf(buf, "%i\n", rpm);
1626 }
1627
1628 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
1629                                      struct device_attribute *attr,
1630                                      const char *buf, size_t count)
1631 {
1632         struct amdgpu_device *adev = dev_get_drvdata(dev);
1633         int err;
1634         u32 value;
1635         u32 pwm_mode;
1636
1637         if (is_support_sw_smu(adev))
1638                 pwm_mode = smu_get_fan_control_mode(&adev->smu);
1639         else
1640                 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
1641
1642         if (pwm_mode != AMD_FAN_CTRL_MANUAL)
1643                 return -ENODATA;
1644
1645         /* Can't adjust fan when the card is off */
1646         if  ((adev->flags & AMD_IS_PX) &&
1647              (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1648                 return -EINVAL;
1649
1650         err = kstrtou32(buf, 10, &value);
1651         if (err)
1652                 return err;
1653
1654         if (is_support_sw_smu(adev)) {
1655                 err = smu_set_fan_speed_rpm(&adev->smu, value);
1656                 if (err)
1657                         return err;
1658         } else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
1659                 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
1660                 if (err)
1661                         return err;
1662         }
1663
1664         return count;
1665 }
1666
1667 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
1668                                             struct device_attribute *attr,
1669                                             char *buf)
1670 {
1671         struct amdgpu_device *adev = dev_get_drvdata(dev);
1672         u32 pwm_mode = 0;
1673
1674         if (is_support_sw_smu(adev)) {
1675                 pwm_mode = smu_get_fan_control_mode(&adev->smu);
1676         } else {
1677                 if (!adev->powerplay.pp_funcs->get_fan_control_mode)
1678                         return -EINVAL;
1679
1680                 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
1681         }
1682         return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
1683 }
1684
1685 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
1686                                             struct device_attribute *attr,
1687                                             const char *buf,
1688                                             size_t count)
1689 {
1690         struct amdgpu_device *adev = dev_get_drvdata(dev);
1691         int err;
1692         int value;
1693         u32 pwm_mode;
1694
1695         /* Can't adjust fan when the card is off */
1696         if  ((adev->flags & AMD_IS_PX) &&
1697              (adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1698                 return -EINVAL;
1699
1700
1701         err = kstrtoint(buf, 10, &value);
1702         if (err)
1703                 return err;
1704
1705         if (value == 0)
1706                 pwm_mode = AMD_FAN_CTRL_AUTO;
1707         else if (value == 1)
1708                 pwm_mode = AMD_FAN_CTRL_MANUAL;
1709         else
1710                 return -EINVAL;
1711
1712         if (is_support_sw_smu(adev)) {
1713                 smu_set_fan_control_mode(&adev->smu, pwm_mode);
1714         } else {
1715                 if (!adev->powerplay.pp_funcs->set_fan_control_mode)
1716                         return -EINVAL;
1717                 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
1718         }
1719
1720         return count;
1721 }
1722
1723 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
1724                                         struct device_attribute *attr,
1725                                         char *buf)
1726 {
1727         struct amdgpu_device *adev = dev_get_drvdata(dev);
1728         struct drm_device *ddev = adev->ddev;
1729         u32 vddgfx;
1730         int r, size = sizeof(vddgfx);
1731
1732         /* Can't get voltage when the card is off */
1733         if  ((adev->flags & AMD_IS_PX) &&
1734              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1735                 return -EINVAL;
1736
1737         /* get the voltage */
1738         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
1739                                    (void *)&vddgfx, &size);
1740         if (r)
1741                 return r;
1742
1743         return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
1744 }
1745
1746 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
1747                                               struct device_attribute *attr,
1748                                               char *buf)
1749 {
1750         return snprintf(buf, PAGE_SIZE, "vddgfx\n");
1751 }
1752
1753 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
1754                                        struct device_attribute *attr,
1755                                        char *buf)
1756 {
1757         struct amdgpu_device *adev = dev_get_drvdata(dev);
1758         struct drm_device *ddev = adev->ddev;
1759         u32 vddnb;
1760         int r, size = sizeof(vddnb);
1761
1762         /* only APUs have vddnb */
1763         if  (!(adev->flags & AMD_IS_APU))
1764                 return -EINVAL;
1765
1766         /* Can't get voltage when the card is off */
1767         if  ((adev->flags & AMD_IS_PX) &&
1768              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1769                 return -EINVAL;
1770
1771         /* get the voltage */
1772         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
1773                                    (void *)&vddnb, &size);
1774         if (r)
1775                 return r;
1776
1777         return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
1778 }
1779
1780 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
1781                                               struct device_attribute *attr,
1782                                               char *buf)
1783 {
1784         return snprintf(buf, PAGE_SIZE, "vddnb\n");
1785 }
1786
1787 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
1788                                            struct device_attribute *attr,
1789                                            char *buf)
1790 {
1791         struct amdgpu_device *adev = dev_get_drvdata(dev);
1792         struct drm_device *ddev = adev->ddev;
1793         u32 query = 0;
1794         int r, size = sizeof(u32);
1795         unsigned uw;
1796
1797         /* Can't get power when the card is off */
1798         if  ((adev->flags & AMD_IS_PX) &&
1799              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1800                 return -EINVAL;
1801
1802         /* get the voltage */
1803         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
1804                                    (void *)&query, &size);
1805         if (r)
1806                 return r;
1807
1808         /* convert to microwatts */
1809         uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
1810
1811         return snprintf(buf, PAGE_SIZE, "%u\n", uw);
1812 }
1813
1814 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
1815                                          struct device_attribute *attr,
1816                                          char *buf)
1817 {
1818         return sprintf(buf, "%i\n", 0);
1819 }
1820
1821 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
1822                                          struct device_attribute *attr,
1823                                          char *buf)
1824 {
1825         struct amdgpu_device *adev = dev_get_drvdata(dev);
1826         uint32_t limit = 0;
1827
1828         if (is_support_sw_smu(adev)) {
1829                 smu_get_power_limit(&adev->smu, &limit, true);
1830                 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
1831         } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
1832                 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
1833                 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
1834         } else {
1835                 return snprintf(buf, PAGE_SIZE, "\n");
1836         }
1837 }
1838
1839 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
1840                                          struct device_attribute *attr,
1841                                          char *buf)
1842 {
1843         struct amdgpu_device *adev = dev_get_drvdata(dev);
1844         uint32_t limit = 0;
1845
1846         if (is_support_sw_smu(adev)) {
1847                 smu_get_power_limit(&adev->smu, &limit, false);
1848                 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
1849         } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
1850                 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
1851                 return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
1852         } else {
1853                 return snprintf(buf, PAGE_SIZE, "\n");
1854         }
1855 }
1856
1857
1858 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
1859                 struct device_attribute *attr,
1860                 const char *buf,
1861                 size_t count)
1862 {
1863         struct amdgpu_device *adev = dev_get_drvdata(dev);
1864         int err;
1865         u32 value;
1866
1867         err = kstrtou32(buf, 10, &value);
1868         if (err)
1869                 return err;
1870
1871         value = value / 1000000; /* convert to Watt */
1872         if (is_support_sw_smu(adev)) {
1873                 adev->smu.funcs->set_power_limit(&adev->smu, value);
1874         } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
1875                 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
1876                 if (err)
1877                         return err;
1878         } else {
1879                 return -EINVAL;
1880         }
1881
1882         return count;
1883 }
1884
1885 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
1886                                       struct device_attribute *attr,
1887                                       char *buf)
1888 {
1889         struct amdgpu_device *adev = dev_get_drvdata(dev);
1890         struct drm_device *ddev = adev->ddev;
1891         uint32_t sclk;
1892         int r, size = sizeof(sclk);
1893
1894         /* Can't get voltage when the card is off */
1895         if  ((adev->flags & AMD_IS_PX) &&
1896              (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
1897                 return -EINVAL;
1898
1899         /* sanity check PP is enabled */
1900         if (!(adev->powerplay.pp_funcs &&
1901               adev->powerplay.pp_funcs->read_sensor))
1902               return -EINVAL;
1903
1904         /* get the sclk */
1905         r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
1906                                    (void *)&sclk, &size);
1907         if (r)
1908                 return r;
1909
1910         return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
1911 }
1912
1913 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
1914                                             struct device_attribute *attr,
1915                                             char *buf)
1916 {
1917         return snprintf(buf, PAGE_SIZE, "sclk\n");
1918 }
1919
/*
 * amdgpu_hwmon_show_mclk - hwmon freq2_input read callback
 *
 * Reports the current memory clock via sysfs; the sensor value is
 * scaled by 10 * 1000 to yield Hz.  Returns -EINVAL while a PX card is
 * powered down or when no read_sensor binding exists.
 */
static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	struct drm_device *ddev = adev->ddev;
	uint32_t mclk;
	int r, size = sizeof(mclk);

	/* Can't read the clock when the card is off */
	if  ((adev->flags & AMD_IS_PX) &&
	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
		return -EINVAL;

	/* sanity check PP is enabled */
	if (!(adev->powerplay.pp_funcs &&
	      adev->powerplay.pp_funcs->read_sensor))
	      return -EINVAL;

	/* get the mclk */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
				   (void *)&mclk, &size);
	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
}
1947
1948 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
1949                                             struct device_attribute *attr,
1950                                             char *buf)
1951 {
1952         return snprintf(buf, PAGE_SIZE, "mclk\n");
1953 }
1954
1955 /**
1956  * DOC: hwmon
1957  *
1958  * The amdgpu driver exposes the following sensor interfaces:
1959  *
1960  * - GPU temperature (via the on-die sensor)
1961  *
1962  * - GPU voltage
1963  *
1964  * - Northbridge voltage (APUs only)
1965  *
1966  * - GPU power
1967  *
1968  * - GPU fan
1969  *
1970  * - GPU gfx/compute engine clock
1971  *
1972  * - GPU memory clock (dGPU only)
1973  *
1974  * hwmon interfaces for GPU temperature:
1975  *
1976  * - temp1_input: the on die GPU temperature in millidegrees Celsius
1977  *
1978  * - temp1_crit: temperature critical max value in millidegrees Celsius
1979  *
1980  * - temp1_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
1981  *
1982  * hwmon interfaces for GPU voltage:
1983  *
1984  * - in0_input: the voltage on the GPU in millivolts
1985  *
1986  * - in1_input: the voltage on the Northbridge in millivolts
1987  *
1988  * hwmon interfaces for GPU power:
1989  *
1990  * - power1_average: average power used by the GPU in microWatts
1991  *
1992  * - power1_cap_min: minimum cap supported in microWatts
1993  *
1994  * - power1_cap_max: maximum cap supported in microWatts
1995  *
1996  * - power1_cap: selected power cap in microWatts
1997  *
1998  * hwmon interfaces for GPU fan:
1999  *
2000  * - pwm1: pulse width modulation fan level (0-255)
2001  *
2002  * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
2003  *
2004  * - pwm1_min: pulse width modulation fan control minimum level (0)
2005  *
2006  * - pwm1_max: pulse width modulation fan control maximum level (255)
2007  *
 * - fan1_min: minimum fan speed, in revolutions per minute (RPM)
2009  *
 * - fan1_max: maximum fan speed, in revolutions per minute (RPM)
2011  *
2012  * - fan1_input: fan speed in RPM
2013  *
2014  * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM)
2015  *
 * - fan[1-*]_enable: Enable or disable the sensors. 1: Enable 0: Disable
2017  *
2018  * hwmon interfaces for GPU clocks:
2019  *
2020  * - freq1_input: the gfx/compute clock in hertz
2021  *
2022  * - freq2_input: the memory clock in hertz
2023  *
2024  * You can use hwmon tools like sensors to view this information on your system.
2025  *
2026  */
2027
/* GPU die temperature and critical thresholds (millidegrees Celsius) */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
/* fan control via PWM duty cycle (0-255) and control-mode selection */
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
/* fan speed reporting and RPM-based control */
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
/* voltages in millivolts: in0 = gfx, in1 = northbridge (APUs only) */
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
/* power draw and cap, all in microwatts */
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
/* clocks in Hz: freq1 = gfx/compute, freq2 = memory (dGPU only) */
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
2052
/*
 * All hwmon attributes the driver may expose; hwmon_attributes_visible()
 * filters this list per device at registration time.
 */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_target.dev_attr.attr,
	&sensor_dev_attr_fan1_enable.dev_attr.attr,
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_label.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_label.dev_attr.attr,
	&sensor_dev_attr_power1_average.dev_attr.attr,
	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
	&sensor_dev_attr_power1_cap.dev_attr.attr,
	&sensor_dev_attr_freq1_input.dev_attr.attr,
	&sensor_dev_attr_freq1_label.dev_attr.attr,
	&sensor_dev_attr_freq2_input.dev_attr.attr,
	&sensor_dev_attr_freq2_label.dev_attr.attr,
	NULL
};
2080
/*
 * hwmon_attributes_visible - per-device attribute filter for hwmon_attrgroup
 *
 * Called by sysfs for each entry of hwmon_attributes; returns the
 * effective mode bits for @attr on this device, or 0 to hide the
 * attribute entirely.  Visibility depends on fan presence, APU vs dGPU,
 * whether DPM is enabled, and which powerplay callbacks the asic
 * implements.  The checks are applied in order; the first matching
 * "hide" condition wins.
 */
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	    attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip fan attributes on APU */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/*
	 * NOTE(review): adev->powerplay.pp_funcs is dereferenced below
	 * without a NULL check, unlike the power1_cap handlers above —
	 * presumably pp_funcs is always valid when the sw SMU path is
	 * not in use; confirm against the init path.
	 */
	if (!is_support_sw_smu(adev)) {
		/* mask fan attributes if we have no bindings for this asic to expose */
		if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
		    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
			effective_mode &= ~S_IRUGO;

		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
		    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
			effective_mode &= ~S_IWUSR;
	}

	/* APUs do not expose power readings or caps */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
		return 0;

	if (!is_support_sw_smu(adev)) {
		/* hide max/min values if we can't both query and manage the fan */
		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
		     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
		     (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
		    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
		     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
			return 0;

		/* likewise for the RPM limits when no RPM bindings exist */
		if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
		    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
		     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
			return 0;
	}

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
		return 0;

	/* no mclk on APUs */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
		return 0;

	return effective_mode;
}
2181
/* hwmon attribute group, with per-device visibility filtering */
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

/* NULL-terminated list of hwmon attribute groups */
static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
2191
2192 void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
2193 {
2194         struct amdgpu_device *adev =
2195                 container_of(work, struct amdgpu_device,
2196                              pm.dpm.thermal.work);
2197         /* switch to the thermal state */
2198         enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
2199         int temp, size = sizeof(temp);
2200
2201         if (!adev->pm.dpm_enabled)
2202                 return;
2203
2204         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
2205                                     (void *)&temp, &size)) {
2206                 if (temp < adev->pm.dpm.thermal.min_temp)
2207                         /* switch back the user state */
2208                         dpm_state = adev->pm.dpm.user_state;
2209         } else {
2210                 if (adev->pm.dpm.thermal.high_to_low)
2211                         /* switch back the user state */
2212                         dpm_state = adev->pm.dpm.user_state;
2213         }
2214         mutex_lock(&adev->pm.mutex);
2215         if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
2216                 adev->pm.dpm.thermal_active = true;
2217         else
2218                 adev->pm.dpm.thermal_active = false;
2219         adev->pm.dpm.state = dpm_state;
2220         mutex_unlock(&adev->pm.mutex);
2221
2222         amdgpu_pm_compute_clocks(adev);
2223 }
2224
2225 static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
2226                                                      enum amd_pm_state_type dpm_state)
2227 {
2228         int i;
2229         struct amdgpu_ps *ps;
2230         u32 ui_class;
2231         bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
2232                 true : false;
2233
2234         /* check if the vblank period is too short to adjust the mclk */
2235         if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
2236                 if (amdgpu_dpm_vblank_too_short(adev))
2237                         single_display = false;
2238         }
2239
2240         /* certain older asics have a separare 3D performance state,
2241          * so try that first if the user selected performance
2242          */
2243         if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
2244                 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
2245         /* balanced states don't exist at the moment */
2246         if (dpm_state == POWER_STATE_TYPE_BALANCED)
2247                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
2248
2249 restart_search:
2250         /* Pick the best power state based on current conditions */
2251         for (i = 0; i < adev->pm.dpm.num_ps; i++) {
2252                 ps = &adev->pm.dpm.ps[i];
2253                 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
2254                 switch (dpm_state) {
2255                 /* user states */
2256                 case POWER_STATE_TYPE_BATTERY:
2257                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
2258                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
2259                                         if (single_display)
2260                                                 return ps;
2261                                 } else
2262                                         return ps;
2263                         }
2264                         break;
2265                 case POWER_STATE_TYPE_BALANCED:
2266                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
2267                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
2268                                         if (single_display)
2269                                                 return ps;
2270                                 } else
2271                                         return ps;
2272                         }
2273                         break;
2274                 case POWER_STATE_TYPE_PERFORMANCE:
2275                         if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
2276                                 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
2277                                         if (single_display)
2278                                                 return ps;
2279                                 } else
2280                                         return ps;
2281                         }
2282                         break;
2283                 /* internal states */
2284                 case POWER_STATE_TYPE_INTERNAL_UVD:
2285                         if (adev->pm.dpm.uvd_ps)
2286                                 return adev->pm.dpm.uvd_ps;
2287                         else
2288                                 break;
2289                 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
2290                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
2291                                 return ps;
2292                         break;
2293                 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
2294                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
2295                                 return ps;
2296                         break;
2297                 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
2298                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
2299                                 return ps;
2300                         break;
2301                 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
2302                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
2303                                 return ps;
2304                         break;
2305                 case POWER_STATE_TYPE_INTERNAL_BOOT:
2306                         return adev->pm.dpm.boot_ps;
2307                 case POWER_STATE_TYPE_INTERNAL_THERMAL:
2308                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
2309                                 return ps;
2310                         break;
2311                 case POWER_STATE_TYPE_INTERNAL_ACPI:
2312                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
2313                                 return ps;
2314                         break;
2315                 case POWER_STATE_TYPE_INTERNAL_ULV:
2316                         if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
2317                                 return ps;
2318                         break;
2319                 case POWER_STATE_TYPE_INTERNAL_3DPERF:
2320                         if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
2321                                 return ps;
2322                         break;
2323                 default:
2324                         break;
2325                 }
2326         }
2327         /* use a fallback state if we didn't match */
2328         switch (dpm_state) {
2329         case POWER_STATE_TYPE_INTERNAL_UVD_SD:
2330                 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
2331                 goto restart_search;
2332         case POWER_STATE_TYPE_INTERNAL_UVD_HD:
2333         case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
2334         case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
2335                 if (adev->pm.dpm.uvd_ps) {
2336                         return adev->pm.dpm.uvd_ps;
2337                 } else {
2338                         dpm_state = POWER_STATE_TYPE_PERFORMANCE;
2339                         goto restart_search;
2340                 }
2341         case POWER_STATE_TYPE_INTERNAL_THERMAL:
2342                 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
2343                 goto restart_search;
2344         case POWER_STATE_TYPE_INTERNAL_ACPI:
2345                 dpm_state = POWER_STATE_TYPE_BATTERY;
2346                 goto restart_search;
2347         case POWER_STATE_TYPE_BATTERY:
2348         case POWER_STATE_TYPE_BALANCED:
2349         case POWER_STATE_TYPE_INTERNAL_3DPERF:
2350                 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
2351                 goto restart_search;
2352         default:
2353                 break;
2354         }
2355
2356         return NULL;
2357 }
2358
/*
 * amdgpu_dpm_change_power_state_locked - switch to the requested dpm state
 *
 * @adev: amdgpu device pointer
 *
 * Resolves the effective dpm state (user selection unless a thermal or UVD
 * override is active), picks a matching power state, and reprograms the
 * hardware if it differs from the current one.  Caller is expected to hold
 * adev->pm.mutex (see amdgpu_pm_compute_clocks()).
 */
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}
	dpm_state = adev->pm.dpm.state;

	/* no matching (or fallback) state means nothing to switch to */
	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	/* verbose state dump, only when full dpm debugging was requested */
	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	/* skip the hardware reprogram when the new state equals the current one */
	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}
2427
2428 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
2429 {
2430         int ret = 0;
2431         if (is_support_sw_smu(adev)) {
2432             ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable);
2433             if (ret)
2434                 DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n",
2435                           enable ? "true" : "false", ret);
2436         } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
2437                 /* enable/disable UVD */
2438                 mutex_lock(&adev->pm.mutex);
2439                 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
2440                 mutex_unlock(&adev->pm.mutex);
2441         }
2442         /* enable/disable Low Memory PState for UVD (4k videos) */
2443         if (adev->asic_type == CHIP_STONEY &&
2444                 adev->uvd.decode_image_width >= WIDTH_4K) {
2445                 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2446
2447                 if (hwmgr && hwmgr->hwmgr_func &&
2448                     hwmgr->hwmgr_func->update_nbdpm_pstate)
2449                         hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
2450                                                                !enable,
2451                                                                true);
2452         }
2453 }
2454
2455 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
2456 {
2457         int ret = 0;
2458         if (is_support_sw_smu(adev)) {
2459             ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable);
2460             if (ret)
2461                 DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n",
2462                           enable ? "true" : "false", ret);
2463         } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
2464                 /* enable/disable VCE */
2465                 mutex_lock(&adev->pm.mutex);
2466                 amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
2467                 mutex_unlock(&adev->pm.mutex);
2468         }
2469 }
2470
2471 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
2472 {
2473         int i;
2474
2475         if (adev->powerplay.pp_funcs->print_power_state == NULL)
2476                 return;
2477
2478         for (i = 0; i < adev->pm.dpm.num_ps; i++)
2479                 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
2480
2481 }
2482
2483 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
2484 {
2485         struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2486         int ret;
2487
2488         if (adev->pm.sysfs_initialized)
2489                 return 0;
2490
2491         if (adev->pm.dpm_enabled == 0)
2492                 return 0;
2493
2494         adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
2495                                                                    DRIVER_NAME, adev,
2496                                                                    hwmon_groups);
2497         if (IS_ERR(adev->pm.int_hwmon_dev)) {
2498                 ret = PTR_ERR(adev->pm.int_hwmon_dev);
2499                 dev_err(adev->dev,
2500                         "Unable to register hwmon device: %d\n", ret);
2501                 return ret;
2502         }
2503
2504         ret = device_create_file(adev->dev, &dev_attr_power_dpm_state);
2505         if (ret) {
2506                 DRM_ERROR("failed to create device file for dpm state\n");
2507                 return ret;
2508         }
2509         ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level);
2510         if (ret) {
2511                 DRM_ERROR("failed to create device file for dpm state\n");
2512                 return ret;
2513         }
2514
2515
2516         ret = device_create_file(adev->dev, &dev_attr_pp_num_states);
2517         if (ret) {
2518                 DRM_ERROR("failed to create device file pp_num_states\n");
2519                 return ret;
2520         }
2521         ret = device_create_file(adev->dev, &dev_attr_pp_cur_state);
2522         if (ret) {
2523                 DRM_ERROR("failed to create device file pp_cur_state\n");
2524                 return ret;
2525         }
2526         ret = device_create_file(adev->dev, &dev_attr_pp_force_state);
2527         if (ret) {
2528                 DRM_ERROR("failed to create device file pp_force_state\n");
2529                 return ret;
2530         }
2531         ret = device_create_file(adev->dev, &dev_attr_pp_table);
2532         if (ret) {
2533                 DRM_ERROR("failed to create device file pp_table\n");
2534                 return ret;
2535         }
2536
2537         ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
2538         if (ret) {
2539                 DRM_ERROR("failed to create device file pp_dpm_sclk\n");
2540                 return ret;
2541         }
2542         ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
2543         if (ret) {
2544                 DRM_ERROR("failed to create device file pp_dpm_mclk\n");
2545                 return ret;
2546         }
2547         if (adev->asic_type >= CHIP_VEGA10) {
2548                 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk);
2549                 if (ret) {
2550                         DRM_ERROR("failed to create device file pp_dpm_socclk\n");
2551                         return ret;
2552                 }
2553                 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
2554                 if (ret) {
2555                         DRM_ERROR("failed to create device file pp_dpm_dcefclk\n");
2556                         return ret;
2557                 }
2558         }
2559         if (adev->asic_type >= CHIP_VEGA20) {
2560                 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk);
2561                 if (ret) {
2562                         DRM_ERROR("failed to create device file pp_dpm_fclk\n");
2563                         return ret;
2564                 }
2565         }
2566         ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
2567         if (ret) {
2568                 DRM_ERROR("failed to create device file pp_dpm_pcie\n");
2569                 return ret;
2570         }
2571         ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
2572         if (ret) {
2573                 DRM_ERROR("failed to create device file pp_sclk_od\n");
2574                 return ret;
2575         }
2576         ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
2577         if (ret) {
2578                 DRM_ERROR("failed to create device file pp_mclk_od\n");
2579                 return ret;
2580         }
2581         ret = device_create_file(adev->dev,
2582                         &dev_attr_pp_power_profile_mode);
2583         if (ret) {
2584                 DRM_ERROR("failed to create device file "
2585                                 "pp_power_profile_mode\n");
2586                 return ret;
2587         }
2588         if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
2589             (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
2590                 ret = device_create_file(adev->dev,
2591                                 &dev_attr_pp_od_clk_voltage);
2592                 if (ret) {
2593                         DRM_ERROR("failed to create device file "
2594                                         "pp_od_clk_voltage\n");
2595                         return ret;
2596                 }
2597         }
2598         ret = device_create_file(adev->dev,
2599                         &dev_attr_gpu_busy_percent);
2600         if (ret) {
2601                 DRM_ERROR("failed to create device file "
2602                                 "gpu_busy_level\n");
2603                 return ret;
2604         }
2605         /* PCIe Perf counters won't work on APU nodes */
2606         if (!(adev->flags & AMD_IS_APU)) {
2607                 ret = device_create_file(adev->dev, &dev_attr_pcie_bw);
2608                 if (ret) {
2609                         DRM_ERROR("failed to create device file pcie_bw\n");
2610                         return ret;
2611                 }
2612         }
2613         ret = amdgpu_debugfs_pm_init(adev);
2614         if (ret) {
2615                 DRM_ERROR("Failed to register debugfs file for dpm!\n");
2616                 return ret;
2617         }
2618
2619         if ((adev->asic_type >= CHIP_VEGA10) &&
2620             !(adev->flags & AMD_IS_APU)) {
2621                 ret = device_create_file(adev->dev,
2622                                 &dev_attr_ppfeatures);
2623                 if (ret) {
2624                         DRM_ERROR("failed to create device file "
2625                                         "ppfeatures\n");
2626                         return ret;
2627                 }
2628         }
2629
2630         adev->pm.sysfs_initialized = true;
2631
2632         return 0;
2633 }
2634
/**
 * amdgpu_pm_sysfs_fini - remove the pm sysfs and hwmon interfaces
 *
 * @adev: amdgpu device pointer
 *
 * Mirror of amdgpu_pm_sysfs_init(): unregisters the hwmon device and removes
 * the sysfs files, applying the same ASIC/feature conditions that governed
 * their creation so only files that actually exist are removed.
 */
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

	/* nothing was created if dpm never came up */
	if (adev->pm.dpm_enabled == 0)
		return;

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);
	device_remove_file(adev->dev, &dev_attr_power_dpm_state);
	device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level);

	device_remove_file(adev->dev, &dev_attr_pp_num_states);
	device_remove_file(adev->dev, &dev_attr_pp_cur_state);
	device_remove_file(adev->dev, &dev_attr_pp_force_state);
	device_remove_file(adev->dev, &dev_attr_pp_table);

	device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
	device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
	/* socclk/dcefclk were only created on Vega10 and newer */
	if (adev->asic_type >= CHIP_VEGA10) {
		device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk);
		device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk);
	}
	device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
	/* fclk was only created on Vega20 and newer */
	if (adev->asic_type >= CHIP_VEGA20)
		device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk);
	device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
	device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
	device_remove_file(adev->dev,
			&dev_attr_pp_power_profile_mode);
	/* overdrive controls were only created when OD is enabled */
	if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
	    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
		device_remove_file(adev->dev,
				&dev_attr_pp_od_clk_voltage);
	device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
	/* pcie_bw and ppfeatures were only created on non-APU parts */
	if (!(adev->flags & AMD_IS_APU))
		device_remove_file(adev->dev, &dev_attr_pcie_bw);
	if ((adev->asic_type >= CHIP_VEGA10) &&
	    !(adev->flags & AMD_IS_APU))
		device_remove_file(adev->dev, &dev_attr_ppfeatures);
}
2676
/**
 * amdgpu_pm_compute_clocks - re-evaluate clocks after a display change
 *
 * @adev: amdgpu device pointer
 *
 * Updates display bandwidth, waits for all active rings to drain their
 * fences, then notifies the power-management backend of the new display
 * configuration: via smu_handle_task() on SW SMU parts, via the powerplay
 * dispatch_tasks callback where available, or by directly recomputing the
 * power state under adev->pm.mutex as a last resort.
 */
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	/* let in-flight work finish before reclocking */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (is_support_sw_smu(adev)) {
		struct smu_context *smu = &adev->smu;
		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
		mutex_lock(&(smu->mutex));
		smu_handle_task(&adev->smu,
				smu_dpm->dpm_level,
				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
		mutex_unlock(&(smu->mutex));
	} else {
		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			/* on non-DC paths, build the display config by hand first */
			if (!amdgpu_device_has_dc_support(adev)) {
				mutex_lock(&adev->pm.mutex);
				amdgpu_dpm_get_active_displays(adev);
				adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
				adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
				adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
				/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
				if (adev->pm.pm_display_cfg.vrefresh > 120)
					adev->pm.pm_display_cfg.min_vblank_time = 0;
				if (adev->powerplay.pp_funcs->display_configuration_change)
					adev->powerplay.pp_funcs->display_configuration_change(
									adev->powerplay.pp_handle,
									&adev->pm.pm_display_cfg);
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
		} else {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			amdgpu_dpm_change_power_state_locked(adev);
			mutex_unlock(&adev->pm.mutex);
		}
	}
}
2727
2728 /*
2729  * Debugfs info
2730  */
2731 #if defined(CONFIG_DEBUG_FS)
2732
2733 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
2734 {
2735         uint32_t value;
2736         uint64_t value64;
2737         uint32_t query = 0;
2738         int size;
2739
2740         /* GPU Clocks */
2741         size = sizeof(value);
2742         seq_printf(m, "GFX Clocks and Power:\n");
2743         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
2744                 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
2745         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
2746                 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
2747         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
2748                 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
2749         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
2750                 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
2751         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
2752                 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
2753         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
2754                 seq_printf(m, "\t%u mV (VDDNB)\n", value);
2755         size = sizeof(uint32_t);
2756         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
2757                 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
2758         size = sizeof(value);
2759         seq_printf(m, "\n");
2760
2761         /* GPU Temp */
2762         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
2763                 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
2764
2765         /* GPU Load */
2766         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
2767                 seq_printf(m, "GPU Load: %u %%\n", value);
2768         seq_printf(m, "\n");
2769
2770         /* SMC feature mask */
2771         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
2772                 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
2773
2774         /* UVD clocks */
2775         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
2776                 if (!value) {
2777                         seq_printf(m, "UVD: Disabled\n");
2778                 } else {
2779                         seq_printf(m, "UVD: Enabled\n");
2780                         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
2781                                 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
2782                         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
2783                                 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
2784                 }
2785         }
2786         seq_printf(m, "\n");
2787
2788         /* VCE clocks */
2789         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
2790                 if (!value) {
2791                         seq_printf(m, "VCE: Disabled\n");
2792                 } else {
2793                         seq_printf(m, "VCE: Enabled\n");
2794                         if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
2795                                 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
2796                 }
2797         }
2798
2799         return 0;
2800 }
2801
2802 static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
2803 {
2804         int i;
2805
2806         for (i = 0; clocks[i].flag; i++)
2807                 seq_printf(m, "\t%s: %s\n", clocks[i].name,
2808                            (flags & clocks[i].flag) ? "On" : "Off");
2809 }
2810
2811 static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
2812 {
2813         struct drm_info_node *node = (struct drm_info_node *) m->private;
2814         struct drm_device *dev = node->minor->dev;
2815         struct amdgpu_device *adev = dev->dev_private;
2816         struct drm_device *ddev = adev->ddev;
2817         u32 flags = 0;
2818
2819         amdgpu_device_ip_get_clockgating_state(adev, &flags);
2820         seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
2821         amdgpu_parse_cg_state(m, flags);
2822         seq_printf(m, "\n");
2823
2824         if (!adev->pm.dpm_enabled) {
2825                 seq_printf(m, "dpm not enabled\n");
2826                 return 0;
2827         }
2828         if  ((adev->flags & AMD_IS_PX) &&
2829              (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
2830                 seq_printf(m, "PX asic powered off\n");
2831         } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
2832                 mutex_lock(&adev->pm.mutex);
2833                 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
2834                         adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
2835                 else
2836                         seq_printf(m, "Debugfs support not implemented for this asic\n");
2837                 mutex_unlock(&adev->pm.mutex);
2838         } else {
2839                 return amdgpu_debugfs_pm_info_pp(m, adev);
2840         }
2841
2842         return 0;
2843 }
2844
/* debugfs entry table: a single "amdgpu_pm_info" node served by amdgpu_debugfs_pm_info */
static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif
2849
/*
 * amdgpu_debugfs_pm_init - register the pm debugfs entries
 *
 * Registers amdgpu_pm_info_list with debugfs when CONFIG_DEBUG_FS is set;
 * otherwise a no-op that reports success.
 */
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
#else
	return 0;
#endif
}