 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drm_debugfs.h>

#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_display.h"
#include "amdgpu_smu.h"

#include <linux/power_supply.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "ROM Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
};
static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
		mutex_unlock(&adev->pm.mutex);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(&adev->smu);
	}
}
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_read_sensor(&adev->smu, sensor, data, size);
	} else {
		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
			ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
								    sensor, data, size);
		else
			ret = -EINVAL;
	}

	return ret;
}
/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
 * certain power related parameters. The file power_dpm_state is used for this.
 * It accepts the following arguments:
 *
 * - battery
 *
 * - balanced
 *
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation. Selecting battery switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation. Selecting balanced switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation. Selecting performance switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
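 *
 * A usage sketch (card0 is a placeholder for your device's sysfs node):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/power_dpm_state
 *	echo battery > /sys/class/drm/card0/device/power_dpm_state
 */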
static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		if (adev->smu.ppt_funcs->get_current_power_state)
			pm = smu_get_current_power_state(&adev->smu);
		else
			pm = adev->pm.dpm.user_state;
	} else if (adev->powerplay.pp_funcs->get_current_power_state) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else {
		pm = adev->pm.dpm.user_state;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}
static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);
	} else if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
/**
 * DOC: power_dpm_force_performance_level
 *
 * The amdgpu driver provides a sysfs API for adjusting certain power
 * related parameters. The file power_dpm_force_performance_level is
 * used for this. It accepts the following arguments:
 *
 * - auto
 *
 * - low
 *
 * - high
 *
 * - manual
 *
 * - profile_standard
 *
 * - profile_min_sclk
 *
 * - profile_min_mclk
 *
 * - profile_peak
 *
 * auto
 *
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for current conditions in the driver.
 *
 * low
 *
 * When low is selected, the clocks are forced to the lowest power state.
 *
 * high
 *
 * When high is selected, the clocks are forced to the highest power state.
 *
 * manual
 *
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 *
 * profile_standard
 * profile_min_sclk
 * profile_min_mclk
 * profile_peak
 *
 * When one of the profiling modes is selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific workloads where you do
 * not want clock or power gating, or clock fluctuations, to interfere
 * with your results. profile_standard sets the clocks to a fixed clock
 * level which varies from asic to asic. profile_min_sclk forces the sclk
 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
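 *
 * A usage sketch (card0 is a placeholder for your device's sysfs node):
 *
 * .. code-block:: bash
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	cat /sys/class/drm/card0/device/power_dpm_force_performance_level
 */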
static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		level = amdgpu_dpm_get_performance_level(adev);
	else
		level = adev->pm.dpm.forced_level;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			"unknown");
}
static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    const char *buf,
							    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level = 0xff;
	int ret = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else {
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		current_level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		current_level = amdgpu_dpm_get_performance_level(adev);

	if (current_level == level) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return count;
	}

	/* profile_exit setting is valid only when current mode is in profile mode */
	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
	    AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
		pr_err("Currently not in any profile mode!\n");
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return -EINVAL;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_force_performance_level(&adev->smu, level);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->force_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		} else {
			adev->pm.dpm.forced_level = level;
		}
		mutex_unlock(&adev->pm.mutex);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_num_states(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	int i, buf_len, ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_get_power_num_states(&adev->smu, &data);
		if (ret)
			return ret;
	} else if (adev->powerplay.pp_funcs->get_pp_num_states) {
		amdgpu_dpm_get_pp_num_states(adev, &data);
	} else {
		memset(&data, 0, sizeof(data));
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}
static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	struct smu_context *smu = &adev->smu;
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		pm = smu_get_current_power_state(smu);
		ret = smu_get_power_num_states(smu, &data);
		if (ret)
			return ret;
	} else if (adev->powerplay.pp_funcs->get_current_power_state
		 && adev->powerplay.pp_funcs->get_pp_num_states) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}
static ssize_t amdgpu_get_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (is_support_sw_smu(adev))
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
		 adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;
		unsigned long idx;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states))
			return -EINVAL;

		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];

		ret = pm_runtime_get_sync(ddev->dev);
		if (ret < 0)
			return ret;

		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state);
			adev->pp_force_state_enabled = true;
		}

		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	}

	return count;
}
/**
 * DOC: pp_table
 *
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables. The file pp_table is used for this. Reading the file
 * will dump the current power play table. Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
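 *
 * For example, to back up the current table and upload a modified copy
 * (the paths below are placeholders):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table.bak
 *	cat /tmp/pp_table.mod > /sys/class/drm/card0/device/pp_table
 */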
static ssize_t amdgpu_get_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	char *table = NULL;
	int size, ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else if (adev->powerplay.pp_funcs->get_pp_table) {
		size = amdgpu_dpm_get_pp_table(adev, &table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return 0;
	}

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}
static ssize_t amdgpu_set_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}
	} else if (adev->powerplay.pp_funcs->set_pp_table)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
/**
 * DOC: pp_od_clk_voltage
 *
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state. The pp_od_clk_voltage file is used
 * for this.
 *
 * < For Vega10 and previous ASICs >
 *
 * Reading the file will display:
 *
 * - a list of engine clock levels and voltages labeled OD_SCLK
 *
 * - a list of memory clock levels and voltages labeled OD_MCLK
 *
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level. Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV. When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes. If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
 *
 *
 * < For Vega20 >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - maximum memory clock labeled OD_MCLK
 *
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 *   They can be used to calibrate the sclk voltage curve.
 *
 * - a list of valid ranges for sclk, mclk, and voltage curve points
 *   labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock. E.g., "s 0 500" will update the minimum sclk to 500 MHz and
 *   "m 1 800" will update the maximum mclk to 800 MHz.
 *
 * - For the sclk voltage curve, enter the new values by writing a
 *   string that contains "vc point clock voltage" to the file. The
 *   points are indexed 0, 1 and 2. E.g., "vc 0 300 600" will update
 *   point 0 to 300 MHz at 600 mV and "vc 2 1000 1000" will update
 *   point 2 to 1000 MHz at 1000 mV.
 *
 * - When you have edited all of the states as needed, write "c" (commit)
 *   to the file to commit your changes
 *
 * - If you want to reset to the default power levels, write "r" (reset)
 *   to the file to reset them
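 *
 * A usage sketch for a pre-Vega20 part (card0 and the values below are
 * placeholders):
 *
 * .. code-block:: bash
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	echo "s 1 500 820" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *	echo "c" > /sys/class/drm/card0/device/pp_od_clk_voltage
 */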
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	if (count > 127)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count+1);

	tmp_str = buf_cpy;

	if (type == PP_OD_EDIT_VDDC_CURVE)
		tmp_str++;
	while (isspace(*++tmp_str));

	while (tmp_str[0]) {
		sub_str = strsep(&tmp_str, delimiter);
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_od_edit_dpm_table(&adev->smu, type,
					    parameter, parameter_size);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else {
		if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
			ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
							    parameter, parameter_size);
			if (ret) {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}

		if (type == PP_OD_COMMIT_DPM_TABLE) {
			if (adev->powerplay.pp_funcs->dispatch_tasks) {
				amdgpu_dpm_dispatch_task(adev,
							 AMD_PP_TASK_READJUST_POWER_STATE,
							 NULL);
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return count;
			} else {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
	} else if (adev->powerplay.pp_funcs->print_clock_levels) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting what powerplay
 * features are enabled. The file pp_features is used for this, and it
 * is only available for Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 * - Current ppfeature masks
 * - List of all the supported powerplay features with their naming,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit in the original ppfeature mask and write the
 * new mask back to the file.
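 *
 * For example (the mask value below is purely illustrative):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_features
 *	echo 0x3fff > /sys/class/drm/card0/device/pp_features
 */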
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t featuremask;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	pr_debug("featuremask = 0x%llx\n", featuremask);

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
		ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_ppfeature_status)
		size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
 *
 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels.
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Secondly, enter a new value for each level by writing a space-separated
 * list of level indices to the file. E.g.,
 *
 * .. code-block:: bash
 *
 *	echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: changing the dcefclk max dpm level is not currently supported
 */
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */
#define AMDGPU_MASK_BUF_MAX	(32 * 13)
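/*
 * For example, writing "4 5 6" to a pp_dpm_* file makes amdgpu_read_mask()
 * below return *mask == (1 << 4) | (1 << 5) | (1 << 6) == 0x70.
 */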
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while (tmp[0]) {
		sub_str = strsep(&tmp, delimiter);
		if (strlen(sub_str)) {
			ret = kstrtol(sub_str, 0, &level);
			if (ret)
				return -EINVAL;
			*mask |= 1 << level;
		} else
			break;
	}

	return 0;
}
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t mask = 0;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
					struct device_attribute *attr,
					const char *buf,
					size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}

static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
	else if (adev->powerplay.pp_funcs->get_sclk_od)
		value = amdgpu_dpm_get_sclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = kstrtol(buf, 0, &value);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_sclk_od)
			amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}

static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
	else if (adev->powerplay.pp_funcs->get_mclk_od)
		value = amdgpu_dpm_get_mclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf,
				     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	ret = kstrtol(buf, 0, &value);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_mclk_od)
			amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
/**
 * DOC: pp_power_profile_mode
 *
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state. The file
 * pp_power_profile_mode is used for this.
 *
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 *
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level. Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics. To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter. Due to differences across asic families
 * the heuristic parameters vary from family to family.
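 *
 * A usage sketch (the profile numbers and parameter counts are
 * asic-specific; read the file first to see what your asic expects —
 * the values below are placeholders):
 *
 * .. code-block:: bash
 *
 *	# select a predefined profile by number
 *	echo 2 > /sys/class/drm/card0/device/pp_power_profile_mode
 *	# program a custom profile: its number followed by the heuristic
 *	# parameters for this asic family
 *	echo "6 70 90 0 0" > /sys/class/drm/card0/device/pp_power_profile_mode
 */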
static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_get_power_profile_mode(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_power_profile_mode)
		size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t count)
{
	int ret = 0xff;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count-i);
		tmp_str = buf_cpy;
		while (tmp_str[0]) {
			sub_str = strsep(&tmp_str, delimiter);
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	parameter[parameter_size] = profile_mode;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (!ret)
		return count;

	return -EINVAL;
}
/**
 * DOC: gpu_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage. The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
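 *
 * For example (card0 is a placeholder for your device):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/gpu_busy_percent
 */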
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0)
		return r;

	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
/**
 * DOC: mem_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 * is as a percentage. The file mem_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0)
		return r;

	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
/**
 * DOC: pcie_bw
 *
 * The amdgpu driver provides a sysfs API for estimating how much data
 * has been received and sent by the GPU in the last second through PCIe.
 * The file pcie_bw is used for this.
 * The Perf counters count the number of received and sent messages and return
 * those values, as well as the maximum payload size of a PCIe packet (mps).
 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow for
 * quick estimation of the PCIe bandwidth usage.
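 *
 * As a rough estimation sketch (card0 is a placeholder; the file prints
 * "<received count> <sent count> <mps>"):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pcie_bw
 *	# estimated bytes moved in the last second ~= (count0 + count1) * mps
 */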
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t count0, count1;
	int ret;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
			count0, count1, pcie_get_mps(adev->pdev));
}
/**
 * DOC: unique_id
 *
 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
 * The file unique_id is used for this.
 * This will provide a Unique ID that will persist from machine to machine.
 *
 * NOTE: This will only work for GFX9 and newer. This file will be absent
 * on unsupported ASICs (GFX8 and older).
 */
static ssize_t amdgpu_get_unique_id(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return -EPERM;

	if (adev->unique_id)
		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);

	return 0;
}
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
	AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
};
static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *attr_name = dev_attr->attr.name;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	enum amd_asic_type asic_type = adev->asic_type;

	if (!(attr->flags & mask)) {
		attr->states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))

	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
		if (asic_type <= CHIP_VEGA10)
			attr->states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
		if (asic_type <= CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
			attr->states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
		if (asic_type < CHIP_VEGA20)
			attr->states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
		if (asic_type == CHIP_ARCTURUS)
			attr->states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
		attr->states = ATTR_STATE_UNSUPPORTED;
		if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
			attr->states = ATTR_STATE_SUPPORTED;
	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
			attr->states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
		/* PCIe Perf counters won't work on APU nodes */
		if (adev->flags & AMD_IS_APU)
			attr->states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(unique_id)) {
		if (!adev->unique_id)
			attr->states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_features)) {
		if (adev->flags & AMD_IS_APU || asic_type <= CHIP_VEGA10)
			attr->states = ATTR_STATE_UNSUPPORTED;
	}

	if (asic_type == CHIP_ARCTURUS) {
		/* Arcturus does not support standalone mclk/socclk/fclk level setting */
		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
	}

#undef DEVICE_ATTR_IS

	return 0;
}
static int amdgpu_device_attr_create(struct amdgpu_device *adev,
				     struct amdgpu_device_attr *attr,
				     uint32_t mask)
{
	int ret = 0;
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *name = dev_attr->attr.name;
	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			   uint32_t mask) = default_attr_update;

	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;

	ret = attr_update(adev, attr, mask);
	if (ret) {
		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
			name, ret);
		return ret;
	}

	/* attr->states may have been changed by the attr->attr_update() call above */
	if (attr->states == ATTR_STATE_UNSUPPORTED)
		return 0;

	ret = device_create_file(adev->dev, dev_attr);
	if (ret)
		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
			name, ret);

	attr->states = ATTR_STATE_SUPPORTED;

	return ret;
}
static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
{
	struct device_attribute *dev_attr = &attr->dev_attr;

	if (attr->states == ATTR_STATE_UNSUPPORTED)
		return;

	device_remove_file(adev->dev, dev_attr);

	attr->states = ATTR_STATE_UNSUPPORTED;
}

static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
					    struct amdgpu_device_attr *attrs,
					    uint32_t counts,
					    uint32_t mask)
{
	int ret = 0;
	uint32_t i = 0;

	for (i = 0; i < counts; i++) {
		ret = amdgpu_device_attr_create(adev, &attrs[i], mask);
		if (ret)
			goto failed;
	}

	return 0;

failed:
	while (i--)
		amdgpu_device_attr_remove(adev, &attrs[i]);

	return ret;
}

static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct amdgpu_device_attr *attrs,
					     uint32_t counts)
{
	uint32_t i = 0;

	for (i = 0; i < counts; i++)
		amdgpu_device_attr_remove(adev, &attrs[i]);
}
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int r, temp = 0, size = sizeof(temp);

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	switch (channel) {
	case PP_TEMP_JUNCTION:
		/* get current junction temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_EDGE:
		/* get current edge temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_MEM:
		/* get current memory temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
					   (void *)&temp, &size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_hotspot_temp;
	else
		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}

static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_mem_temp;
	else
		temp = adev->pm.dpm.thermal.max_mem_crit_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	int channel = to_sensor_dev_attr(attr)->index;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
}

static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int temp = 0;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	switch (channel) {
	case PP_TEMP_JUNCTION:
		temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
		break;
	case PP_TEMP_EDGE:
		temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
		break;
	case PP_TEMP_MEM:
		temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
		break;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;
	int ret;

	ret = pm_runtime_get_sync(adev->ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	} else {
		if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			return -EINVAL;
		}

		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return sprintf(buf, "%i\n", pwm_mode);
}
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err, ret;
	int value;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	ret = pm_runtime_get_sync(adev->ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		smu_set_fan_control_mode(&adev->smu, value);
	} else {
		if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			return -EINVAL;
		}

		amdgpu_dpm_set_fan_control_mode(adev, value);
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return count;
}
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;
	u32 pwm_mode;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	else
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		pr_info("manual fan speed control should be enabled first\n");
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return -EINVAL;
	}

	err = kstrtou32(buf, 10, &value);
	if (err) {
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return err;
	}

	value = (value * 100) / 255;

	if (is_support_sw_smu(adev))
		err = smu_set_fan_speed_percent(&adev->smu, value);
	else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	return count;
}
static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		err = smu_get_fan_speed_percent(&adev->smu, &speed);
	else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		err = smu_get_fan_speed_rpm(&adev->smu, &speed);
	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	return sprintf(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 min_rpm = 0;
	u32 size = sizeof(min_rpm);
	int r;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
				   (void *)&min_rpm, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
}
static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 max_rpm = 0;
	u32 size = sizeof(max_rpm);
	int r;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
				   (void *)&max_rpm, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
}
static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 rpm = 0;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	return sprintf(buf, "%i\n", rpm);
}
static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;
	u32 pwm_mode;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	else
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return -ENODATA;
	}

	err = kstrtou32(buf, 10, &value);
	if (err) {
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return err;
	}

	if (is_support_sw_smu(adev))
		err = smu_set_fan_speed_rpm(&adev->smu, value);
	else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
		err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	return count;
}
static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;
	int ret;

	ret = pm_runtime_get_sync(adev->ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	} else {
		if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			return -EINVAL;
		}

		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	int value;
	u32 pwm_mode;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	if (value == 0)
		pwm_mode = AMD_FAN_CTRL_AUTO;
	else if (value == 1)
		pwm_mode = AMD_FAN_CTRL_MANUAL;
	else
		return -EINVAL;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev)) {
		smu_set_fan_control_mode(&adev->smu, pwm_mode);
	} else {
		if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			return -EINVAL;
		}

		amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return count;
}
static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 vddgfx;
	int r, size = sizeof(vddgfx);

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
				   (void *)&vddgfx, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
}

static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	return snprintf(buf, PAGE_SIZE, "vddgfx\n");
}
static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 vddnb;
	int r, size = sizeof(vddnb);

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU))
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
				   (void *)&vddnb, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
}

static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	return snprintf(buf, PAGE_SIZE, "vddnb\n");
}
static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 query = 0;
	int r, size = sizeof(u32);
	unsigned uw;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	/* get the average power */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
				   (void *)&query, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	/* convert to microwatts */
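	/*
	 * The sensor packs the wattage into a fixed-point value: bits 8 and
	 * up hold whole watts and the low byte is treated here as
	 * milliwatts. A raw reading of 0x2A80, for example, yields
	 * 42 * 1000000 + 128 * 1000 = 42128000 uW.
	 */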
	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;

	return snprintf(buf, PAGE_SIZE, "%u\n", uw);
}
static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	return sprintf(buf, "%i\n", 0);
}
static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t limit = 0;
	ssize_t size;
	int r;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	if (is_support_sw_smu(adev)) {
		smu_get_power_limit(&adev->smu, &limit, true, true);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return size;
}
static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t limit = 0;
	ssize_t size;
	int r;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	if (is_support_sw_smu(adev)) {
		smu_get_power_limit(&adev->smu, &limit, false, true);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return size;
}
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;

	if (amdgpu_sriov_vf(adev))
		return -EINVAL;

	err = kstrtou32(buf, 10, &value);
	if (err)
		return err;

	value = value / 1000000; /* convert to Watt */

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		err = smu_set_power_limit(&adev->smu, value);
	else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
		err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	return count;
}
static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t sclk;
	int r, size = sizeof(sclk);

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	/* get the sclk */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
				   (void *)&sclk, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;
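	/*
	 * The sensor reports the clock in 10 kHz units; multiply by
	 * 10 * 1000 to present hertz, as the hwmon freq*_input
	 * convention expects.
	 */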
	return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
}

static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "sclk\n");
}
static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	uint32_t mclk;
	int r, size = sizeof(mclk);

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	/* get the mclk */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
				   (void *)&mclk, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	/* like sclk, the sensor unit is 10 kHz; report hertz to hwmon */
	return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
}

static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "mclk\n");
}
/**
 * DOC: hwmon
 *
 * The amdgpu driver exposes the following sensor interfaces:
 *
 * - GPU temperature (via the on-die sensor)
 *
 * - GPU voltage
 *
 * - Northbridge voltage (APUs only)
 *
 * - GPU power
 *
 * - GPU fan
 *
 * - GPU gfx/compute engine clock
 *
 * - GPU memory clock (dGPU only)
 *
 * hwmon interfaces for GPU temperature:
 *
 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
 *   - temp2_input and temp3_input are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_label: temperature channel label
 *   - temp2_label and temp3_label are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
 *   - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
 *   - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
 *
 * - temp[1-3]_emergency: temperature emergency max value (asic shutdown) in millidegrees Celsius
 *   - these are supported on SOC15 dGPUs only
 *
 * hwmon interfaces for GPU voltage:
 *
 * - in0_input: the voltage on the GPU in millivolts
 *
 * - in1_input: the voltage on the Northbridge in millivolts
 *
 * hwmon interfaces for GPU power:
 *
 * - power1_average: average power used by the GPU in microWatts
 *
 * - power1_cap_min: minimum cap supported in microWatts
 *
 * - power1_cap_max: maximum cap supported in microWatts
 *
 * - power1_cap: selected power cap in microWatts
 *
 * hwmon interfaces for GPU fan:
 *
 * - pwm1: pulse width modulation fan level (0-255)
 *
 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
 *
 * - pwm1_min: pulse width modulation fan control minimum level (0)
 *
 * - pwm1_max: pulse width modulation fan control maximum level (255)
 *
 * - fan1_min: the minimum fan speed in revolutions per minute (RPM)
 *
 * - fan1_max: the maximum fan speed in revolutions per minute (RPM)
 *
 * - fan1_input: the current fan speed in RPM
 *
 * - fan[1-\*]_target: the desired fan speed in RPM
 *
 * - fan[1-\*]_enable: enable or disable the fan sensor (1: enable, 0: disable)
 *
 * hwmon interfaces for GPU clocks:
 *
 * - freq1_input: the gfx/compute clock in hertz
 *
 * - freq2_input: the memory clock in hertz
 *
 * You can use hwmon tools like sensors to view this information on your system.
 *
 */
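/*
 * A minimal usage sketch of the fan interfaces described above (the hwmon
 * index below is hypothetical; the actual hwmonN directory is assigned when
 * the device is registered):
 *
 *	# enable manual fan control, then request roughly 50% duty
 *	echo 1 > /sys/class/hwmon/hwmon0/pwm1_enable
 *	echo 128 > /sys/class/hwmon/hwmon0/pwm1
 */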
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp2_crit.dev_attr.attr,
	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,
	&sensor_dev_attr_temp3_crit.dev_attr.attr,
	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
	&sensor_dev_attr_temp2_emergency.dev_attr.attr,
	&sensor_dev_attr_temp3_emergency.dev_attr.attr,
	&sensor_dev_attr_temp1_label.dev_attr.attr,
	&sensor_dev_attr_temp2_label.dev_attr.attr,
	&sensor_dev_attr_temp3_label.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_target.dev_attr.attr,
	&sensor_dev_attr_fan1_enable.dev_attr.attr,
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_label.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_label.dev_attr.attr,
	&sensor_dev_attr_power1_average.dev_attr.attr,
	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
	&sensor_dev_attr_power1_cap.dev_attr.attr,
	&sensor_dev_attr_freq1_input.dev_attr.attr,
	&sensor_dev_attr_freq1_label.dev_attr.attr,
	&sensor_dev_attr_freq2_input.dev_attr.attr,
	&sensor_dev_attr_freq2_label.dev_attr.attr,
	NULL
};
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr,
					int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* under multi-vf mode, the hwmon attributes are all not supported */
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	/* there is no fan under pp one vf mode */
	if (amdgpu_sriov_is_pp_one_vf(adev) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
				attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
				attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
				attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip fan attributes on APU */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
		return 0;

	if (!is_support_sw_smu(adev)) {
		/* mask fan attributes if we have no bindings for this asic to expose */
		if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
		    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
			effective_mode &= ~S_IRUGO;

		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
		    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
			effective_mode &= ~S_IWUSR;
	}

	if (((adev->flags & AMD_IS_APU) ||
	     adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
	     adev->family == AMDGPU_FAMILY_KV) &&	/* not implemented yet */
	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
		return 0;

	if (!is_support_sw_smu(adev)) {
		/* hide max/min values if we can't both query and manage the fan */
		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
		     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
		    (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
		    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
		     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
			return 0;

		if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
		    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
		     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
			return 0;
	}

	if ((adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
	     adev->family == AMDGPU_FAMILY_KV) &&	/* not implemented yet */
	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))
		return 0;

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))
		return 0;

	/* no mclk on APUs */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
		return 0;

	/* only SOC15 dGPUs support hotspot and mem temperatures */
	if (((adev->flags & AMD_IS_APU) ||
	     adev->asic_type < CHIP_VEGA10) &&
	    (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
		return 0;

	return effective_mode;
}
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

	if (!adev->pm.dpm_enabled)
		return;

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				    (void *)&temp, &size)) {
		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
}
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
{
	int i;
	struct amdgpu_ps *ps;
	u32 ui_class;
	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
		true : false;

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;
	}

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		/* user states */
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
					if (single_display)
						return ps;
				} else
					return ps;
			}
			break;
		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;
			else
				break;
		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
				return ps;
			break;
		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
				return ps;
			break;
		default:
			break;
		}
	}
	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
	default:
		break;
	}

	return NULL;
}
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;
	int ret;
	bool equal = false;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)
		return;

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;
	}

	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
	if (ps)
		adev->pm.dpm.requested_ps = ps;
	else
		return;

	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
	}

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);
	if (ret)
		return;

	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
			equal = false;
	}

	if (equal)
		return;

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
}
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
	if (ret)
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);

	/* enable/disable Low Memory PState for UVD (4k videos) */
	if (adev->asic_type == CHIP_STONEY &&
	    adev->uvd.decode_image_width >= WIDTH_4K) {
		struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

		if (hwmgr && hwmgr->hwmgr_func &&
		    hwmgr->hwmgr_func->update_nbdpm_pstate)
			hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
							       !enable,
							       true);
	}
}
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
	if (ret)
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{
	int i;

	if (adev->powerplay.pp_funcs->print_power_state == NULL)
		return;

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
{
	int ret = 0;

	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
	if (ret)
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
}
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
	int r;

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
		if (r) {
			pr_err("smu firmware loading failed\n");
			return r;
		}
		*smu_version = adev->pm.fw_version;
	}
	return 0;
}
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
{
	int ret;
	uint32_t mask = 0;

	if (adev->pm.sysfs_initialized)
		return 0;

	if (adev->pm.dpm_enabled == 0)
		return 0;

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
								   DRIVER_NAME, adev,
								   hwmon_groups);
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
		dev_err(adev->dev,
			"Unable to register hwmon device: %d\n", ret);
		return ret;
	}

	switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
	case SRIOV_VF_MODE_ONE_VF:
		mask = ATTR_FLAG_ONEVF;
		break;
	case SRIOV_VF_MODE_MULTI_VF:
		mask = 0;
		break;
	case SRIOV_VF_MODE_BARE_METAL:
	default:
		mask = ATTR_FLAG_MASK_ALL;
		break;
	}

	ret = amdgpu_device_attr_create_groups(adev,
					       amdgpu_device_attrs,
					       ARRAY_SIZE(amdgpu_device_attrs),
					       mask);
	if (ret)
		return ret;

	adev->pm.sysfs_initialized = true;

	return 0;
}
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled == 0)
		return;

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);

	amdgpu_device_attr_remove_groups(adev,
					 amdgpu_device_attrs,
					 ARRAY_SIZE(amdgpu_device_attrs));
}
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
{
	int i = 0;

	if (!adev->pm.dpm_enabled)
		return;

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);
	}

	if (is_support_sw_smu(adev)) {
		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
		smu_handle_task(&adev->smu,
				smu_dpm->dpm_level,
				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
				true);
	} else {
		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			if (!amdgpu_device_has_dc_support(adev)) {
				mutex_lock(&adev->pm.mutex);
				amdgpu_dpm_get_active_displays(adev);
				adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
				adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
				adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
				/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
				if (adev->pm.pm_display_cfg.vrefresh > 120)
					adev->pm.pm_display_cfg.min_vblank_time = 0;
				if (adev->powerplay.pp_funcs->display_configuration_change)
					adev->powerplay.pp_funcs->display_configuration_change(
								adev->powerplay.pp_handle,
								&adev->pm.pm_display_cfg);
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
		} else {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			amdgpu_dpm_change_power_state_locked(adev);
			mutex_unlock(&adev->pm.mutex);
		}
	}
}
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
{
	uint32_t value;
	uint64_t value64;
	uint32_t query = 0;
	int size;

	/* GPU Clocks */
	size = sizeof(value);
	seq_printf(m, "GFX Clocks and Power:\n");
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	size = sizeof(uint32_t);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
		seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
	size = sizeof(value);
	seq_printf(m, "\n");

	/* GPU Temp */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	/* GPU Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
		seq_printf(m, "GPU Load: %u %%\n", value);
	/* MEM Load */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
		seq_printf(m, "MEM Load: %u %%\n", value);

	seq_printf(m, "\n");

	/* SMC feature mask */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);

	if (adev->asic_type > CHIP_VEGA20) {
		/* VCN clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCN: Disabled\n");
			} else {
				seq_printf(m, "VCN: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");
	} else {
		/* UVD clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "UVD: Disabled\n");
			} else {
				seq_printf(m, "UVD: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");

		/* VCE clocks */
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCE: Disabled\n");
			} else {
				seq_printf(m, "VCE: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
			}
		}
	}

	return 0;
}
static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
{
	int i;

	for (i = 0; clocks[i].flag; i++)
		seq_printf(m, "\t%s: %s\n", clocks[i].name,
			   (flags & clocks[i].flag) ? "On" : "Off");
}
static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	u32 flags = 0;
	int r;

	r = pm_runtime_get_sync(dev->dev);
	if (r < 0)
		return r;

	amdgpu_device_ip_get_clockgating_state(adev, &flags);
	seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
	amdgpu_parse_cg_state(m, flags);
	seq_printf(m, "\n");

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
		return 0;
	}

	if (!is_support_sw_smu(adev) &&
	    adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
			adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
		r = 0;
	} else {
		r = amdgpu_debugfs_pm_info_pp(m, adev);
	}

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return r;
}
static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));