/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_display.h"
#include "amdgpu_smu.h"
#include <linux/power_supply.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include "hwmgr.h"
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "ROM Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
	{0, NULL},
};
static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
		mutex_unlock(&adev->pm.mutex);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(&adev->smu);
	}
}
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	if (is_support_sw_smu(adev))
		ret = smu_read_sensor(&adev->smu, sensor, data, size);
	else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
		ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
							    sensor, data, size);

	return ret;
}
/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
 * certain power related parameters. The file power_dpm_state is used for this.
 * It accepts the following arguments:
 *
 * - battery
 * - balanced
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation. Selecting battery switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation. Selecting balanced switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation. Selecting performance switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
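 *
 * For example, assuming the GPU is exposed as card0 (the sysfs path can
 * differ per system), a sketch of reading and setting the state:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/power_dpm_state
 *	echo "battery" > /sys/class/drm/card0/device/power_dpm_state
 */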
static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;
	int ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		if (adev->smu.ppt_funcs->get_current_power_state)
			pm = smu_get_current_power_state(&adev->smu);
		else
			pm = adev->pm.dpm.user_state;
	} else if (adev->powerplay.pp_funcs->get_current_power_state) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else {
		pm = adev->pm.dpm.user_state;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}
static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;
	int ret;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);
	} else if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
/**
 * DOC: power_dpm_force_performance_level
 *
 * The amdgpu driver provides a sysfs API for adjusting certain power
 * related parameters. The file power_dpm_force_performance_level is
 * used for this. It accepts the following arguments:
 *
 * - auto
 * - low
 * - high
 * - manual
 * - profile_standard
 * - profile_min_sclk
 * - profile_min_mclk
 * - profile_peak
 *
 * auto
 *
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for current conditions in the driver.
 *
 * low
 *
 * When low is selected, the clocks are forced to the lowest power state.
 *
 * high
 *
 * When high is selected, the clocks are forced to the highest power state.
 *
 * manual
 *
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 *
 * profile_standard
 * profile_min_sclk
 * profile_min_mclk
 * profile_peak
 *
 * When the profiling modes are selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific workloads where you do
 * not want clock or power gating, or clock fluctuations, to interfere
 * with your results. profile_standard sets the clocks to a fixed clock
 * level which varies from asic to asic. profile_min_sclk forces the sclk
 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
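 *
 * For example, assuming the GPU is card0, a sketch of switching to manual
 * mode before adjusting the pp_dpm_* files:
 *
 * .. code-block:: bash
 *
 *	echo "manual" > /sys/class/drm/card0/device/power_dpm_force_performance_level
 */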
static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		level = amdgpu_dpm_get_performance_level(adev);
	else
		level = adev->pm.dpm.forced_level;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			"unknown");
}
static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    const char *buf,
							    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level = 0xff;
	int ret = 0;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else {
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		current_level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		current_level = amdgpu_dpm_get_performance_level(adev);

	if (current_level == level) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return count;
	}

	/* profile_exit setting is valid only when current mode is in profile mode */
	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
	    AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
	    AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
		pr_err("Currently not in any profile mode!\n");
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return -EINVAL;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_force_performance_level(&adev->smu, level);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->force_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		} else {
			adev->pm.dpm.forced_level = level;
		}
		mutex_unlock(&adev->pm.mutex);
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_num_states(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	int i, buf_len, ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_get_power_num_states(&adev->smu, &data);
		if (ret)
			return ret;
	} else if (adev->powerplay.pp_funcs->get_pp_num_states) {
		amdgpu_dpm_get_pp_num_states(adev, &data);
	} else {
		memset(&data, 0, sizeof(data));
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}
static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	struct smu_context *smu = &adev->smu;
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		pm = smu_get_current_power_state(smu);
		ret = smu_get_power_num_states(smu, &data);
		if (ret)
			return ret;
	} else if (adev->powerplay.pp_funcs->get_current_power_state
		 && adev->powerplay.pp_funcs->get_pp_num_states) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}
static ssize_t amdgpu_get_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (is_support_sw_smu(adev))
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
		 adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states))
			return -EINVAL;

		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];

		ret = pm_runtime_get_sync(ddev->dev);
		if (ret < 0)
			return ret;

		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state);
			adev->pp_force_state_enabled = true;
		}
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	}

	return count;
}
/**
 * DOC: pp_table
 *
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables. The file pp_table is used for this. Reading the file
 * will dump the current powerplay table. Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
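 *
 * For example, assuming the GPU is card0, a sketch of dumping the active
 * table and uploading a (previously saved) replacement:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table
 *	cat /tmp/pp_table > /sys/class/drm/card0/device/pp_table
 */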
static ssize_t amdgpu_get_pp_table(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	char *table = NULL;
	int size, ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else if (adev->powerplay.pp_funcs->get_pp_table) {
		size = amdgpu_dpm_get_pp_table(adev, &table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return 0;
	}

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}
static ssize_t amdgpu_set_pp_table(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret = 0;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}
	} else if (adev->powerplay.pp_funcs->set_pp_table)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
/**
 * DOC: pp_od_clk_voltage
 *
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state. The file pp_od_clk_voltage is
 * used for this.
 *
 * < For Vega10 and previous ASICs >
 *
 * Reading the file will display:
 *
 * - a list of engine clock levels and voltages labeled OD_SCLK
 *
 * - a list of memory clock levels and voltages labeled OD_MCLK
 *
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level. Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV. When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes. If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
 *
 * < For Vega20 and later ASICs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - maximum memory clock labeled OD_MCLK
 *
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 *   They can be used to calibrate the sclk voltage curve.
 *
 * - a list of valid ranges for sclk, mclk, and voltage curve points
 *   labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock. E.g., "s 0 500" will update the minimum sclk to 500 MHz and
 *   "m 1 800" will update the maximum mclk to 800 MHz.
 *
 * - For the sclk voltage curve, enter the new values by writing a
 *   string that contains "vc point clock voltage" to the file. The
 *   points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
 *   update the first point with a clock of 300 MHz and a voltage of
 *   600 mV; "vc 2 1000 1000" will update the third point with a clock
 *   of 1000 MHz and a voltage of 1000 mV.
 *
 * - When you have edited all of the states as needed, write "c" (commit)
 *   to the file to commit your changes
 *
 * - If you want to reset to the default power levels, write "r" (reset)
 *   to the file to reset them
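 *
 * For example, assuming the GPU is card0 and manual mode is already
 * selected, a sketch of the pre-Vega20 flow described above:
 *
 * .. code-block:: bash
 *
 *	echo "s 1 500 820" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *	echo "m 0 350 810" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *	echo "c" > /sys/class/drm/card0/device/pp_od_clk_voltage
 */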
static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (count > 127)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count+1);

	tmp_str = buf_cpy;

	if (type == PP_OD_EDIT_VDDC_CURVE)
		tmp_str++;
	while (isspace(*++tmp_str));

	while (tmp_str[0]) {
		sub_str = strsep(&tmp_str, delimiter);
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_od_edit_dpm_table(&adev->smu, type,
					    parameter, parameter_size);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else {
		if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
			ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
							    parameter, parameter_size);
			if (ret) {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}

		if (type == PP_OD_COMMIT_DPM_TABLE) {
			if (adev->powerplay.pp_funcs->dispatch_tasks) {
				amdgpu_dpm_dispatch_task(adev,
						AMD_PP_TASK_READJUST_POWER_STATE,
						NULL);
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return count;
			} else {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
	} else if (adev->powerplay.pp_funcs->print_clock_levels) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this, and it is
 * only available for Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 *
 * - Current ppfeature mask
 *
 * - List of all the supported powerplay features with their names,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit in the original ppfeature mask and write the
 * new mask back.
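 *
 * For example, assuming the GPU is card0, a sketch of reading the mask and
 * writing back a modified one (the mask value below is illustrative only):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_features
 *	echo 0x0000000019f0e3cf > /sys/class/drm/card0/device/pp_features
 */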
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t featuremask;
	int ret;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	pr_debug("featuremask = 0x%llx\n", featuremask);

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
		ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_ppfeature_status)
		size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
 *
 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels.
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Secondly, enter a new value for each level by writing a space-separated
 * list of the level indices to enable to the corresponding file, e.g.
 *
 * .. code-block:: bash
 *
 *	echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: changing the dcefclk max dpm level is not supported currently.
 */
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */
#define AMDGPU_MASK_BUF_MAX	(32 * 13)
static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while (tmp[0]) {
		sub_str = strsep(&tmp, delimiter);
		if (strlen(sub_str)) {
			ret = kstrtol(sub_str, 0, &level);
			if (ret)
				return -EINVAL;
			*mask |= 1 << level;
		} else
			break;
	}

	return 0;
}
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t mask = 0;
	int ret = 0;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t mask = 0;
	int ret = 0;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t mask = 0;
	int ret = 0;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t mask = 0;
	int ret = 0;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t mask = 0;
	int ret = 0;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t mask = 0;
	int ret = 0;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;
	int ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
	else if (adev->powerplay.pp_funcs->get_sclk_od)
		value = amdgpu_dpm_get_sclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	long int value;
	int ret;

	ret = kstrtol(buf, 0, &value);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_sclk_od)
			amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;
	int ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
	else if (adev->powerplay.pp_funcs->get_mclk_od)
		value = amdgpu_dpm_get_mclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	long int value;
	int ret;

	ret = kstrtol(buf, 0, &value);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_mclk_od)
			amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
/**
 * DOC: pp_power_profile_mode
 *
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state. The file
 * pp_power_profile_mode is used for this.
 *
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 *
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level. Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics. To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter. Due to differences across asic families
 * the heuristic parameters vary from family to family.
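 *
 * For example, assuming the GPU is card0 and manual mode is selected, a
 * sketch of listing the profiles and enabling a predefined one (the
 * profile indices and parameters vary per ASIC; "2" is illustrative):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_power_profile_mode
 *	echo "2" > /sys/class/drm/card0/device/pp_power_profile_mode
 */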
static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		size = smu_get_power_profile_mode(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_power_profile_mode)
		size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};
	int ret;

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count-i);
		tmp_str = buf_cpy;
		while (tmp_str[0]) {
			sub_str = strsep(&tmp_str, delimiter);
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	parameter[parameter_size] = profile_mode;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev))
		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (!ret)
		return count;

	return -EINVAL;
}
/**
 * DOC: gpu_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage. The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
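 *
 * For example, assuming the GPU is card0, the value can be polled with:
 *
 * .. code-block:: bash
 *
 *	watch -n 1 cat /sys/class/drm/card0/device/gpu_busy_percent
 */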
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0)
		return r;

	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
/**
 * DOC: mem_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 * is as a percentage. The file mem_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0)
		return r;

	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
/**
 * DOC: pcie_bw
 *
 * The amdgpu driver provides a sysfs API for estimating how much data
 * has been received and sent by the GPU in the last second through PCIe.
 * The file pcie_bw is used for this.
 * The Perf counters count the number of received and sent messages and return
 * those values, as well as the maximum payload size of a PCIe packet (mps).
 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow for
 * quick estimation of the PCIe bandwidth usage.
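 *
 * For example, assuming the GPU is card0, a rough upper-bound estimate of
 * the bytes moved in the last second can be sketched as
 * (received + sent) * mps, treating every message as a full-sized packet:
 *
 * .. code-block:: bash
 *
 *	read count0 count1 mps < /sys/class/drm/card0/device/pcie_bw
 *	echo $(( (count0 + count1) * mps ))
 */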
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t count0, count1;
	int ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0)
		return ret;

	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
			count0, count1, pcie_get_mps(adev->pdev));
}
/**
 * DOC: unique_id
 *
 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
 * The file unique_id is used for this.
 * This will provide a unique ID that will persist from machine to machine.
 *
 * NOTE: This will only work for GFX9 and newer. This file will be absent
 * on unsupported ASICs (GFX8 and older).
 */
static ssize_t amdgpu_get_unique_id(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->unique_id)
		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);

	return 0;
}
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RO(pp_num_states,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_table,					ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,			ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,			ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pcie_bw,					ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_features,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(unique_id,				ATTR_FLAG_BASIC),
};
static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *attr_name = dev_attr->attr.name;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	enum amd_asic_type asic_type = adev->asic_type;

	if (!(attr->flags & mask)) {
		attr->states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))

	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
		if (asic_type <= CHIP_VEGA10)
			attr->states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
		if (asic_type <= CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
			attr->states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
		if (asic_type < CHIP_VEGA20)
			attr->states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
		if (asic_type == CHIP_ARCTURUS)
			attr->states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
		attr->states = ATTR_STATE_UNSUPPORTED;
		if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
			attr->states = ATTR_STATE_SUPPORTED;
	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
			attr->states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
		/* PCIe Perf counters won't work on APU nodes */
		if (adev->flags & AMD_IS_APU)
			attr->states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(unique_id)) {
		if (!adev->unique_id)
			attr->states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_features)) {
		if (adev->flags & AMD_IS_APU || asic_type <= CHIP_VEGA10)
			attr->states = ATTR_STATE_UNSUPPORTED;
	}

	if (asic_type == CHIP_ARCTURUS) {
		/* Arcturus does not support standalone mclk/socclk/fclk level setting */
		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
	}

#undef DEVICE_ATTR_IS

	return 0;
}
static int amdgpu_device_attr_create(struct amdgpu_device *adev,
				     struct amdgpu_device_attr *attr,
				     uint32_t mask)
{
	int ret = 0;
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *name = dev_attr->attr.name;
	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			   uint32_t mask) = default_attr_update;

	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;

	ret = attr_update(adev, attr, mask);
	if (ret) {
		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
			name, ret);
		return ret;
	}

	/* attr->states may have been changed by the attr_update() call above */
	if (attr->states == ATTR_STATE_UNSUPPORTED)
		return 0;

	ret = device_create_file(adev->dev, dev_attr);
	if (ret)
		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
			name, ret);

	attr->states = ATTR_STATE_SUPPORTED;

	return ret;
}
static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
{
	struct device_attribute *dev_attr = &attr->dev_attr;

	if (attr->states == ATTR_STATE_UNSUPPORTED)
		return;

	device_remove_file(adev->dev, dev_attr);

	attr->states = ATTR_STATE_UNSUPPORTED;
}
static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
					    struct amdgpu_device_attr *attrs,
					    uint32_t counts,
					    uint32_t mask)
{
	int ret = 0;
	uint32_t i = 0;

	for (i = 0; i < counts; i++) {
		ret = amdgpu_device_attr_create(adev, &attrs[i], mask);
		if (ret)
			goto failed;
	}

	return 0;

failed:
	while (i--)
		amdgpu_device_attr_remove(adev, &attrs[i]);

	return ret;
}
static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct amdgpu_device_attr *attrs,
					     uint32_t counts)
{
	uint32_t i = 0;

	for (i = 0; i < counts; i++)
		amdgpu_device_attr_remove(adev, &attrs[i]);
}
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int r, temp = 0, size = sizeof(temp);

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	switch (channel) {
	case PP_TEMP_JUNCTION:
		/* get current junction temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_EDGE:
		/* get current edge temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_MEM:
		/* get current memory temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
					   (void *)&temp, &size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
					     struct device_attribute *attr,
					     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_temp;
	else
		temp = adev->pm.dpm.thermal.max_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_hotspot_temp;
	else
		temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int hyst = to_sensor_dev_attr(attr)->index;
	int temp;

	if (hyst)
		temp = adev->pm.dpm.thermal.min_mem_temp;
	else
		temp = adev->pm.dpm.thermal.max_mem_crit_temp;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	int channel = to_sensor_dev_attr(attr)->index;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
}
static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int temp = 0;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	switch (channel) {
	case PP_TEMP_JUNCTION:
		temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
		break;
	case PP_TEMP_EDGE:
		temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
		break;
	case PP_TEMP_MEM:
		temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
		break;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 pwm_mode = 0;
	int ret;

	ret = pm_runtime_get_sync(adev->ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	} else {
		if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			return -EINVAL;
		}

		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return sprintf(buf, "%i\n", pwm_mode);
}
static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf,
					    size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err, ret;
	int value;

	err = kstrtoint(buf, 10, &value);
	if (err)
		return err;

	ret = pm_runtime_get_sync(adev->ddev->dev);
	if (ret < 0)
		return ret;

	if (is_support_sw_smu(adev)) {
		smu_set_fan_control_mode(&adev->smu, value);
	} else {
		if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);
			return -EINVAL;
		}

		amdgpu_dpm_set_fan_control_mode(adev, value);
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return count;
}
static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 0);
}

static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%i\n", 255);
}
static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;
	u32 pwm_mode;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	else
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		pr_info("manual fan speed control should be enabled first\n");
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return -EINVAL;
	}

	err = kstrtou32(buf, 10, &value);
	if (err) {
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return err;
	}

	value = (value * 100) / 255;

	if (is_support_sw_smu(adev))
		err = smu_set_fan_speed_percent(&adev->smu, value);
	else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	return count;
}
static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		err = smu_get_fan_speed_percent(&adev->smu, &speed);
	else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	speed = (speed * 255) / 100;

	return sprintf(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 speed = 0;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		err = smu_get_fan_speed_rpm(&adev->smu, &speed);
	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	return sprintf(buf, "%i\n", speed);
}
static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 min_rpm = 0;
	u32 size = sizeof(min_rpm);
	int r;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
				   (void *)&min_rpm, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
}
static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	u32 max_rpm = 0;
	u32 size = sizeof(max_rpm);
	int r;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0)
		return r;

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
				   (void *)&max_rpm, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
}
static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 rpm = 0;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
	else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
		err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	return sprintf(buf, "%i\n", rpm);
}
static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t count)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int err;
	u32 value;
	u32 pwm_mode;

	err = pm_runtime_get_sync(adev->ddev->dev);
	if (err < 0)
		return err;

	if (is_support_sw_smu(adev))
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	else
		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return -ENODATA;
	}

	err = kstrtou32(buf, 10, &value);
	if (err) {
		pm_runtime_mark_last_busy(adev->ddev->dev);
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return err;
	}

	if (is_support_sw_smu(adev))
		err = smu_set_fan_speed_rpm(&adev->smu, value);
	else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
		err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
	else
		err = -EINVAL;

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (err)
		return err;

	return count;
}
static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
					    struct device_attribute *attr,
	struct amdgpu_device *adev = dev_get_drvdata(dev);

	ret = pm_runtime_get_sync(adev->ddev->dev);

	if (is_support_sw_smu(adev)) {
		pwm_mode = smu_get_fan_control_mode(&adev->smu);
	} else {
		if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);

		pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);
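	/* fan1_enable reports 0 only in fully automatic mode, 1 otherwise */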
	return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
					    struct device_attribute *attr,
	struct amdgpu_device *adev = dev_get_drvdata(dev);

	err = kstrtoint(buf, 10, &value);

		pwm_mode = AMD_FAN_CTRL_AUTO;
	else if (value == 1)
		pwm_mode = AMD_FAN_CTRL_MANUAL;

	err = pm_runtime_get_sync(adev->ddev->dev);

	if (is_support_sw_smu(adev)) {
		smu_set_fan_control_mode(&adev->smu, pwm_mode);
	} else {
		if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
			pm_runtime_mark_last_busy(adev->ddev->dev);
			pm_runtime_put_autosuspend(adev->ddev->dev);

		amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);
static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
					struct device_attribute *attr,
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int r, size = sizeof(vddgfx);

	r = pm_runtime_get_sync(adev->ddev->dev);

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
				   (void *)&vddgfx, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
					      struct device_attribute *attr,
	return snprintf(buf, PAGE_SIZE, "vddgfx\n");
static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
				       struct device_attribute *attr,
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int r, size = sizeof(vddnb);

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU))

	r = pm_runtime_get_sync(adev->ddev->dev);

	/* get the voltage */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
				   (void *)&vddnb, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
					     struct device_attribute *attr,
	return snprintf(buf, PAGE_SIZE, "vddnb\n");
static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
					   struct device_attribute *attr,
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int r, size = sizeof(u32);

	r = pm_runtime_get_sync(adev->ddev->dev);
	/* get the power */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
				   (void *)&query, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);
	/* the sensor reports an 8.8 fixed-point value: integer watts in the
	 * high bits, with the low byte treated here as milliwatts; convert
	 * the whole reading to microwatts
	 */
	uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;

	return snprintf(buf, PAGE_SIZE, "%u\n", uw);
static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
					       struct device_attribute *attr,
	return sprintf(buf, "%i\n", 0);
static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
					       struct device_attribute *attr,
	struct amdgpu_device *adev = dev_get_drvdata(dev);

	r = pm_runtime_get_sync(adev->ddev->dev);

	if (is_support_sw_smu(adev)) {
		smu_get_power_limit(&adev->smu, &limit, true, true);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);
static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
					   struct device_attribute *attr,
	struct amdgpu_device *adev = dev_get_drvdata(dev);

	r = pm_runtime_get_sync(adev->ddev->dev);

	if (is_support_sw_smu(adev)) {
		smu_get_power_limit(&adev->smu, &limit, false, true);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
		adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
		size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
					  struct device_attribute *attr,
	struct amdgpu_device *adev = dev_get_drvdata(dev);

	if (amdgpu_sriov_vf(adev))

	err = kstrtou32(buf, 10, &value);
	value = value / 1000000; /* convert the microwatt input to watts */

	err = pm_runtime_get_sync(adev->ddev->dev);

	if (is_support_sw_smu(adev))
		err = smu_set_power_limit(&adev->smu, value);
	else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
		err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);
static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
				      struct device_attribute *attr,
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int r, size = sizeof(sclk);

	r = pm_runtime_get_sync(adev->ddev->dev);

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
				   (void *)&sclk, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);
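	/* the sensor reports the clock in 10 kHz units; scale to Hz for hwmon */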
	return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
					    struct device_attribute *attr,
	return snprintf(buf, PAGE_SIZE, "sclk\n");
static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
				      struct device_attribute *attr,
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int r, size = sizeof(mclk);

	r = pm_runtime_get_sync(adev->ddev->dev);

	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
				   (void *)&mclk, &size);

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);
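	/* the sensor reports the clock in 10 kHz units; scale to Hz for hwmon */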
	return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
					    struct device_attribute *attr,
	return snprintf(buf, PAGE_SIZE, "mclk\n");
 * The amdgpu driver exposes the following sensor interfaces:
 *
 * - GPU temperature (via the on-die sensor)
 *
 * - Northbridge voltage (APUs only)
 *
 * - GPU gfx/compute engine clock
 *
 * - GPU memory clock (dGPU only)
 *
 * hwmon interfaces for GPU temperature:
 *
 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
 *   (temp2_input and temp3_input are supported on SOC15 dGPUs only)
 *
 * - temp[1-3]_label: temperature channel label
 *   (temp2_label and temp3_label are supported on SOC15 dGPUs only)
 *
 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
 *   (temp2_crit and temp3_crit are supported on SOC15 dGPUs only)
 *
 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
 *   (temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only)
 *
 * - temp[1-3]_emergency: temperature emergency max value (asic shutdown) in millidegrees Celsius
 *   (these are supported on SOC15 dGPUs only)
 *
 * hwmon interfaces for GPU voltage:
 *
 * - in0_input: the voltage on the GPU in millivolts
 *
 * - in1_input: the voltage on the Northbridge in millivolts
 *
 * hwmon interfaces for GPU power:
 *
 * - power1_average: average power used by the GPU in microWatts
 *
 * - power1_cap_min: minimum cap supported in microWatts
 *
 * - power1_cap_max: maximum cap supported in microWatts
 *
 * - power1_cap: selected power cap in microWatts
 *
 * hwmon interfaces for GPU fan:
 *
 * - pwm1: pulse width modulation fan level (0-255)
 *
 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
 *
 * - pwm1_min: pulse width modulation fan control minimum level (0)
 *
 * - pwm1_max: pulse width modulation fan control maximum level (255)
 *
 * - fan1_min: minimum fan speed in revolutions per minute (RPM)
 *
 * - fan1_max: maximum fan speed in revolutions per minute (RPM)
 *
 * - fan1_input: fan speed in RPM
 *
 * - fan[1-\*]_target: desired fan speed in RPM
 *
 * - fan[1-\*]_enable: enable or disable the sensor (1: enable, 0: disable)
 *
 * hwmon interfaces for GPU clocks:
 *
 * - freq1_input: the gfx/compute clock in hertz
 *
 * - freq2_input: the memory clock in hertz
 *
 * You can use hwmon tools like sensors to view this information on your system.
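 *
 * As a minimal userspace sketch (the hwmon index, and therefore the path used
 * below, is a system-dependent assumption), the average GPU power can be read
 * back and converted to watts like this:
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *       FILE *f = fopen("/sys/class/hwmon/hwmon0/power1_average", "r");
 *       unsigned int uw;
 *
 *       if (!f)
 *           return 1;
 *       if (fscanf(f, "%u", &uw) != 1) {
 *           fclose(f);
 *           return 1;
 *       }
 *       fclose(f);
 *       printf("average GPU power: %u.%06u W\n", uw / 1000000, uw % 1000000);
 *       return 0;
 *   }
 */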
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
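
/*
 * Each SENSOR_DEVICE_ATTR(name, mode, show, store, index) above expands to a
 * struct sensor_device_attribute named sensor_dev_attr_<name>; the embedded
 * dev_attr.attr is what gets collected into the attribute list below, and the
 * index picks the channel (e.g. PP_TEMP_EDGE) inside the shared callbacks.
 */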
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_temp1_crit.dev_attr.attr,
	&sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp2_input.dev_attr.attr,
	&sensor_dev_attr_temp2_crit.dev_attr.attr,
	&sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp3_input.dev_attr.attr,
	&sensor_dev_attr_temp3_crit.dev_attr.attr,
	&sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
	&sensor_dev_attr_temp1_emergency.dev_attr.attr,
	&sensor_dev_attr_temp2_emergency.dev_attr.attr,
	&sensor_dev_attr_temp3_emergency.dev_attr.attr,
	&sensor_dev_attr_temp1_label.dev_attr.attr,
	&sensor_dev_attr_temp2_label.dev_attr.attr,
	&sensor_dev_attr_temp3_label.dev_attr.attr,
	&sensor_dev_attr_pwm1.dev_attr.attr,
	&sensor_dev_attr_pwm1_enable.dev_attr.attr,
	&sensor_dev_attr_pwm1_min.dev_attr.attr,
	&sensor_dev_attr_pwm1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_input.dev_attr.attr,
	&sensor_dev_attr_fan1_min.dev_attr.attr,
	&sensor_dev_attr_fan1_max.dev_attr.attr,
	&sensor_dev_attr_fan1_target.dev_attr.attr,
	&sensor_dev_attr_fan1_enable.dev_attr.attr,
	&sensor_dev_attr_in0_input.dev_attr.attr,
	&sensor_dev_attr_in0_label.dev_attr.attr,
	&sensor_dev_attr_in1_input.dev_attr.attr,
	&sensor_dev_attr_in1_label.dev_attr.attr,
	&sensor_dev_attr_power1_average.dev_attr.attr,
	&sensor_dev_attr_power1_cap_max.dev_attr.attr,
	&sensor_dev_attr_power1_cap_min.dev_attr.attr,
	&sensor_dev_attr_power1_cap.dev_attr.attr,
	&sensor_dev_attr_freq1_input.dev_attr.attr,
	&sensor_dev_attr_freq1_label.dev_attr.attr,
	&sensor_dev_attr_freq2_input.dev_attr.attr,
	&sensor_dev_attr_freq2_label.dev_attr.attr,
static umode_t hwmon_attributes_visible(struct kobject *kobj,
					struct attribute *attr, int index)
	struct device *dev = kobj_to_dev(kobj);
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	umode_t effective_mode = attr->mode;

	/* under multi-vf mode, none of the hwmon attributes are supported */
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))

	/* there is no fan under pp one vf mode */
	if (amdgpu_sriov_is_pp_one_vf(adev) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))

	/* Skip fan attributes if fan is not present */
	if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
				attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
				attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
				attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
				attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))

	/* Skip fan attributes on APU */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))

	/* Skip limit attributes if DPM is not enabled */
	if (!adev->pm.dpm_enabled &&
	    (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
	     attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))

	if (!is_support_sw_smu(adev)) {
		/* mask fan attributes if we have no bindings for this asic to expose */
		if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
		    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
			effective_mode &= ~S_IRUGO;

		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
		     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
		    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
		     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
			effective_mode &= ~S_IWUSR;

	if (((adev->flags & AMD_IS_APU) ||
	     adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
	     adev->family == AMDGPU_FAMILY_KV) &&	/* not implemented yet */
	    (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
	     attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
	if (!is_support_sw_smu(adev)) {
		/* hide max/min values if we can't both query and manage the fan */
		if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
		     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
		    (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
		    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
		     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))

		if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
		     !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
		    (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
		     attr == &sensor_dev_attr_fan1_min.dev_attr.attr))

	if ((adev->family == AMDGPU_FAMILY_SI ||	/* not implemented yet */
	     adev->family == AMDGPU_FAMILY_KV) &&	/* not implemented yet */
	    (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in0_label.dev_attr.attr))

	/* only APUs have vddnb */
	if (!(adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_in1_label.dev_attr.attr))

	/* no mclk on APUs */
	if ((adev->flags & AMD_IS_APU) &&
	    (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_freq2_label.dev_attr.attr))

	/* only SOC15 dGPUs support hotspot and mem temperatures */
	if (((adev->flags & AMD_IS_APU) ||
	     adev->asic_type < CHIP_VEGA10) &&
	    (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
	     attr == &sensor_dev_attr_temp3_label.dev_attr.attr))

	return effective_mode;
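
/*
 * Note: returning 0 from ->is_visible hides an attribute entirely, while
 * clearing S_IRUGO or S_IWUSR in effective_mode above keeps the file visible
 * but rejects reads or writes respectively.
 */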
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device,
			     pm.dpm.thermal.work);
	/* switch to the thermal state */
	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
	int temp, size = sizeof(temp);

	if (!adev->pm.dpm_enabled)

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
				    (void *)&temp, &size)) {
		if (temp < adev->pm.dpm.thermal.min_temp)
			/* switch back to the user state */
			dpm_state = adev->pm.dpm.user_state;
	} else {
		if (adev->pm.dpm.thermal.high_to_low)
			/* switch back to the user state */
			dpm_state = adev->pm.dpm.user_state;
	}
	mutex_lock(&adev->pm.mutex);
	if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
		adev->pm.dpm.thermal_active = true;
	else
		adev->pm.dpm.thermal_active = false;
	adev->pm.dpm.state = dpm_state;
	mutex_unlock(&adev->pm.mutex);

	amdgpu_pm_compute_clocks(adev);
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
						     enum amd_pm_state_type dpm_state)
	struct amdgpu_ps *ps;

	bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?

	/* check if the vblank period is too short to adjust the mclk */
	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
		if (amdgpu_dpm_vblank_too_short(adev))
			single_display = false;

	/* certain older asics have a separate 3D performance state,
	 * so try that first if the user selected performance
	 */
	if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
		dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
	/* balanced states don't exist at the moment */
	if (dpm_state == POWER_STATE_TYPE_BALANCED)
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;

restart_search:
	/* Pick the best power state based on current conditions */
	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		ps = &adev->pm.dpm.ps[i];
		ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
		switch (dpm_state) {
		case POWER_STATE_TYPE_BATTERY:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {

		case POWER_STATE_TYPE_BALANCED:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {

		case POWER_STATE_TYPE_PERFORMANCE:
			if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
				if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {

		/* internal states */
		case POWER_STATE_TYPE_INTERNAL_UVD:
			if (adev->pm.dpm.uvd_ps)
				return adev->pm.dpm.uvd_ps;

		case POWER_STATE_TYPE_INTERNAL_UVD_SD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)

		case POWER_STATE_TYPE_INTERNAL_UVD_HD:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)

		case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)

		case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)

		case POWER_STATE_TYPE_INTERNAL_BOOT:
			return adev->pm.dpm.boot_ps;
		case POWER_STATE_TYPE_INTERNAL_THERMAL:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)

		case POWER_STATE_TYPE_INTERNAL_ACPI:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)

		case POWER_STATE_TYPE_INTERNAL_ULV:
			if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)

		case POWER_STATE_TYPE_INTERNAL_3DPERF:
			if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)

	/* use a fallback state if we didn't match */
	switch (dpm_state) {
	case POWER_STATE_TYPE_INTERNAL_UVD_SD:
		dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_UVD_HD:
	case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
	case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
		if (adev->pm.dpm.uvd_ps) {
			return adev->pm.dpm.uvd_ps;
		} else {
			dpm_state = POWER_STATE_TYPE_PERFORMANCE;
			goto restart_search;
		}
	case POWER_STATE_TYPE_INTERNAL_THERMAL:
		dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
		goto restart_search;
	case POWER_STATE_TYPE_INTERNAL_ACPI:
		dpm_state = POWER_STATE_TYPE_BATTERY;
		goto restart_search;
	case POWER_STATE_TYPE_BATTERY:
	case POWER_STATE_TYPE_BALANCED:
	case POWER_STATE_TYPE_INTERNAL_3DPERF:
		dpm_state = POWER_STATE_TYPE_PERFORMANCE;
		goto restart_search;
static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
	struct amdgpu_ps *ps;
	enum amd_pm_state_type dpm_state;

	/* if dpm init failed */
	if (!adev->pm.dpm_enabled)

	if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
		/* add other state override checks here */
		if ((!adev->pm.dpm.thermal_active) &&
		    (!adev->pm.dpm.uvd_active))
			adev->pm.dpm.state = adev->pm.dpm.user_state;

	dpm_state = adev->pm.dpm.state;

	ps = amdgpu_dpm_pick_power_state(adev, dpm_state);

	adev->pm.dpm.requested_ps = ps;

	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
		printk("switching from power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
		printk("switching to power state:\n");
		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);

	/* update whether vce is active */
	ps->vce_active = adev->pm.dpm.vce_active;
	if (adev->powerplay.pp_funcs->display_configuration_changed)
		amdgpu_dpm_display_configuration_changed(adev);

	ret = amdgpu_dpm_pre_set_power_state(adev);

	if (adev->powerplay.pp_funcs->check_state_equal) {
		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))

	amdgpu_dpm_set_power_state(adev);
	amdgpu_dpm_post_set_power_state(adev);

	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;

	if (adev->powerplay.pp_funcs->force_performance_level) {
		if (adev->pm.dpm.thermal_active) {
			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;

			/* force low perf level for thermal */
			amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
			/* save the user's level */
			adev->pm.dpm.forced_level = level;
		} else {
			/* otherwise, use the user selected level */
			amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
		}
	}
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
		DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);

	/* enable/disable Low Memory PState for UVD (4k videos) */
	if (adev->asic_type == CHIP_STONEY &&
	    adev->uvd.decode_image_width >= WIDTH_4K) {
		struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;

		if (hwmgr && hwmgr->hwmgr_func &&
		    hwmgr->hwmgr_func->update_nbdpm_pstate)
			hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
		DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
void amdgpu_pm_print_power_states(struct amdgpu_device *adev)

	if (adev->powerplay.pp_funcs->print_power_state == NULL)

	for (i = 0; i < adev->pm.dpm.num_ps; i++)
		amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
	ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
		DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
			  enable ? "enable" : "disable", ret);
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)

	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
			pr_err("smu firmware loading failed\n");

		*smu_version = adev->pm.fw_version;
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)

	if (adev->pm.sysfs_initialized)

	if (adev->pm.dpm_enabled == 0)

	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
	if (IS_ERR(adev->pm.int_hwmon_dev)) {
		ret = PTR_ERR(adev->pm.int_hwmon_dev);
			"Unable to register hwmon device: %d\n", ret);

	switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
	case SRIOV_VF_MODE_ONE_VF:
		mask = ATTR_FLAG_ONEVF;
	case SRIOV_VF_MODE_MULTI_VF:
	case SRIOV_VF_MODE_BARE_METAL:
		mask = ATTR_FLAG_MASK_ALL;

	ret = amdgpu_device_attr_create_groups(adev,
					       amdgpu_device_attrs,
					       ARRAY_SIZE(amdgpu_device_attrs),

	adev->pm.sysfs_initialized = true;
void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
	if (adev->pm.dpm_enabled == 0)

	if (adev->pm.int_hwmon_dev)
		hwmon_device_unregister(adev->pm.int_hwmon_dev);

	amdgpu_device_attr_remove_groups(adev,
					 amdgpu_device_attrs,
					 ARRAY_SIZE(amdgpu_device_attrs));
void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)

	if (!adev->pm.dpm_enabled)

	if (adev->mode_info.num_crtc)
		amdgpu_display_bandwidth_update(adev);

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (ring && ring->sched.ready)
			amdgpu_fence_wait_empty(ring);

	if (is_support_sw_smu(adev)) {
		struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;

		smu_handle_task(&adev->smu,
				AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
	} else {
		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			if (!amdgpu_device_has_dc_support(adev)) {
				mutex_lock(&adev->pm.mutex);
				amdgpu_dpm_get_active_displays(adev);
				adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
				adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
				adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
				/* we have issues with mclk switching with
				 * refresh rates over 120 hz on the non-DC code.
				 */
				if (adev->pm.pm_display_cfg.vrefresh > 120)
					adev->pm.pm_display_cfg.min_vblank_time = 0;
				if (adev->powerplay.pp_funcs->display_configuration_change)
					adev->powerplay.pp_funcs->display_configuration_change(
								adev->powerplay.pp_handle,
								&adev->pm.pm_display_cfg);
				mutex_unlock(&adev->pm.mutex);
			}
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
		} else {
			mutex_lock(&adev->pm.mutex);
			amdgpu_dpm_get_active_displays(adev);
			amdgpu_dpm_change_power_state_locked(adev);
			mutex_unlock(&adev->pm.mutex);
		}
	}
#if defined(CONFIG_DEBUG_FS)

static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
	size = sizeof(value);
	seq_printf(m, "GFX Clocks and Power:\n");
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
		seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDGFX)\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
		seq_printf(m, "\t%u mV (VDDNB)\n", value);
	size = sizeof(uint32_t);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
		seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
	size = sizeof(value);
	seq_printf(m, "\n");

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
		seq_printf(m, "GPU Temperature: %u C\n", value/1000);

	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
		seq_printf(m, "GPU Load: %u %%\n", value);
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
		seq_printf(m, "MEM Load: %u %%\n", value);

	seq_printf(m, "\n");
	/* SMC feature mask */
	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
		seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);

	if (adev->asic_type > CHIP_VEGA20) {
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCN: Disabled\n");
			} else {
				seq_printf(m, "VCN: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");
	} else {
		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "UVD: Disabled\n");
			} else {
				seq_printf(m, "UVD: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
			}
		}
		seq_printf(m, "\n");

		if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
			if (!value) {
				seq_printf(m, "VCE: Disabled\n");
			} else {
				seq_printf(m, "VCE: Enabled\n");
				if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
					seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
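
/*
 * The loop below walks the global clocks[] table defined near the top of this
 * file; it relies on that table being terminated by an entry whose flag field
 * is zero.
 */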
static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)

	for (i = 0; clocks[i].flag; i++)
		seq_printf(m, "\t%s: %s\n", clocks[i].name,
			   (flags & clocks[i].flag) ? "On" : "Off");
static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;

	r = pm_runtime_get_sync(dev->dev);

	amdgpu_device_ip_get_clockgating_state(adev, &flags);
	seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
	amdgpu_parse_cg_state(m, flags);
	seq_printf(m, "\n");

	if (!adev->pm.dpm_enabled) {
		seq_printf(m, "dpm not enabled\n");
		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);

	if (!is_support_sw_smu(adev) &&
	    adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
			adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
		else
			seq_printf(m, "Debugfs support not implemented for this asic\n");
		mutex_unlock(&adev->pm.mutex);
	} else {
		r = amdgpu_debugfs_pm_info_pp(m, adev);
	}

	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
static const struct drm_info_list amdgpu_pm_info_list[] = {
	{"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
};
#endif

int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));