/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Rafał Miłecki <zajec5@gmail.com>
 *          Alex Deucher <alexdeucher@gmail.com>
 */
#include <drm/drm_debugfs.h>

#include "amdgpu.h"
#include "amdgpu_drv.h"
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_display.h"
#include "amdgpu_smu.h"
#include <linux/power_supply.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
static const struct cg_flag_name clocks[] = {
	{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
	{AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
	{AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
	{AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
	{AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
	{AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
	{AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
	{AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
	{AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
	{AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
	{AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
};
static const struct hwmon_temp_label {
	enum PP_HWMON_TEMP channel;
	const char *label;
} temp_label[] = {
	{PP_TEMP_EDGE, "edge"},
	{PP_TEMP_JUNCTION, "junction"},
	{PP_TEMP_MEM, "mem"},
};
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{
	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		if (power_supply_is_system_supplied() > 0)
			adev->pm.ac_power = true;
		else
			adev->pm.ac_power = false;
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->enable_bapm)
			amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
		mutex_unlock(&adev->pm.mutex);

		if (is_support_sw_smu(adev))
			smu_set_ac_dc(&adev->smu);
	}
}
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	if (is_support_sw_smu(adev))
		ret = smu_read_sensor(&adev->smu, sensor, data, size);
	else {
		if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
			ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
								    sensor, data, size);
		else
			ret = -EINVAL;
	}

	return ret;
}
/**
 * DOC: power_dpm_state
 *
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
 * certain power related parameters. The file power_dpm_state is used for this.
 * It accepts the following arguments:
 *
 * - battery
 *
 * - balanced
 *
 * - performance
 *
 * battery
 *
 * On older GPUs, the vbios provided a special power state for battery
 * operation. Selecting battery switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * balanced
 *
 * On older GPUs, the vbios provided a special power state for balanced
 * operation. Selecting balanced switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
 *
 * performance
 *
 * On older GPUs, the vbios provided a special power state for performance
 * operation. Selecting performance switched to this state. This is no
 * longer provided on newer GPUs so the option does nothing in that case.
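 *
 * A minimal usage sketch (assuming the GPU is exposed as card0; the sysfs
 * path may differ on your system):
 *
 * .. code-block:: bash
 *
 *	echo battery > /sys/class/drm/card0/device/power_dpm_state
 *	cat /sys/class/drm/card0/device/power_dpm_state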
 */

static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type pm;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		if (adev->smu.ppt_funcs->get_current_power_state)
			pm = smu_get_current_power_state(&adev->smu);
		else
			pm = adev->pm.dpm.user_state;
	} else if (adev->powerplay.pp_funcs->get_current_power_state) {
		pm = amdgpu_dpm_get_current_power_state(adev);
	} else {
		pm = adev->pm.dpm.user_state;
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
			(pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
}
static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (strncmp("battery", buf, strlen("battery")) == 0)
		state = POWER_STATE_TYPE_BATTERY;
	else if (strncmp("balanced", buf, strlen("balanced")) == 0)
		state = POWER_STATE_TYPE_BALANCED;
	else if (strncmp("performance", buf, strlen("performance")) == 0)
		state = POWER_STATE_TYPE_PERFORMANCE;
	else
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);
	} else if (adev->powerplay.pp_funcs->dispatch_tasks) {
		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
	} else {
		mutex_lock(&adev->pm.mutex);
		adev->pm.dpm.user_state = state;
		mutex_unlock(&adev->pm.mutex);

		amdgpu_pm_compute_clocks(adev);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
/**
 * DOC: power_dpm_force_performance_level
 *
 * The amdgpu driver provides a sysfs API for adjusting certain power
 * related parameters. The file power_dpm_force_performance_level is
 * used for this. It accepts the following arguments:
 *
 * - auto
 *
 * - low
 *
 * - high
 *
 * - manual
 *
 * - profile_standard
 *
 * - profile_min_sclk
 *
 * - profile_min_mclk
 *
 * - profile_peak
 *
 * auto
 *
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for current conditions in the driver.
 *
 * low
 *
 * When low is selected, the clocks are forced to the lowest power state.
 *
 * high
 *
 * When high is selected, the clocks are forced to the highest power state.
 *
 * manual
 *
 * When manual is selected, the user can manually adjust which power states
 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
 * and pp_dpm_pcie files and adjust the power state transition heuristics
 * via the pp_power_profile_mode sysfs file.
 *
 * profile_standard, profile_min_sclk, profile_min_mclk, profile_peak
 *
 * When the profiling modes are selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific workloads where you do
 * not want clock or power gating for clock fluctuation to interfere
 * with your results. profile_standard sets the clocks to a fixed clock
 * level which varies from asic to asic. profile_min_sclk forces the sclk
 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
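 *
 * For example (a sketch; assuming the GPU is card0):
 *
 * .. code-block:: bash
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	cat /sys/class/drm/card0/device/power_dpm_force_performance_level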
 */

static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level = 0xff;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		level = amdgpu_dpm_get_performance_level(adev);
	else
		level = adev->pm.dpm.forced_level;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%s\n",
			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
			(level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
			(level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
			(level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
			"unknown");
}
static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
							    struct device_attribute *attr,
							    const char *buf,
							    size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_dpm_forced_level level;
	enum amd_dpm_forced_level current_level = 0xff;
	int ret = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (strncmp("low", buf, strlen("low")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_LOW;
	} else if (strncmp("high", buf, strlen("high")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_HIGH;
	} else if (strncmp("auto", buf, strlen("auto")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_AUTO;
	} else if (strncmp("manual", buf, strlen("manual")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_MANUAL;
	} else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
	} else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
	} else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
	} else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
	} else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
	} else {
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		current_level = smu_get_performance_level(&adev->smu);
	else if (adev->powerplay.pp_funcs->get_performance_level)
		current_level = amdgpu_dpm_get_performance_level(adev);

	if (current_level == level) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return count;
	}

	if (adev->asic_type == CHIP_RAVEN) {
		if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
			if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, false);
			else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
				amdgpu_gfx_off_ctrl(adev, true);
		}
	}

	/* profile_exit setting is valid only when current mode is in profile mode */
	if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
			       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
			       AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
			       AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
	    (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
		pr_err("Currently not in any profile mode!\n");
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return -EINVAL;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_force_performance_level(&adev->smu, level);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->force_performance_level) {
		mutex_lock(&adev->pm.mutex);
		if (adev->pm.dpm.thermal_active) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
		ret = amdgpu_dpm_force_performance_level(adev, level);
		if (ret) {
			mutex_unlock(&adev->pm.mutex);
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		} else {
			adev->pm.dpm.forced_level = level;
		}
		mutex_unlock(&adev->pm.mutex);
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_num_states(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	int i, buf_len, ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_get_power_num_states(&adev->smu, &data);
		if (ret)
			return ret;
	} else if (adev->powerplay.pp_funcs->get_pp_num_states) {
		amdgpu_dpm_get_pp_num_states(adev, &data);
	} else {
		memset(&data, 0, sizeof(data));
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
	for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i,
				(data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
				(data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
				(data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
				(data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");

	return buf_len;
}
static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	struct pp_states_info data;
	struct smu_context *smu = &adev->smu;
	enum amd_pm_state_type pm = 0;
	int i = 0, ret = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		pm = smu_get_current_power_state(smu);
		ret = smu_get_power_num_states(smu, &data);
		if (ret)
			return ret;
	} else if (adev->powerplay.pp_funcs->get_current_power_state
		 && adev->powerplay.pp_funcs->get_pp_num_states) {
		pm = amdgpu_dpm_get_current_power_state(adev);
		amdgpu_dpm_get_pp_num_states(adev, &data);
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	for (i = 0; i < data.nums; i++) {
		if (pm == data.states[i])
			break;
	}

	if (i == data.nums)
		i = -EINVAL;

	return snprintf(buf, PAGE_SIZE, "%d\n", i);
}
static ssize_t amdgpu_get_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (adev->pp_force_state_enabled)
		return amdgpu_get_pp_cur_state(dev, attr, buf);
	else
		return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t amdgpu_set_pp_force_state(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf,
					 size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	enum amd_pm_state_type state = 0;
	unsigned long idx;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (strlen(buf) == 1)
		adev->pp_force_state_enabled = false;
	else if (is_support_sw_smu(adev))
		adev->pp_force_state_enabled = false;
	else if (adev->powerplay.pp_funcs->dispatch_tasks &&
		 adev->powerplay.pp_funcs->get_pp_num_states) {
		struct pp_states_info data;

		ret = kstrtoul(buf, 0, &idx);
		if (ret || idx >= ARRAY_SIZE(data.states))
			return -EINVAL;

		idx = array_index_nospec(idx, ARRAY_SIZE(data.states));

		amdgpu_dpm_get_pp_num_states(adev, &data);
		state = data.states[idx];

		ret = pm_runtime_get_sync(ddev->dev);
		if (ret < 0) {
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}

		/* only set user selected power states */
		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
		    state != POWER_STATE_TYPE_DEFAULT) {
			amdgpu_dpm_dispatch_task(adev,
					AMD_PP_TASK_ENABLE_USER_STATE, &state);
			adev->pp_force_state_enabled = true;
		}
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
	}

	return count;
}
/**
 * DOC: pp_table
 *
 * The amdgpu driver provides a sysfs API for uploading new powerplay
 * tables. The file pp_table is used for this. Reading the file
 * will dump the current power play table. Writing to the file
 * will attempt to upload a new powerplay table and re-initialize
 * powerplay using that new table.
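 *
 * For example (a sketch; assuming the GPU is card0):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_table > saved_pp_table.bin
 *	cat modified_pp_table.bin > /sys/class/drm/card0/device/pp_table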
 */

static ssize_t amdgpu_get_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	char *table = NULL;
	int size, ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else if (adev->powerplay.pp_funcs->get_pp_table) {
		size = amdgpu_dpm_get_pp_table(adev, &table);
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		if (size < 0)
			return size;
	} else {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		return 0;
	}

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, table, size);

	return size;
}
static ssize_t amdgpu_set_pp_table(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf,
				   size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return ret;
		}
	} else if (adev->powerplay.pp_funcs->set_pp_table)
		amdgpu_dpm_set_pp_table(adev, buf, count);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
/**
 * DOC: pp_od_clk_voltage
 *
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state. The file pp_od_clk_voltage is used
 * for this.
 *
 * Note that the actual memory controller clock rate is exposed, not
 * the effective memory clock of the DRAMs. To translate it, use the
 * following formula:
 *
 * Clock conversion (MHz):
 *
 * HBM: effective_memory_clock = memory_controller_clock * 1
 *
 * G5: effective_memory_clock = memory_controller_clock * 1
 *
 * G6: effective_memory_clock = memory_controller_clock * 2
 *
 * DRAM data rate (MT/s):
 *
 * HBM: effective_memory_clock * 2 = data_rate
 *
 * G5: effective_memory_clock * 4 = data_rate
 *
 * G6: effective_memory_clock * 8 = data_rate
 *
 * Bandwidth (MB/s):
 *
 * data_rate * vram_bit_width / 8 = memory_bandwidth
 *
 * Some examples:
 *
 * G5 on RX460:
 *
 * memory_controller_clock = 1750 MHz
 *
 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
 *
 * data rate = 1750 * 4 = 7000 MT/s
 *
 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
 *
 * G6 on RX5700:
 *
 * memory_controller_clock = 875 MHz
 *
 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
 *
 * data rate = 1750 * 8 = 14000 MT/s
 *
 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
 *
 * < For Vega10 and previous ASICs >
 *
 * Reading the file will display:
 *
 * - a list of engine clock levels and voltages labeled OD_SCLK
 *
 * - a list of memory clock levels and voltages labeled OD_MCLK
 *
 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
 *
 * To manually adjust these settings, first select manual using
 * power_dpm_force_performance_level. Enter a new value for each
 * level by writing a string that contains "s/m level clock voltage" to
 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
 * 810 mV. When you have edited all of the states as needed, write
 * "c" (commit) to the file to commit your changes. If you want to reset to the
 * default power levels, write "r" (reset) to the file to reset them.
 *
 *
 * < For Vega20 and newer ASICs >
 *
 * Reading the file will display:
 *
 * - minimum and maximum engine clock labeled OD_SCLK
 *
 * - maximum memory clock labeled OD_MCLK
 *
 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
 *   They can be used to calibrate the sclk voltage curve.
 *
 * - a list of valid ranges for sclk, mclk, and voltage curve points
 *   labeled OD_RANGE
 *
 * To manually adjust these settings:
 *
 * - First select manual using power_dpm_force_performance_level
 *
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock. E.g., "s 0 500" will update the minimum sclk to be 500 MHz.
 *   "m 1 800" will update the maximum mclk to be 800 MHz.
 *
 * - For the sclk voltage curve, enter the new values by writing a
 *   string that contains "vc point clock voltage" to the file. The
 *   points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
 *   update point1 with clock set as 300 MHz and voltage as
 *   600 mV. "vc 2 1000 1000" will update point3 with clock set
 *   as 1000 MHz and voltage 1000 mV.
 *
 * - When you have edited all of the states as needed, write "c" (commit)
 *   to the file to commit your changes
 *
 * - If you want to reset to the default power levels, write "r" (reset)
 *   to the file to reset them
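 *
 * Putting it together (a sketch; assuming the GPU is card0 and that the
 * requested values fall within the advertised OD_RANGE):
 *
 * .. code-block:: bash
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	echo "s 1 500 820" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *	echo "c" > /sys/class/drm/card0/device/pp_od_clk_voltage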
 */

static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t parameter_size = 0;
	long parameter[64];
	char buf_cpy[128];
	char *tmp_str;
	char *sub_str;
	const char delimiter[3] = {' ', '\n', '\0'};
	uint32_t type;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (count > 127)
		return -EINVAL;

	if (*buf == 's')
		type = PP_OD_EDIT_SCLK_VDDC_TABLE;
	else if (*buf == 'm')
		type = PP_OD_EDIT_MCLK_VDDC_TABLE;
	else if (*buf == 'r')
		type = PP_OD_RESTORE_DEFAULT_TABLE;
	else if (*buf == 'c')
		type = PP_OD_COMMIT_DPM_TABLE;
	else if (!strncmp(buf, "vc", 2))
		type = PP_OD_EDIT_VDDC_CURVE;
	else
		return -EINVAL;

	memcpy(buf_cpy, buf, count+1);

	tmp_str = buf_cpy;

	if (type == PP_OD_EDIT_VDDC_CURVE)
		tmp_str++;
	while (isspace(*++tmp_str));

	while (tmp_str[0]) {
		sub_str = strsep(&tmp_str, delimiter);
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
		if (ret)
			return -EINVAL;
		parameter_size++;

		while (isspace(*tmp_str))
			tmp_str++;
	}

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_od_edit_dpm_table(&adev->smu, type,
					    parameter, parameter_size);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else {
		if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
			ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
							    parameter, parameter_size);
			if (ret) {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}

		if (type == PP_OD_COMMIT_DPM_TABLE) {
			if (adev->powerplay.pp_funcs->dispatch_tasks) {
				amdgpu_dpm_dispatch_task(adev,
						AMD_PP_TASK_READJUST_POWER_STATE,
						NULL);
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return count;
			} else {
				pm_runtime_mark_last_busy(ddev->dev);
				pm_runtime_put_autosuspend(ddev->dev);
				return -EINVAL;
			}
		}
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
		size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
	} else if (adev->powerplay.pp_funcs->print_clock_levels) {
		size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
		size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
		size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
	} else {
		size = snprintf(buf, PAGE_SIZE, "\n");
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
/**
 * DOC: pp_features
 *
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this, and it
 * is only available for Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 * - Current ppfeature masks
 * - List of all the supported powerplay features with their names,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, just set or clear
 * the corresponding bit in the original ppfeature mask and write the
 * new ppfeature mask back.
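 *
 * For example (a sketch; the mask value here is hypothetical and must be
 * derived from the bits your own GPU reports):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_features
 *	echo 0x3fffffff > /sys/class/drm/card0/device/pp_features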
 */
static ssize_t amdgpu_set_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t featuremask;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = kstrtou64(buf, 0, &featuremask);
	if (ret)
		return -EINVAL;

	pr_debug("featuremask = 0x%llx\n", featuremask);

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	} else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
		ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
		if (ret) {
			pm_runtime_mark_last_busy(ddev->dev);
			pm_runtime_put_autosuspend(ddev->dev);
			return -EINVAL;
		}
	}
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_features(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_ppfeature_status)
		size = amdgpu_dpm_get_ppfeature_status(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
/**
 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
 *
 * The amdgpu driver provides a sysfs API for adjusting what power levels
 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * this.
 *
 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * pp_dpm_fclk interface is only available for Vega20 and later ASICs.
 *
 * Reading back the files will show you the available power levels within
 * the power state and the clock information for those levels.
 *
 * To manually adjust these states, first select manual using
 * power_dpm_force_performance_level.
 * Secondly, enter a new value for each level by writing a space-separated
 * list of level indices to pp_dpm_sclk/mclk/pcie.
 * E.g.,
 *
 * .. code-block:: bash
 *
 *	echo "4 5 6" > pp_dpm_sclk
 *
 * will enable sclk levels 4, 5, and 6.
 *
 * NOTE: changing the dcefclk max dpm level is not supported currently.
 */
static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
/*
 * Worst case: 32 bits individually specified, in octal at 12 characters
 * per line (+1 for \n).
 */
#define AMDGPU_MASK_BUF_MAX	(32 * 13)

static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
{
	int ret;
	long level;
	char *sub_str = NULL;
	char *tmp;
	char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
	const char delimiter[3] = {' ', '\n', '\0'};
	size_t bytes;

	*mask = 0;

	bytes = min(count, sizeof(buf_cpy) - 1);
	memcpy(buf_cpy, buf, bytes);
	buf_cpy[bytes] = '\0';
	tmp = buf_cpy;
	while (tmp[0]) {
		sub_str = strsep(&tmp, delimiter);
		if (strlen(sub_str)) {
			ret = kstrtol(sub_str, 0, &level);
			if (ret)
				return -EINVAL;
			*mask |= 1 << level;
		} else
			break;
	}

	return 0;
}
static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
	else if (adev->powerplay.pp_funcs->print_clock_levels)
		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}

static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	uint32_t mask = 0;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = amdgpu_read_mask(buf, count, &mask);
	if (ret)
		return ret;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
	else if (adev->powerplay.pp_funcs->force_clock_level)
		ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
	else
		ret = 0;

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (ret)
		return -EINVAL;

	return count;
}
static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK);
	else if (adev->powerplay.pp_funcs->get_sclk_od)
		value = amdgpu_dpm_get_sclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = kstrtol(buf, 0, &value);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_sclk_od)
			amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t value = 0;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK);
	else if (adev->powerplay.pp_funcs->get_mclk_od)
		value = amdgpu_dpm_get_mclk_od(adev);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}

static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int ret;
	long int value;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = kstrtol(buf, 0, &value);
	if (ret)
		return -EINVAL;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev)) {
		value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value);
	} else {
		if (adev->powerplay.pp_funcs->set_mclk_od)
			amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);

		if (adev->powerplay.pp_funcs->dispatch_tasks) {
			amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
		} else {
			adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
			amdgpu_pm_compute_clocks(adev);
		}
	}

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return count;
}
/**
 * DOC: pp_power_profile_mode
 *
 * The amdgpu driver provides a sysfs API for adjusting the heuristics
 * related to switching between power levels in a power state. The file
 * pp_power_profile_mode is used for this.
 *
 * Reading this file outputs a list of all of the predefined power profiles
 * and the relevant heuristics settings for that profile.
 *
 * To select a profile or create a custom profile, first select manual using
 * power_dpm_force_performance_level. Writing the number of a predefined
 * profile to pp_power_profile_mode will enable those heuristics. To
 * create a custom set of heuristics, write a string of numbers to the file
 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter. Due to differences across asic families
 * the heuristic parameters vary from family to family.
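 *
 * For example (a sketch; profile numbers and parameter counts differ per
 * asic, so read the file first to see what your GPU exposes):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_power_profile_mode
 *	echo 1 > /sys/class/drm/card0/device/pp_power_profile_mode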
 */

static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	ssize_t size;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_get_power_profile_mode(&adev->smu, buf);
	else if (adev->powerplay.pp_funcs->get_power_profile_mode)
		size = amdgpu_dpm_get_power_profile_mode(adev, buf);
	else
		size = snprintf(buf, PAGE_SIZE, "\n");

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t count)
{
	int ret;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint32_t parameter_size = 0;
	long parameter[64];
	char *sub_str, buf_cpy[128];
	char *tmp_str;
	uint32_t i = 0;
	char tmp[2];
	long int profile_mode = 0;
	const char delimiter[3] = {' ', '\n', '\0'};

	if (adev->in_gpu_reset)
		return -EPERM;

	tmp[0] = *(buf);
	tmp[1] = '\0';
	ret = kstrtol(tmp, 0, &profile_mode);
	if (ret)
		return -EINVAL;

	if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		if (count < 2 || count > 127)
			return -EINVAL;
		while (isspace(*++buf))
			i++;
		memcpy(buf_cpy, buf, count-i);
		tmp_str = buf_cpy;
		while (tmp_str[0]) {
			sub_str = strsep(&tmp_str, delimiter);
			ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
			if (ret)
				return -EINVAL;
			parameter_size++;
			while (isspace(*tmp_str))
				tmp_str++;
		}
	}
	parameter[parameter_size] = profile_mode;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
	else if (adev->powerplay.pp_funcs->set_power_profile_mode)
		ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (!ret)
		return count;

	return -EINVAL;
}
/**
 * DOC: gpu_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the GPU
 * is as a percentage. The file gpu_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
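 *
 * For example (a sketch; assuming the GPU is card0):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/gpu_busy_percent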
 */
static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
/**
 * DOC: mem_busy_percent
 *
 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
 * is as a percentage. The file mem_busy_percent is used for this.
 * The SMU firmware computes a percentage of load based on the
 * aggregate activity level in the IP cores.
 */
static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	int r, value, size = sizeof(value);

	if (adev->in_gpu_reset)
		return -EPERM;

	r = pm_runtime_get_sync(ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return r;
	}

	/* read the IP busy sensor */
	r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
				   (void *)&value, &size);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", value);
}
/**
 * DOC: pcie_bw
 *
 * The amdgpu driver provides a sysfs API for estimating how much data
 * has been received and sent by the GPU in the last second through PCIe.
 * The file pcie_bw is used for this.
 * The Perf counters count the number of received and sent messages and return
 * those values, as well as the maximum payload size of a PCIe packet (mps).
 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow for
 * quick estimation of the PCIe bandwidth usage.
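 *
 * A rough upper-bound estimate can be computed by assuming every counted
 * message carried a full mps-sized payload (a sketch; assuming card0):
 *
 * .. code-block:: bash
 *
 *	read count0 count1 mps < /sys/class/drm/card0/device/pcie_bw
 *	echo $(( (count0 + count1) * mps ))  # approx. bytes moved last second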
 */
static ssize_t amdgpu_get_pcie_bw(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	uint64_t count0 = 0, count1 = 0;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (adev->flags & AMD_IS_APU)
		return -ENODATA;

	if (!adev->asic_funcs->get_pcie_usage)
		return -ENODATA;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	amdgpu_asic_get_pcie_usage(adev, &count0, &count1);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
			count0, count1, pcie_get_mps(adev->pdev));
}
/**
 * DOC: unique_id
 *
 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU.
 * The file unique_id is used for this.
 * This will provide a Unique ID that will persist from machine to machine.
 *
 * NOTE: This will only work for GFX9 and newer. This file will be absent
 * on unsupported ASICs (GFX8 and older).
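 *
 * For example (a sketch; assuming the GPU is card0):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/unique_id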
 */
static ssize_t amdgpu_get_unique_id(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	if (adev->in_gpu_reset)
		return -EPERM;

	if (adev->unique_id)
		return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);

	return 0;
}
/**
 * DOC: thermal_throttling_logging
 *
 * Thermal throttling pulls down the clock frequency and thus the performance.
 * It's a useful mechanism to protect the chip from overheating. Since it
 * impacts performance, the user controls whether it is enabled and if so,
 * the log frequency.
 *
 * Reading back the file shows you the status (enabled or disabled) and
 * the interval (in seconds) between each thermal logging.
 *
 * Writing an integer to the file sets a new logging interval, in seconds.
 * The value should be between 1 and 3600. If the value is less than 1,
 * thermal logging is disabled. Values greater than 3600 are ignored.
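 *
 * For example (a sketch; assuming the GPU is card0):
 *
 * .. code-block:: bash
 *
 *	echo 60 > /sys/class/drm/card0/device/thermal_throttling_logging
 *	cat /sys/class/drm/card0/device/thermal_throttling_logging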
 */
static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;

	return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
			adev->ddev->unique,
			atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
			adev->throttling_logging_rs.interval / HZ + 1);
}
static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
						     struct device_attribute *attr,
						     const char *buf,
						     size_t count)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	long throttling_logging_interval;
	unsigned long flags;
	int ret = 0;

	ret = kstrtol(buf, 0, &throttling_logging_interval);
	if (ret)
		return ret;

	if (throttling_logging_interval > 3600)
		return -EINVAL;

	if (throttling_logging_interval > 0) {
		raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
		/*
		 * Reset the ratelimit timer internals.
		 * This can effectively restart the timer.
		 */
		adev->throttling_logging_rs.interval =
			(throttling_logging_interval - 1) * HZ;
		adev->throttling_logging_rs.begin = 0;
		adev->throttling_logging_rs.printed = 0;
		adev->throttling_logging_rs.missed = 0;
		raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);

		atomic_set(&adev->throttling_logging_enabled, 1);
	} else {
		atomic_set(&adev->throttling_logging_enabled, 0);
	}

	return count;
}
/**
 * DOC: gpu_metrics
 *
 * The amdgpu driver provides a sysfs API for retrieving current gpu
 * metrics data. The file gpu_metrics is used for this. Reading the
 * file will dump all the current gpu metrics data.
 *
 * These data include temperature, frequency, engines utilization,
 * power consumption, throttler status, fan speed and cpu core statistics
 * (available for APU only). That is, it will give a snapshot of all
 * sensors at the same time.
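 *
 * The data is returned as a binary structure, so reading it from the shell
 * is mainly useful for capturing a snapshot (a sketch; assuming card0):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/gpu_metrics > metrics_snapshot.bin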
 */
static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = ddev->dev_private;
	void *gpu_metrics;
	ssize_t size = 0;
	int ret;

	if (adev->in_gpu_reset)
		return -EPERM;

	ret = pm_runtime_get_sync(ddev->dev);
	if (ret < 0) {
		pm_runtime_put_autosuspend(ddev->dev);
		return ret;
	}

	if (is_support_sw_smu(adev))
		size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics);
	else if (adev->powerplay.pp_funcs->get_gpu_metrics)
		size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);

	if (size <= 0)
		goto out;

	if (size >= PAGE_SIZE)
		size = PAGE_SIZE - 1;

	memcpy(buf, gpu_metrics, size);

out:
	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);

	return size;
}
static struct amdgpu_device_attr amdgpu_device_attrs[] = {
	AMDGPU_DEVICE_ATTR_RW(power_dpm_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,	ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pp_num_states,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_table,					ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,			ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage,			ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(pcie_bw,					ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(pp_features,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(unique_id,				ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging,		ATTR_FLAG_BASIC),
	AMDGPU_DEVICE_ATTR_RO(gpu_metrics,				ATTR_FLAG_BASIC),
};
static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			       uint32_t mask, enum amdgpu_device_attr_states *states)
{
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *attr_name = dev_attr->attr.name;
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	enum amd_asic_type asic_type = adev->asic_type;

	if (!(attr->flags & mask)) {
		*states = ATTR_STATE_UNSUPPORTED;
		return 0;
	}

#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))

	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
		if (asic_type < CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
		if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
		if (asic_type < CHIP_VEGA20)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
		if (asic_type == CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
		*states = ATTR_STATE_UNSUPPORTED;
		if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
		    (!is_support_sw_smu(adev) && hwmgr->od_enabled))
			*states = ATTR_STATE_SUPPORTED;
	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
		if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pcie_bw)) {
		/* PCIe Perf counters won't work on APU nodes */
		if (adev->flags & AMD_IS_APU)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(unique_id)) {
		if (asic_type != CHIP_VEGA10 &&
		    asic_type != CHIP_VEGA20 &&
		    asic_type != CHIP_ARCTURUS)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(pp_features)) {
		if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
			*states = ATTR_STATE_UNSUPPORTED;
	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
		if (asic_type < CHIP_VEGA12)
			*states = ATTR_STATE_UNSUPPORTED;
	}

	if (asic_type == CHIP_ARCTURUS) {
		/* Arcturus does not support standalone mclk/socclk/fclk level setting */
		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
			dev_attr->attr.mode &= ~S_IWUGO;
			dev_attr->store = NULL;
		}
	}

#undef DEVICE_ATTR_IS

	return 0;
}
static int amdgpu_device_attr_create(struct amdgpu_device *adev,
				     struct amdgpu_device_attr *attr,
				     uint32_t mask, struct list_head *attr_list)
{
	int ret = 0;
	struct device_attribute *dev_attr = &attr->dev_attr;
	const char *name = dev_attr->attr.name;
	enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
	struct amdgpu_device_attr_entry *attr_entry;

	int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
			   uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;

	BUG_ON(!attr);

	attr_update = attr->attr_update ? attr->attr_update : default_attr_update;

	ret = attr_update(adev, attr, mask, &attr_states);
	if (ret) {
		dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
			name, ret);
		return ret;
	}

	if (attr_states == ATTR_STATE_UNSUPPORTED)
		return 0;

	ret = device_create_file(adev->dev, dev_attr);
	if (ret) {
		dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
			name, ret);
	}

	attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
	if (!attr_entry)
		return -ENOMEM;

	attr_entry->attr = attr;
	INIT_LIST_HEAD(&attr_entry->entry);

	list_add_tail(&attr_entry->entry, attr_list);

	return ret;
}
static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
{
	struct device_attribute *dev_attr = &attr->dev_attr;

	device_remove_file(adev->dev, dev_attr);
}

static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list);
static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
					    struct amdgpu_device_attr *attrs,
					    uint32_t counts,
					    uint32_t mask,
					    struct list_head *attr_list)
{
	int ret = 0;
	uint32_t i = 0;

	for (i = 0; i < counts; i++) {
		ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
		if (ret)
			goto failed;
	}

	return 0;

failed:
	amdgpu_device_attr_remove_groups(adev, attr_list);

	return ret;
}
static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
					     struct list_head *attr_list)
{
	struct amdgpu_device_attr_entry *entry, *entry_tmp;

	if (list_empty(attr_list))
		return;

	list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
		amdgpu_device_attr_remove(adev, entry->attr);
		list_del(&entry->entry);
		kfree(entry);
	}
}
static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct amdgpu_device *adev = dev_get_drvdata(dev);
	int channel = to_sensor_dev_attr(attr)->index;
	int r, temp = 0, size = sizeof(temp);

	if (adev->in_gpu_reset)
		return -EPERM;

	if (channel >= PP_TEMP_MAX)
		return -EINVAL;

	r = pm_runtime_get_sync(adev->ddev->dev);
	if (r < 0) {
		pm_runtime_put_autosuspend(adev->ddev->dev);
		return r;
	}

	switch (channel) {
	case PP_TEMP_JUNCTION:
		/* get current junction temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_EDGE:
		/* get current edge temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
					   (void *)&temp, &size);
		break;
	case PP_TEMP_MEM:
		/* get current memory temperature */
		r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
					   (void *)&temp, &size);
		break;
	default:
		r = -EINVAL;
		break;
	}

	pm_runtime_mark_last_busy(adev->ddev->dev);
	pm_runtime_put_autosuspend(adev->ddev->dev);

	if (r)
		return r;

	return snprintf(buf, PAGE_SIZE, "%d\n", temp);
}
2280 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2281 struct device_attribute *attr,
2284 struct amdgpu_device *adev = dev_get_drvdata(dev);
2285 int hyst = to_sensor_dev_attr(attr)->index;
2289 temp = adev->pm.dpm.thermal.min_temp;
2291 temp = adev->pm.dpm.thermal.max_temp;
2293 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2296 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2297 struct device_attribute *attr,
2300 struct amdgpu_device *adev = dev_get_drvdata(dev);
2301 int hyst = to_sensor_dev_attr(attr)->index;
2305 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2307 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2309 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2312 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2313 struct device_attribute *attr,
2316 struct amdgpu_device *adev = dev_get_drvdata(dev);
2317 int hyst = to_sensor_dev_attr(attr)->index;
2321 temp = adev->pm.dpm.thermal.min_mem_temp;
2323 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2325 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2328 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2329 struct device_attribute *attr,
2332 int channel = to_sensor_dev_attr(attr)->index;
2334 if (channel >= PP_TEMP_MAX)
2337 return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
2340 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2341 struct device_attribute *attr,
2344 struct amdgpu_device *adev = dev_get_drvdata(dev);
2345 int channel = to_sensor_dev_attr(attr)->index;
2348 if (channel >= PP_TEMP_MAX)
2352 case PP_TEMP_JUNCTION:
2353 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2356 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2359 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2363 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2366 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2367 struct device_attribute *attr,
2370 struct amdgpu_device *adev = dev_get_drvdata(dev);
2374 if (adev->in_gpu_reset)
2377 ret = pm_runtime_get_sync(adev->ddev->dev);
2379 pm_runtime_put_autosuspend(adev->ddev->dev);
2383 if (is_support_sw_smu(adev)) {
2384 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2386 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2387 pm_runtime_mark_last_busy(adev->ddev->dev);
2388 pm_runtime_put_autosuspend(adev->ddev->dev);
2392 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2395 pm_runtime_mark_last_busy(adev->ddev->dev);
2396 pm_runtime_put_autosuspend(adev->ddev->dev);
2398 return sprintf(buf, "%i\n", pwm_mode);
2401 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2402 struct device_attribute *attr,
2406 struct amdgpu_device *adev = dev_get_drvdata(dev);
2410 if (adev->in_gpu_reset)
2413 err = kstrtoint(buf, 10, &value);
2417 ret = pm_runtime_get_sync(adev->ddev->dev);
2419 pm_runtime_put_autosuspend(adev->ddev->dev);
2423 if (is_support_sw_smu(adev)) {
2424 smu_set_fan_control_mode(&adev->smu, value);
2426 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2427 pm_runtime_mark_last_busy(adev->ddev->dev);
2428 pm_runtime_put_autosuspend(adev->ddev->dev);
2432 amdgpu_dpm_set_fan_control_mode(adev, value);
2435 pm_runtime_mark_last_busy(adev->ddev->dev);
2436 pm_runtime_put_autosuspend(adev->ddev->dev);
2441 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2442 struct device_attribute *attr,
2445 return sprintf(buf, "%i\n", 0);
2448 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2449 struct device_attribute *attr,
2452 return sprintf(buf, "%i\n", 255);
2455 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2456 struct device_attribute *attr,
2457 const char *buf, size_t count)
2459 struct amdgpu_device *adev = dev_get_drvdata(dev);
2464 if (adev->in_gpu_reset)
2467 err = pm_runtime_get_sync(adev->ddev->dev);
2469 pm_runtime_put_autosuspend(adev->ddev->dev);
2473 if (is_support_sw_smu(adev))
2474 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2476 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2478 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2479 pr_info("manual fan speed control should be enabled first\n");
2480 pm_runtime_mark_last_busy(adev->ddev->dev);
2481 pm_runtime_put_autosuspend(adev->ddev->dev);
2485 err = kstrtou32(buf, 10, &value);
2487 pm_runtime_mark_last_busy(adev->ddev->dev);
2488 pm_runtime_put_autosuspend(adev->ddev->dev);
2492 value = (value * 100) / 255;
2494 if (is_support_sw_smu(adev))
2495 err = smu_set_fan_speed_percent(&adev->smu, value);
2496 else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
2497 err = amdgpu_dpm_set_fan_speed_percent(adev, value);
2501 pm_runtime_mark_last_busy(adev->ddev->dev);
2502 pm_runtime_put_autosuspend(adev->ddev->dev);
2510 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2511 struct device_attribute *attr,
2514 struct amdgpu_device *adev = dev_get_drvdata(dev);
2518 if (adev->in_gpu_reset)
2521 err = pm_runtime_get_sync(adev->ddev->dev);
2523 pm_runtime_put_autosuspend(adev->ddev->dev);
2527 if (is_support_sw_smu(adev))
2528 err = smu_get_fan_speed_percent(&adev->smu, &speed);
2529 else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
2530 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
2534 pm_runtime_mark_last_busy(adev->ddev->dev);
2535 pm_runtime_put_autosuspend(adev->ddev->dev);
2540 speed = (speed * 255) / 100;
2542 return sprintf(buf, "%i\n", speed);
2545 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2546 struct device_attribute *attr,
2549 struct amdgpu_device *adev = dev_get_drvdata(dev);
2553 if (adev->in_gpu_reset)
2556 err = pm_runtime_get_sync(adev->ddev->dev);
2558 pm_runtime_put_autosuspend(adev->ddev->dev);
2562 if (is_support_sw_smu(adev))
2563 err = smu_get_fan_speed_rpm(&adev->smu, &speed);
2564 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2565 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2569 pm_runtime_mark_last_busy(adev->ddev->dev);
2570 pm_runtime_put_autosuspend(adev->ddev->dev);
2575 return sprintf(buf, "%i\n", speed);
2578 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2579 struct device_attribute *attr,
2582 struct amdgpu_device *adev = dev_get_drvdata(dev);
2584 u32 size = sizeof(min_rpm);
2587 if (adev->in_gpu_reset)
2590 r = pm_runtime_get_sync(adev->ddev->dev);
2592 pm_runtime_put_autosuspend(adev->ddev->dev);
2596 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2597 (void *)&min_rpm, &size);
2599 pm_runtime_mark_last_busy(adev->ddev->dev);
2600 pm_runtime_put_autosuspend(adev->ddev->dev);
2605 return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
2608 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2609 struct device_attribute *attr,
2612 struct amdgpu_device *adev = dev_get_drvdata(dev);
2614 u32 size = sizeof(max_rpm);
2617 if (adev->in_gpu_reset)
2620 r = pm_runtime_get_sync(adev->ddev->dev);
2622 pm_runtime_put_autosuspend(adev->ddev->dev);
2626 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2627 (void *)&max_rpm, &size);
2629 pm_runtime_mark_last_busy(adev->ddev->dev);
2630 pm_runtime_put_autosuspend(adev->ddev->dev);
2635 return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
2638 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2639 struct device_attribute *attr,
2642 struct amdgpu_device *adev = dev_get_drvdata(dev);
2646 if (adev->in_gpu_reset)
2649 err = pm_runtime_get_sync(adev->ddev->dev);
2651 pm_runtime_put_autosuspend(adev->ddev->dev);
2655 if (is_support_sw_smu(adev))
2656 err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
2657 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2658 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2662 pm_runtime_mark_last_busy(adev->ddev->dev);
2663 pm_runtime_put_autosuspend(adev->ddev->dev);
2668 return sprintf(buf, "%i\n", rpm);
2671 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2672 struct device_attribute *attr,
2673 const char *buf, size_t count)
2675 struct amdgpu_device *adev = dev_get_drvdata(dev);
2680 if (adev->in_gpu_reset)
2683 err = pm_runtime_get_sync(adev->ddev->dev);
2685 pm_runtime_put_autosuspend(adev->ddev->dev);
2689 if (is_support_sw_smu(adev))
2690 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2692 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2694 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2695 pm_runtime_mark_last_busy(adev->ddev->dev);
2696 pm_runtime_put_autosuspend(adev->ddev->dev);
2700 err = kstrtou32(buf, 10, &value);
2702 pm_runtime_mark_last_busy(adev->ddev->dev);
2703 pm_runtime_put_autosuspend(adev->ddev->dev);
2707 if (is_support_sw_smu(adev))
2708 err = smu_set_fan_speed_rpm(&adev->smu, value);
2709 else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
2710 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2714 pm_runtime_mark_last_busy(adev->ddev->dev);
2715 pm_runtime_put_autosuspend(adev->ddev->dev);
2723 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2724 struct device_attribute *attr,
2727 struct amdgpu_device *adev = dev_get_drvdata(dev);
2731 if (adev->in_gpu_reset)
2734 ret = pm_runtime_get_sync(adev->ddev->dev);
2736 pm_runtime_put_autosuspend(adev->ddev->dev);
2740 if (is_support_sw_smu(adev)) {
2741 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2743 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2744 pm_runtime_mark_last_busy(adev->ddev->dev);
2745 pm_runtime_put_autosuspend(adev->ddev->dev);
2749 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2752 pm_runtime_mark_last_busy(adev->ddev->dev);
2753 pm_runtime_put_autosuspend(adev->ddev->dev);
2755 return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2758 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2759 struct device_attribute *attr,
2763 struct amdgpu_device *adev = dev_get_drvdata(dev);
2768 if (adev->in_gpu_reset)
2771 err = kstrtoint(buf, 10, &value);
2776 pwm_mode = AMD_FAN_CTRL_AUTO;
2777 else if (value == 1)
2778 pwm_mode = AMD_FAN_CTRL_MANUAL;
2782 err = pm_runtime_get_sync(adev->ddev->dev);
2784 pm_runtime_put_autosuspend(adev->ddev->dev);
2788 if (is_support_sw_smu(adev)) {
2789 smu_set_fan_control_mode(&adev->smu, pwm_mode);
2791 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2792 pm_runtime_mark_last_busy(adev->ddev->dev);
2793 pm_runtime_put_autosuspend(adev->ddev->dev);
2796 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2799 pm_runtime_mark_last_busy(adev->ddev->dev);
2800 pm_runtime_put_autosuspend(adev->ddev->dev);
2805 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2806 struct device_attribute *attr,
2809 struct amdgpu_device *adev = dev_get_drvdata(dev);
2811 int r, size = sizeof(vddgfx);
2813 if (adev->in_gpu_reset)
2816 r = pm_runtime_get_sync(adev->ddev->dev);
2818 pm_runtime_put_autosuspend(adev->ddev->dev);
2822 /* get the voltage */
2823 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
2824 (void *)&vddgfx, &size);
2826 pm_runtime_mark_last_busy(adev->ddev->dev);
2827 pm_runtime_put_autosuspend(adev->ddev->dev);
2832 return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
2835 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2836 struct device_attribute *attr,
2839 return snprintf(buf, PAGE_SIZE, "vddgfx\n");
2842 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2843 struct device_attribute *attr,
2846 struct amdgpu_device *adev = dev_get_drvdata(dev);
2848 int r, size = sizeof(vddnb);
2850 if (adev->in_gpu_reset)
2853 /* only APUs have vddnb */
2854 if (!(adev->flags & AMD_IS_APU))
2857 r = pm_runtime_get_sync(adev->ddev->dev);
2859 pm_runtime_put_autosuspend(adev->ddev->dev);
2863 /* get the voltage */
2864 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
2865 (void *)&vddnb, &size);
2867 pm_runtime_mark_last_busy(adev->ddev->dev);
2868 pm_runtime_put_autosuspend(adev->ddev->dev);
2873 return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
2876 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
2877 struct device_attribute *attr,
2880 return snprintf(buf, PAGE_SIZE, "vddnb\n");
2883 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
2884 struct device_attribute *attr,
2887 struct amdgpu_device *adev = dev_get_drvdata(dev);
2889 int r, size = sizeof(u32);
2892 if (adev->in_gpu_reset)
2895 r = pm_runtime_get_sync(adev->ddev->dev);
2897 pm_runtime_put_autosuspend(adev->ddev->dev);
2901 /* get the average power */
2902 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
2903 (void *)&query, &size);
2905 pm_runtime_mark_last_busy(adev->ddev->dev);
2906 pm_runtime_put_autosuspend(adev->ddev->dev);
2911 /* convert to microwatts */
2912 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
2914 return snprintf(buf, PAGE_SIZE, "%u\n", uw);
2917 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
2918 struct device_attribute *attr,
2921 return sprintf(buf, "%i\n", 0);
2924 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
2925 struct device_attribute *attr,
2928 struct amdgpu_device *adev = dev_get_drvdata(dev);
2933 if (adev->in_gpu_reset)
2936 r = pm_runtime_get_sync(adev->ddev->dev);
2938 pm_runtime_put_autosuspend(adev->ddev->dev);
2942 if (is_support_sw_smu(adev)) {
2943 smu_get_power_limit(&adev->smu, &limit, true);
2944 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2945 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2946 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
2947 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2949 size = snprintf(buf, PAGE_SIZE, "\n");
2952 pm_runtime_mark_last_busy(adev->ddev->dev);
2953 pm_runtime_put_autosuspend(adev->ddev->dev);
2958 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
2959 struct device_attribute *attr,
2962 struct amdgpu_device *adev = dev_get_drvdata(dev);
2967 if (adev->in_gpu_reset)
2970 r = pm_runtime_get_sync(adev->ddev->dev);
2972 pm_runtime_put_autosuspend(adev->ddev->dev);
2976 if (is_support_sw_smu(adev)) {
2977 smu_get_power_limit(&adev->smu, &limit, false);
2978 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2979 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
2980 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
2981 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
2983 size = snprintf(buf, PAGE_SIZE, "\n");
2986 pm_runtime_mark_last_busy(adev->ddev->dev);
2987 pm_runtime_put_autosuspend(adev->ddev->dev);
2993 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
2994 struct device_attribute *attr,
2998 struct amdgpu_device *adev = dev_get_drvdata(dev);
3002 if (adev->in_gpu_reset)
3005 if (amdgpu_sriov_vf(adev))
3008 err = kstrtou32(buf, 10, &value);
3012 value = value / 1000000; /* convert to Watt */
3015 err = pm_runtime_get_sync(adev->ddev->dev);
3017 pm_runtime_put_autosuspend(adev->ddev->dev);
3021 if (is_support_sw_smu(adev))
3022 err = smu_set_power_limit(&adev->smu, value);
3023 else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
3024 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
3028 pm_runtime_mark_last_busy(adev->ddev->dev);
3029 pm_runtime_put_autosuspend(adev->ddev->dev);
3037 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3038 struct device_attribute *attr,
3041 struct amdgpu_device *adev = dev_get_drvdata(dev);
3043 int r, size = sizeof(sclk);
3045 if (adev->in_gpu_reset)
3048 r = pm_runtime_get_sync(adev->ddev->dev);
3050 pm_runtime_put_autosuspend(adev->ddev->dev);
3055 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3056 (void *)&sclk, &size);
3058 pm_runtime_mark_last_busy(adev->ddev->dev);
3059 pm_runtime_put_autosuspend(adev->ddev->dev);
3064 return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
3067 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3068 struct device_attribute *attr,
3071 return snprintf(buf, PAGE_SIZE, "sclk\n");
3074 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3075 struct device_attribute *attr,
3078 struct amdgpu_device *adev = dev_get_drvdata(dev);
3080 int r, size = sizeof(mclk);
3082 if (adev->in_gpu_reset)
3085 r = pm_runtime_get_sync(adev->ddev->dev);
3087 pm_runtime_put_autosuspend(adev->ddev->dev);
3092 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3093 (void *)&mclk, &size);
3095 pm_runtime_mark_last_busy(adev->ddev->dev);
3096 pm_runtime_put_autosuspend(adev->ddev->dev);
3101 return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
3104 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3105 struct device_attribute *attr,
3108 return snprintf(buf, PAGE_SIZE, "mclk\n");
3109 }
3111 /**
3112 * DOC: hwmon
3113 *
3114 * The amdgpu driver exposes the following sensor interfaces:
3116 * - GPU temperature (via the on-die sensor)
3120 * - Northbridge voltage (APUs only)
3126 * - GPU gfx/compute engine clock
3128 * - GPU memory clock (dGPU only)
3130 * hwmon interfaces for GPU temperature:
3132 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
3133 * - temp2_input and temp3_input are supported on SOC15 dGPUs only
3135 * - temp[1-3]_label: temperature channel label
3136 * - temp2_label and temp3_label are supported on SOC15 dGPUs only
3138 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3139 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3141 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3142 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3144 * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
3145 * - these are supported on SOC15 dGPUs only
3147 * hwmon interfaces for GPU voltage:
3149 * - in0_input: the voltage on the GPU in millivolts
3151 * - in1_input: the voltage on the Northbridge in millivolts
3153 * hwmon interfaces for GPU power:
3155 * - power1_average: average power used by the GPU in microWatts
3157 * - power1_cap_min: minimum cap supported in microWatts
3159 * - power1_cap_max: maximum cap supported in microWatts
3161 * - power1_cap: selected power cap in microWatts
3163 * hwmon interfaces for GPU fan:
3165 * - pwm1: pulse width modulation fan level (0-255)
3167 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3169 * - pwm1_min: pulse width modulation fan control minimum level (0)
3171 * - pwm1_max: pulse width modulation fan control maximum level (255)
3173 * - fan1_min: minimum fan speed in revolutions per minute (RPM)
3175 * - fan1_max: maximum fan speed in revolutions per minute (RPM)
3177 * - fan1_input: fan speed in RPM
3179 * - fan[1-\*]_target: desired fan speed in revolutions per minute (RPM)
3181 * - fan[1-\*]_enable: enable or disable the sensors (1: enable, 0: disable)
3183 * hwmon interfaces for GPU clocks:
3185 * - freq1_input: the gfx/compute clock in hertz
3187 * - freq2_input: the memory clock in hertz
3189 * You can use hwmon tools like sensors to view this information on your system.
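3190 * For example, to read the edge temperature (the hwmon index below is
3191 * illustrative and varies per system): cat /sys/class/hwmon/hwmon0/temp1_input
3192 */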
3193 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3194 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3195 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3196 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3197 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3198 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3199 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3200 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3201 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3202 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3203 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3204 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3205 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3206 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3207 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3208 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3209 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3210 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3211 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3212 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3213 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3214 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3215 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3216 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3217 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3218 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3219 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3220 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3221 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3222 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3223 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3224 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3225 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3226 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3227 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3228 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3230 static struct attribute *hwmon_attributes[] = {
3231 &sensor_dev_attr_temp1_input.dev_attr.attr,
3232 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3233 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3234 &sensor_dev_attr_temp2_input.dev_attr.attr,
3235 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3236 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3237 &sensor_dev_attr_temp3_input.dev_attr.attr,
3238 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3239 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3240 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3241 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3242 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3243 &sensor_dev_attr_temp1_label.dev_attr.attr,
3244 &sensor_dev_attr_temp2_label.dev_attr.attr,
3245 &sensor_dev_attr_temp3_label.dev_attr.attr,
3246 &sensor_dev_attr_pwm1.dev_attr.attr,
3247 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3248 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3249 &sensor_dev_attr_pwm1_max.dev_attr.attr,
3250 &sensor_dev_attr_fan1_input.dev_attr.attr,
3251 &sensor_dev_attr_fan1_min.dev_attr.attr,
3252 &sensor_dev_attr_fan1_max.dev_attr.attr,
3253 &sensor_dev_attr_fan1_target.dev_attr.attr,
3254 &sensor_dev_attr_fan1_enable.dev_attr.attr,
3255 &sensor_dev_attr_in0_input.dev_attr.attr,
3256 &sensor_dev_attr_in0_label.dev_attr.attr,
3257 &sensor_dev_attr_in1_input.dev_attr.attr,
3258 &sensor_dev_attr_in1_label.dev_attr.attr,
3259 &sensor_dev_attr_power1_average.dev_attr.attr,
3260 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3261 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3262 &sensor_dev_attr_power1_cap.dev_attr.attr,
3263 &sensor_dev_attr_freq1_input.dev_attr.attr,
3264 &sensor_dev_attr_freq1_label.dev_attr.attr,
3265 &sensor_dev_attr_freq2_input.dev_attr.attr,
3266 &sensor_dev_attr_freq2_label.dev_attr.attr,
3267 NULL
3268 };
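3269 /* Hide attributes not supported by the ASIC, SR-IOV mode, fan hardware, or dpm state. */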
3270 static umode_t hwmon_attributes_visible(struct kobject *kobj,
3271 struct attribute *attr, int index)
3273 struct device *dev = kobj_to_dev(kobj);
3274 struct amdgpu_device *adev = dev_get_drvdata(dev);
3275 umode_t effective_mode = attr->mode;
3277 /* under multi-vf mode, none of the hwmon attributes are supported */
3278 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
3281 /* there is no fan under pp one vf mode */
3282 if (amdgpu_sriov_is_pp_one_vf(adev) &&
3283 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3284 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3285 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3286 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3287 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3288 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3289 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3290 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3291 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3294 /* Skip fan attributes if fan is not present */
3295 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3296 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3297 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3298 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3299 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3300 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3301 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3302 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3303 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3306 /* Skip fan attributes on APU */
3307 if ((adev->flags & AMD_IS_APU) &&
3308 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3309 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3310 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3311 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3312 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3313 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3314 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3315 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3316 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3319 /* Skip crit temp on APU */
3320 if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
3321 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3322 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3325 /* Skip limit attributes if DPM is not enabled */
3326 if (!adev->pm.dpm_enabled &&
3327 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3328 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3329 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3330 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3331 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3332 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3333 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3334 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3335 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3336 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3337 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3340 if (!is_support_sw_smu(adev)) {
3341 /* mask fan attributes if we have no bindings for this asic to expose */
3342 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
3343 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3344 (!adev->powerplay.pp_funcs->get_fan_control_mode &&
3345 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3346 effective_mode &= ~S_IRUGO;
3348 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
3349 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3350 (!adev->powerplay.pp_funcs->set_fan_control_mode &&
3351 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3352 effective_mode &= ~S_IWUSR;
3355 if (((adev->flags & AMD_IS_APU) ||
3356 adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
3357 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
3358 (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
3359 attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3360 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3361 attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
3364 if (!is_support_sw_smu(adev)) {
3365 /* hide max/min values if we can't both query and manage the fan */
3366 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
3367 !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
3368 (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3369 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3370 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3371 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3374 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3375 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3376 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3377 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3381 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
3382 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
3383 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3384 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3387 /* only APUs have vddnb */
3388 if (!(adev->flags & AMD_IS_APU) &&
3389 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3390 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3393 /* no mclk on APUs */
3394 if ((adev->flags & AMD_IS_APU) &&
3395 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3396 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3399 /* only SOC15 dGPUs support hotspot and mem temperatures */
3400 if (((adev->flags & AMD_IS_APU) ||
3401 adev->asic_type < CHIP_VEGA10) &&
3402 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3403 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3404 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
3405 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3406 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3407 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3408 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
3409 attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3410 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3411 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3412 attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
3415 return effective_mode;
3418 static const struct attribute_group hwmon_attrgroup = {
3419 .attrs = hwmon_attributes,
3420 .is_visible = hwmon_attributes_visible,
3421 };
3423 static const struct attribute_group *hwmon_groups[] = {
3424 &hwmon_attrgroup,
3425 NULL
3426 };
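3427 /* Thermal worker: switch to the internal thermal power state when the GPU overheats. */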
3428 void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
3430 struct amdgpu_device *adev =
3431 container_of(work, struct amdgpu_device,
3432 pm.dpm.thermal.work);
3433 /* switch to the thermal state */
3434 enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
3435 int temp, size = sizeof(temp);
3437 if (!adev->pm.dpm_enabled)
3440 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
3441 (void *)&temp, &size)) {
3442 if (temp < adev->pm.dpm.thermal.min_temp)
3443 /* switch back the user state */
3444 dpm_state = adev->pm.dpm.user_state;
3445 } else {
3446 if (adev->pm.dpm.thermal.high_to_low)
3447 /* switch back the user state */
3448 dpm_state = adev->pm.dpm.user_state;
3450 mutex_lock(&adev->pm.mutex);
3451 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
3452 adev->pm.dpm.thermal_active = true;
3454 adev->pm.dpm.thermal_active = false;
3455 adev->pm.dpm.state = dpm_state;
3456 mutex_unlock(&adev->pm.mutex);
3458 amdgpu_pm_compute_clocks(adev);
3459 }
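3460 /* Select the power state that best matches the requested dpm_state and display config. */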
3461 static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
3462 enum amd_pm_state_type dpm_state)
3465 struct amdgpu_ps *ps;
3467 bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
3468 true : false;
3470 /* check if the vblank period is too short to adjust the mclk */
3471 if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
3472 if (amdgpu_dpm_vblank_too_short(adev))
3473 single_display = false;
3476 /* certain older asics have a separate 3D performance state,
3477 * so try that first if the user selected performance
3478 */
3479 if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
3480 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
3481 /* balanced states don't exist at the moment */
3482 if (dpm_state == POWER_STATE_TYPE_BALANCED)
3483 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
3485 restart_search:
3486 /* Pick the best power state based on current conditions */
3487 for (i = 0; i < adev->pm.dpm.num_ps; i++) {
3488 ps = &adev->pm.dpm.ps[i];
3489 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
3490 switch (dpm_state) {
3492 case POWER_STATE_TYPE_BATTERY:
3493 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
3494 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
3501 case POWER_STATE_TYPE_BALANCED:
3502 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
3503 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
3510 case POWER_STATE_TYPE_PERFORMANCE:
3511 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
3512 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
3519 /* internal states */
3520 case POWER_STATE_TYPE_INTERNAL_UVD:
3521 if (adev->pm.dpm.uvd_ps)
3522 return adev->pm.dpm.uvd_ps;
3525 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
3526 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
3529 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
3530 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
3533 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
3534 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
3537 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
3538 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
3541 case POWER_STATE_TYPE_INTERNAL_BOOT:
3542 return adev->pm.dpm.boot_ps;
3543 case POWER_STATE_TYPE_INTERNAL_THERMAL:
3544 if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
3547 case POWER_STATE_TYPE_INTERNAL_ACPI:
3548 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
3551 case POWER_STATE_TYPE_INTERNAL_ULV:
3552 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
3555 case POWER_STATE_TYPE_INTERNAL_3DPERF:
3556 if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
3563 /* use a fallback state if we didn't match */
3564 switch (dpm_state) {
3565 case POWER_STATE_TYPE_INTERNAL_UVD_SD:
3566 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
3567 goto restart_search;
3568 case POWER_STATE_TYPE_INTERNAL_UVD_HD:
3569 case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
3570 case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
3571 if (adev->pm.dpm.uvd_ps) {
3572 return adev->pm.dpm.uvd_ps;
3574 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
3575 goto restart_search;
3577 case POWER_STATE_TYPE_INTERNAL_THERMAL:
3578 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
3579 goto restart_search;
3580 case POWER_STATE_TYPE_INTERNAL_ACPI:
3581 dpm_state = POWER_STATE_TYPE_BATTERY;
3582 goto restart_search;
3583 case POWER_STATE_TYPE_BATTERY:
3584 case POWER_STATE_TYPE_BALANCED:
3585 case POWER_STATE_TYPE_INTERNAL_3DPERF:
3586 dpm_state = POWER_STATE_TYPE_PERFORMANCE;
3587 goto restart_search;
3595 static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
3597 struct amdgpu_ps *ps;
3598 enum amd_pm_state_type dpm_state;
3602 /* if dpm init failed */
3603 if (!adev->pm.dpm_enabled)
3606 if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
3607 /* add other state override checks here */
3608 if ((!adev->pm.dpm.thermal_active) &&
3609 (!adev->pm.dpm.uvd_active))
3610 adev->pm.dpm.state = adev->pm.dpm.user_state;
3612 dpm_state = adev->pm.dpm.state;
3614 ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
3616 adev->pm.dpm.requested_ps = ps;
3620 if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
3621 printk("switching from power state:\n");
3622 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
3623 printk("switching to power state:\n");
3624 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
3627 /* update whether vce is active */
3628 ps->vce_active = adev->pm.dpm.vce_active;
3629 if (adev->powerplay.pp_funcs->display_configuration_changed)
3630 amdgpu_dpm_display_configuration_changed(adev);
3632 ret = amdgpu_dpm_pre_set_power_state(adev);
3636 if (adev->powerplay.pp_funcs->check_state_equal) {
3637 if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
3644 amdgpu_dpm_set_power_state(adev);
3645 amdgpu_dpm_post_set_power_state(adev);
3647 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
3648 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
3650 if (adev->powerplay.pp_funcs->force_performance_level) {
3651 if (adev->pm.dpm.thermal_active) {
3652 enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
3653 /* force low perf level for thermal */
3654 amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
3655 /* save the user's level */
3656 adev->pm.dpm.forced_level = level;
3658 /* otherwise, user selected level */
3659 amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
3664 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
3668 if (adev->family == AMDGPU_FAMILY_SI) {
3669 mutex_lock(&adev->pm.mutex);
3671 adev->pm.dpm.uvd_active = true;
3672 adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
3674 adev->pm.dpm.uvd_active = false;
3676 mutex_unlock(&adev->pm.mutex);
3678 amdgpu_pm_compute_clocks(adev);
3680 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
3681 if (ret)
3682 DRM_ERROR("Dpm %s uvd failed, ret = %d.\n",
3683 enable ? "enable" : "disable", ret);
3685 /* enable/disable Low Memory PState for UVD (4k videos) */
3686 if (adev->asic_type == CHIP_STONEY &&
3687 adev->uvd.decode_image_width >= WIDTH_4K) {
3688 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
3690 if (hwmgr && hwmgr->hwmgr_func &&
3691 hwmgr->hwmgr_func->update_nbdpm_pstate)
3692 hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
3699 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
3703 if (adev->family == AMDGPU_FAMILY_SI) {
3704 mutex_lock(&adev->pm.mutex);
3706 adev->pm.dpm.vce_active = true;
3707 /* XXX select vce level based on ring/task */
3708 adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
3710 adev->pm.dpm.vce_active = false;
3712 mutex_unlock(&adev->pm.mutex);
3714 amdgpu_pm_compute_clocks(adev);
3716 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
3717 if (ret)
3718 DRM_ERROR("Dpm %s vce failed, ret = %d.\n",
3719 enable ? "enable" : "disable", ret);
3723 void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
3727 if (adev->powerplay.pp_funcs->print_power_state == NULL)
3730 for (i = 0; i < adev->pm.dpm.num_ps; i++)
3731 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
3735 void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
3739 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
3740 if (ret)
3741 DRM_ERROR("Dpm %s jpeg failed, ret = %d.\n",
3742 enable ? "enable" : "disable", ret);
3745 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
3749 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
3750 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
3751 if (r) {
3752 pr_err("smu firmware loading failed\n");
3753 return r;
3754 }
3755 *smu_version = adev->pm.fw_version;
3756 }
3757 return 0;
3758 }
3760 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
3765 if (adev->pm.sysfs_initialized)
3768 if (adev->pm.dpm_enabled == 0)
3771 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
3773 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
3776 if (IS_ERR(adev->pm.int_hwmon_dev)) {
3777 ret = PTR_ERR(adev->pm.int_hwmon_dev);
3778 dev_err(adev->dev,
3779 "Unable to register hwmon device: %d\n", ret);
3780 return ret;
3781 }
3783 switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
3784 case SRIOV_VF_MODE_ONE_VF:
3785 mask = ATTR_FLAG_ONEVF;
3786 break;
3787 case SRIOV_VF_MODE_MULTI_VF:
3788 mask = 0;
3789 break;
3790 case SRIOV_VF_MODE_BARE_METAL:
3791 default:
3792 mask = ATTR_FLAG_MASK_ALL;
3793 break;
3794 }
3796 ret = amdgpu_device_attr_create_groups(adev,
3797 amdgpu_device_attrs,
3798 ARRAY_SIZE(amdgpu_device_attrs),
3799 mask,
3800 &adev->pm.pm_attr_list);
3801 if (ret)
3802 return ret;
3804 adev->pm.sysfs_initialized = true;
3806 return 0;
3807 }
3809 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
3811 if (adev->pm.dpm_enabled == 0)
3814 if (adev->pm.int_hwmon_dev)
3815 hwmon_device_unregister(adev->pm.int_hwmon_dev);
3817 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
3818 }
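3819 /* Recalculate clocks and the active power state after display or workload changes. */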
3820 void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
3824 if (!adev->pm.dpm_enabled)
3827 if (adev->mode_info.num_crtc)
3828 amdgpu_display_bandwidth_update(adev);
3830 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
3831 struct amdgpu_ring *ring = adev->rings[i];
3832 if (ring && ring->sched.ready)
3833 amdgpu_fence_wait_empty(ring);
3836 if (is_support_sw_smu(adev)) {
3837 struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
3838 smu_handle_task(&adev->smu,
3840 AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
3843 if (adev->powerplay.pp_funcs->dispatch_tasks) {
3844 if (!amdgpu_device_has_dc_support(adev)) {
3845 mutex_lock(&adev->pm.mutex);
3846 amdgpu_dpm_get_active_displays(adev);
3847 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
3848 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
3849 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
3850 /* we have issues with mclk switching with refresh rates over 120 Hz on the non-DC code. */
3851 if (adev->pm.pm_display_cfg.vrefresh > 120)
3852 adev->pm.pm_display_cfg.min_vblank_time = 0;
3853 if (adev->powerplay.pp_funcs->display_configuration_change)
3854 adev->powerplay.pp_funcs->display_configuration_change(
3855 adev->powerplay.pp_handle,
3856 &adev->pm.pm_display_cfg);
3857 mutex_unlock(&adev->pm.mutex);
3859 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
3861 mutex_lock(&adev->pm.mutex);
3862 amdgpu_dpm_get_active_displays(adev);
3863 amdgpu_dpm_change_power_state_locked(adev);
3864 mutex_unlock(&adev->pm.mutex);
3872 #if defined(CONFIG_DEBUG_FS)
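3873 /* debugfs helper: dump clocks, voltages, power, temperature, load and engine power states. */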
3874 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3875 {
3876 uint32_t value;
3877 uint64_t value64;
3878 uint32_t query = 0;
3879 int size;
3882 size = sizeof(value);
3883 seq_printf(m, "GFX Clocks and Power:\n");
3884 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3885 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3886 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3887 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3888 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3889 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3890 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3891 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3892 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3893 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3894 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3895 seq_printf(m, "\t%u mV (VDDNB)\n", value);
3896 size = sizeof(uint32_t);
3897 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
3898 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3899 size = sizeof(value);
3900 seq_printf(m, "\n");
3903 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3904 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3907 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3908 seq_printf(m, "GPU Load: %u %%\n", value);
3910 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3911 seq_printf(m, "MEM Load: %u %%\n", value);
3913 seq_printf(m, "\n");
3915 /* SMC feature mask */
3916 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3917 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3919 if (adev->asic_type > CHIP_VEGA20) {
3921 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3923 seq_printf(m, "VCN: Disabled\n");
3925 seq_printf(m, "VCN: Enabled\n");
3926 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3927 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3928 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3929 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3932 seq_printf(m, "\n");
3933 } else {
3935 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3937 seq_printf(m, "UVD: Disabled\n");
3939 seq_printf(m, "UVD: Enabled\n");
3940 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3941 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3942 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3943 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3946 seq_printf(m, "\n");
3949 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3951 seq_printf(m, "VCE: Disabled\n");
3953 seq_printf(m, "VCE: Enabled\n");
3954 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3955 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3963 static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
3967 for (i = 0; clocks[i].flag; i++)
3968 seq_printf(m, "\t%s: %s\n", clocks[i].name,
3969 (flags & clocks[i].flag) ? "On" : "Off");
3972 static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
3974 struct drm_info_node *node = (struct drm_info_node *) m->private;
3975 struct drm_device *dev = node->minor->dev;
3976 struct amdgpu_device *adev = dev->dev_private;
3980 if (adev->in_gpu_reset)
3983 r = pm_runtime_get_sync(dev->dev);
3985 pm_runtime_put_autosuspend(dev->dev);
3989 if (!adev->pm.dpm_enabled) {
3990 seq_printf(m, "dpm not enabled\n");
3991 pm_runtime_mark_last_busy(dev->dev);
3992 pm_runtime_put_autosuspend(dev->dev);
3993 return 0;
3994 }
3996 if (!is_support_sw_smu(adev) &&
3997 adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
3998 mutex_lock(&adev->pm.mutex);
3999 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
4000 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
4001 else
4002 seq_printf(m, "Debugfs support not implemented for this asic\n");
4003 mutex_unlock(&adev->pm.mutex);
4005 } else {
4006 r = amdgpu_debugfs_pm_info_pp(m, adev);
4011 amdgpu_device_ip_get_clockgating_state(adev, &flags);
4013 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
4014 amdgpu_parse_cg_state(m, flags);
4015 seq_printf(m, "\n");
4018 pm_runtime_mark_last_busy(dev->dev);
4019 pm_runtime_put_autosuspend(dev->dev);
4024 static const struct drm_info_list amdgpu_pm_info_list[] = {
4025 {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
4029 int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
4031 #if defined(CONFIG_DEBUG_FS)
4032 return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
4033 #else
4034 return 0;
4035 #endif
4036 }