2 * Copyright 2017 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
22 * Authors: Rafał Miłecki <zajec5@gmail.com>
23 * Alex Deucher <alexdeucher@gmail.com>
26 #include <drm/drm_debugfs.h>
29 #include "amdgpu_drv.h"
30 #include "amdgpu_pm.h"
31 #include "amdgpu_dpm.h"
32 #include "amdgpu_smu.h"
34 #include <linux/pci.h>
35 #include <linux/hwmon.h>
36 #include <linux/hwmon-sysfs.h>
37 #include <linux/nospec.h>
38 #include <linux/pm_runtime.h>
39 #include <asm/processor.h>
42 static const struct cg_flag_name clocks[] = {
43 {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
44 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
45 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
46 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"},
47 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"},
48 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"},
49 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"},
50 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"},
51 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"},
52 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"},
53 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"},
54 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"},
55 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"},
56 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"},
57 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"},
58 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"},
59 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"},
60 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"},
61 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"},
62 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"},
63 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_MGCG, "Digital Rights Management Medium Grain Clock Gating"},
	{AMD_CG_SUPPORT_DRM_LS, "Digital Rights Management Light Sleep"},
	{AMD_CG_SUPPORT_ROM_MGCG, "ROM Medium Grain Clock Gating"},
67 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"},
68 {AMD_CG_SUPPORT_VCN_MGCG, "VCN Medium Grain Clock Gating"},
69 {AMD_CG_SUPPORT_HDP_DS, "Host Data Path Deep Sleep"},
70 {AMD_CG_SUPPORT_HDP_SD, "Host Data Path Shutdown"},
71 {AMD_CG_SUPPORT_IH_CG, "Interrupt Handler Clock Gating"},
72 {AMD_CG_SUPPORT_JPEG_MGCG, "JPEG Medium Grain Clock Gating"},
74 {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"},
75 {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"},
79 static const struct hwmon_temp_label {
80 enum PP_HWMON_TEMP channel;
83 {PP_TEMP_EDGE, "edge"},
84 {PP_TEMP_JUNCTION, "junction"},
89 * DOC: power_dpm_state
 * The power_dpm_state file is a legacy interface and is only provided for
 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting
 * certain power-related parameters. The file power_dpm_state is used for this.
 * It accepts the following arguments:
104 * On older GPUs, the vbios provided a special power state for battery
105 * operation. Selecting battery switched to this state. This is no
106 * longer provided on newer GPUs so the option does nothing in that case.
110 * On older GPUs, the vbios provided a special power state for balanced
111 * operation. Selecting balanced switched to this state. This is no
112 * longer provided on newer GPUs so the option does nothing in that case.
116 * On older GPUs, the vbios provided a special power state for performance
117 * operation. Selecting performance switched to this state. This is no
118 * longer provided on newer GPUs so the option does nothing in that case.
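 *
 * For example, a minimal usage sketch (the card0 sysfs path is an
 * assumption; substitute your device):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/power_dpm_state
 *	echo battery > /sys/class/drm/card0/device/power_dpm_state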
122 static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
123 struct device_attribute *attr,
126 struct drm_device *ddev = dev_get_drvdata(dev);
127 struct amdgpu_device *adev = drm_to_adev(ddev);
128 enum amd_pm_state_type pm;
131 if (amdgpu_in_reset(adev))
134 ret = pm_runtime_get_sync(ddev->dev);
136 pm_runtime_put_autosuspend(ddev->dev);
140 if (is_support_sw_smu(adev)) {
141 if (adev->smu.ppt_funcs->get_current_power_state)
142 pm = smu_get_current_power_state(&adev->smu);
144 pm = adev->pm.dpm.user_state;
145 } else if (adev->powerplay.pp_funcs->get_current_power_state) {
146 pm = amdgpu_dpm_get_current_power_state(adev);
148 pm = adev->pm.dpm.user_state;
151 pm_runtime_mark_last_busy(ddev->dev);
152 pm_runtime_put_autosuspend(ddev->dev);
154 return snprintf(buf, PAGE_SIZE, "%s\n",
155 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
156 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
159 static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
160 struct device_attribute *attr,
164 struct drm_device *ddev = dev_get_drvdata(dev);
165 struct amdgpu_device *adev = drm_to_adev(ddev);
166 enum amd_pm_state_type state;
169 if (amdgpu_in_reset(adev))
172 if (strncmp("battery", buf, strlen("battery")) == 0)
173 state = POWER_STATE_TYPE_BATTERY;
174 else if (strncmp("balanced", buf, strlen("balanced")) == 0)
175 state = POWER_STATE_TYPE_BALANCED;
176 else if (strncmp("performance", buf, strlen("performance")) == 0)
177 state = POWER_STATE_TYPE_PERFORMANCE;
181 ret = pm_runtime_get_sync(ddev->dev);
183 pm_runtime_put_autosuspend(ddev->dev);
187 if (is_support_sw_smu(adev)) {
188 mutex_lock(&adev->pm.mutex);
189 adev->pm.dpm.user_state = state;
190 mutex_unlock(&adev->pm.mutex);
191 } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
192 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
194 mutex_lock(&adev->pm.mutex);
195 adev->pm.dpm.user_state = state;
196 mutex_unlock(&adev->pm.mutex);
198 amdgpu_pm_compute_clocks(adev);
200 pm_runtime_mark_last_busy(ddev->dev);
201 pm_runtime_put_autosuspend(ddev->dev);
208 * DOC: power_dpm_force_performance_level
 * The amdgpu driver provides a sysfs API for adjusting certain
 * power-related parameters. The file power_dpm_force_performance_level is
 * used for this. It accepts the following arguments:
 * When auto is selected, the driver will attempt to dynamically select
 * the optimal power profile for the current conditions.
237 * When low is selected, the clocks are forced to the lowest power state.
241 * When high is selected, the clocks are forced to the highest power state.
245 * When manual is selected, the user can manually adjust which power states
246 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk,
247 * and pp_dpm_pcie files and adjust the power state transition heuristics
248 * via the pp_power_profile_mode sysfs file.
 * When the profiling modes are selected, clock and power gating are
 * disabled and the clocks are set for different profiling cases. This
 * mode is recommended for profiling specific workloads where you do
 * not want clock or power gating, or clock fluctuations, to interfere
 * with your results. profile_standard sets the clocks to a fixed clock
 * level which varies from ASIC to ASIC. profile_min_sclk forces the sclk
 * to the lowest level. profile_min_mclk forces the mclk to the lowest level.
 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels.
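 *
 * For example, a minimal sketch of entering and leaving manual mode
 * (card0 path assumed):
 *
 * .. code-block:: bash
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	echo auto > /sys/class/drm/card0/device/power_dpm_force_performance_level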
266 static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
267 struct device_attribute *attr,
270 struct drm_device *ddev = dev_get_drvdata(dev);
271 struct amdgpu_device *adev = drm_to_adev(ddev);
272 enum amd_dpm_forced_level level = 0xff;
275 if (amdgpu_in_reset(adev))
278 ret = pm_runtime_get_sync(ddev->dev);
280 pm_runtime_put_autosuspend(ddev->dev);
284 if (is_support_sw_smu(adev))
285 level = smu_get_performance_level(&adev->smu);
286 else if (adev->powerplay.pp_funcs->get_performance_level)
287 level = amdgpu_dpm_get_performance_level(adev);
289 level = adev->pm.dpm.forced_level;
291 pm_runtime_mark_last_busy(ddev->dev);
292 pm_runtime_put_autosuspend(ddev->dev);
294 return snprintf(buf, PAGE_SIZE, "%s\n",
295 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
296 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
297 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
298 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
299 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
300 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
301 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
302 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
306 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
307 struct device_attribute *attr,
311 struct drm_device *ddev = dev_get_drvdata(dev);
312 struct amdgpu_device *adev = drm_to_adev(ddev);
313 enum amd_dpm_forced_level level;
314 enum amd_dpm_forced_level current_level = 0xff;
317 if (amdgpu_in_reset(adev))
320 if (strncmp("low", buf, strlen("low")) == 0) {
321 level = AMD_DPM_FORCED_LEVEL_LOW;
322 } else if (strncmp("high", buf, strlen("high")) == 0) {
323 level = AMD_DPM_FORCED_LEVEL_HIGH;
324 } else if (strncmp("auto", buf, strlen("auto")) == 0) {
325 level = AMD_DPM_FORCED_LEVEL_AUTO;
326 } else if (strncmp("manual", buf, strlen("manual")) == 0) {
327 level = AMD_DPM_FORCED_LEVEL_MANUAL;
328 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) {
329 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT;
330 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) {
331 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
332 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) {
333 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
334 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) {
335 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
336 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
337 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
342 ret = pm_runtime_get_sync(ddev->dev);
344 pm_runtime_put_autosuspend(ddev->dev);
348 if (is_support_sw_smu(adev))
349 current_level = smu_get_performance_level(&adev->smu);
350 else if (adev->powerplay.pp_funcs->get_performance_level)
351 current_level = amdgpu_dpm_get_performance_level(adev);
353 if (current_level == level) {
354 pm_runtime_mark_last_busy(ddev->dev);
355 pm_runtime_put_autosuspend(ddev->dev);
359 if (adev->asic_type == CHIP_RAVEN) {
360 if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
361 if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
362 amdgpu_gfx_off_ctrl(adev, false);
363 else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
364 amdgpu_gfx_off_ctrl(adev, true);
368 /* profile_exit setting is valid only when current mode is in profile mode */
369 if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
370 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
371 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
372 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
373 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
374 pr_err("Currently not in any profile mode!\n");
375 pm_runtime_mark_last_busy(ddev->dev);
376 pm_runtime_put_autosuspend(ddev->dev);
380 if (is_support_sw_smu(adev)) {
381 ret = smu_force_performance_level(&adev->smu, level);
383 pm_runtime_mark_last_busy(ddev->dev);
384 pm_runtime_put_autosuspend(ddev->dev);
387 } else if (adev->powerplay.pp_funcs->force_performance_level) {
388 mutex_lock(&adev->pm.mutex);
389 if (adev->pm.dpm.thermal_active) {
390 mutex_unlock(&adev->pm.mutex);
391 pm_runtime_mark_last_busy(ddev->dev);
392 pm_runtime_put_autosuspend(ddev->dev);
395 ret = amdgpu_dpm_force_performance_level(adev, level);
397 mutex_unlock(&adev->pm.mutex);
398 pm_runtime_mark_last_busy(ddev->dev);
399 pm_runtime_put_autosuspend(ddev->dev);
402 adev->pm.dpm.forced_level = level;
404 mutex_unlock(&adev->pm.mutex);
406 pm_runtime_mark_last_busy(ddev->dev);
407 pm_runtime_put_autosuspend(ddev->dev);
412 static ssize_t amdgpu_get_pp_num_states(struct device *dev,
413 struct device_attribute *attr,
416 struct drm_device *ddev = dev_get_drvdata(dev);
417 struct amdgpu_device *adev = drm_to_adev(ddev);
418 struct pp_states_info data;
421 if (amdgpu_in_reset(adev))
424 ret = pm_runtime_get_sync(ddev->dev);
426 pm_runtime_put_autosuspend(ddev->dev);
430 if (is_support_sw_smu(adev)) {
431 ret = smu_get_power_num_states(&adev->smu, &data);
434 } else if (adev->powerplay.pp_funcs->get_pp_num_states) {
435 amdgpu_dpm_get_pp_num_states(adev, &data);
437 memset(&data, 0, sizeof(data));
440 pm_runtime_mark_last_busy(ddev->dev);
441 pm_runtime_put_autosuspend(ddev->dev);
443 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
444 for (i = 0; i < data.nums; i++)
		buf_len += snprintf(buf + buf_len, PAGE_SIZE - buf_len, "%d %s\n", i,
446 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" :
447 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" :
448 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" :
449 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default");
454 static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
455 struct device_attribute *attr,
458 struct drm_device *ddev = dev_get_drvdata(dev);
459 struct amdgpu_device *adev = drm_to_adev(ddev);
460 struct pp_states_info data;
461 struct smu_context *smu = &adev->smu;
462 enum amd_pm_state_type pm = 0;
465 if (amdgpu_in_reset(adev))
468 ret = pm_runtime_get_sync(ddev->dev);
470 pm_runtime_put_autosuspend(ddev->dev);
474 if (is_support_sw_smu(adev)) {
475 pm = smu_get_current_power_state(smu);
476 ret = smu_get_power_num_states(smu, &data);
479 } else if (adev->powerplay.pp_funcs->get_current_power_state
480 && adev->powerplay.pp_funcs->get_pp_num_states) {
481 pm = amdgpu_dpm_get_current_power_state(adev);
482 amdgpu_dpm_get_pp_num_states(adev, &data);
485 pm_runtime_mark_last_busy(ddev->dev);
486 pm_runtime_put_autosuspend(ddev->dev);
488 for (i = 0; i < data.nums; i++) {
489 if (pm == data.states[i])
496 return snprintf(buf, PAGE_SIZE, "%d\n", i);
499 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
500 struct device_attribute *attr,
503 struct drm_device *ddev = dev_get_drvdata(dev);
504 struct amdgpu_device *adev = drm_to_adev(ddev);
506 if (amdgpu_in_reset(adev))
509 if (adev->pp_force_state_enabled)
510 return amdgpu_get_pp_cur_state(dev, attr, buf);
512 return snprintf(buf, PAGE_SIZE, "\n");
515 static ssize_t amdgpu_set_pp_force_state(struct device *dev,
516 struct device_attribute *attr,
520 struct drm_device *ddev = dev_get_drvdata(dev);
521 struct amdgpu_device *adev = drm_to_adev(ddev);
522 enum amd_pm_state_type state = 0;
526 if (amdgpu_in_reset(adev))
529 if (strlen(buf) == 1)
530 adev->pp_force_state_enabled = false;
531 else if (is_support_sw_smu(adev))
532 adev->pp_force_state_enabled = false;
533 else if (adev->powerplay.pp_funcs->dispatch_tasks &&
534 adev->powerplay.pp_funcs->get_pp_num_states) {
535 struct pp_states_info data;
537 ret = kstrtoul(buf, 0, &idx);
538 if (ret || idx >= ARRAY_SIZE(data.states))
541 idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
543 amdgpu_dpm_get_pp_num_states(adev, &data);
544 state = data.states[idx];
546 ret = pm_runtime_get_sync(ddev->dev);
548 pm_runtime_put_autosuspend(ddev->dev);
552 /* only set user selected power states */
553 if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
554 state != POWER_STATE_TYPE_DEFAULT) {
555 amdgpu_dpm_dispatch_task(adev,
556 AMD_PP_TASK_ENABLE_USER_STATE, &state);
557 adev->pp_force_state_enabled = true;
559 pm_runtime_mark_last_busy(ddev->dev);
560 pm_runtime_put_autosuspend(ddev->dev);
569 * The amdgpu driver provides a sysfs API for uploading new powerplay
570 * tables. The file pp_table is used for this. Reading the file
 * will dump the current powerplay table. Writing to the file
572 * will attempt to upload a new powerplay table and re-initialize
573 * powerplay using that new table.
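 *
 * For example, a hedged sketch of dumping the table and uploading it
 * again (card0 path assumed; uploading a modified table may destabilize
 * the GPU, so treat this as illustrative only):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_table > /tmp/pp_table
 *	cat /tmp/pp_table > /sys/class/drm/card0/device/pp_table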
577 static ssize_t amdgpu_get_pp_table(struct device *dev,
578 struct device_attribute *attr,
581 struct drm_device *ddev = dev_get_drvdata(dev);
582 struct amdgpu_device *adev = drm_to_adev(ddev);
586 if (amdgpu_in_reset(adev))
589 ret = pm_runtime_get_sync(ddev->dev);
591 pm_runtime_put_autosuspend(ddev->dev);
595 if (is_support_sw_smu(adev)) {
596 size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
597 pm_runtime_mark_last_busy(ddev->dev);
598 pm_runtime_put_autosuspend(ddev->dev);
601 } else if (adev->powerplay.pp_funcs->get_pp_table) {
602 size = amdgpu_dpm_get_pp_table(adev, &table);
603 pm_runtime_mark_last_busy(ddev->dev);
604 pm_runtime_put_autosuspend(ddev->dev);
608 pm_runtime_mark_last_busy(ddev->dev);
609 pm_runtime_put_autosuspend(ddev->dev);
613 if (size >= PAGE_SIZE)
614 size = PAGE_SIZE - 1;
616 memcpy(buf, table, size);
621 static ssize_t amdgpu_set_pp_table(struct device *dev,
622 struct device_attribute *attr,
626 struct drm_device *ddev = dev_get_drvdata(dev);
627 struct amdgpu_device *adev = drm_to_adev(ddev);
630 if (amdgpu_in_reset(adev))
633 ret = pm_runtime_get_sync(ddev->dev);
635 pm_runtime_put_autosuspend(ddev->dev);
639 if (is_support_sw_smu(adev)) {
640 ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
642 pm_runtime_mark_last_busy(ddev->dev);
643 pm_runtime_put_autosuspend(ddev->dev);
646 } else if (adev->powerplay.pp_funcs->set_pp_table)
647 amdgpu_dpm_set_pp_table(adev, buf, count);
649 pm_runtime_mark_last_busy(ddev->dev);
650 pm_runtime_put_autosuspend(ddev->dev);
656 * DOC: pp_od_clk_voltage
 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages
 * in each power level within a power state. The file pp_od_clk_voltage is used
 * for this.
 *
 * Note that the actual memory controller clock rate is exposed, not
 * the effective memory clock of the DRAM devices. To translate it, use the
 * following formulas:
 *
 * Clock conversion (MHz):
 *
 * HBM: effective_memory_clock = memory_controller_clock * 1
 *
 * G5: effective_memory_clock = memory_controller_clock * 1
 *
 * G6: effective_memory_clock = memory_controller_clock * 2
674 * DRAM data rate (MT/s):
676 * HBM: effective_memory_clock * 2 = data_rate
678 * G5: effective_memory_clock * 4 = data_rate
680 * G6: effective_memory_clock * 8 = data_rate
 * Bandwidth (MB/s):
 *
 * data_rate * vram_bit_width / 8 = memory_bandwidth
 * An example of G5 (128-bit bus):
 *
 * memory_controller_clock = 1750 MHz
 *
 * effective_memory_clock = 1750 MHz * 1 = 1750 MHz
 *
 * data_rate = 1750 * 4 = 7000 MT/s
 *
 * memory_bandwidth = 7000 * 128 bits / 8 = 112000 MB/s
 *
 * An example of G6 (256-bit bus):
 *
 * memory_controller_clock = 875 MHz
 *
 * effective_memory_clock = 875 MHz * 2 = 1750 MHz
 *
 * data_rate = 1750 * 8 = 14000 MT/s
 *
 * memory_bandwidth = 14000 * 256 bits / 8 = 448000 MB/s
708 * < For Vega10 and previous ASICs >
710 * Reading the file will display:
712 * - a list of engine clock levels and voltages labeled OD_SCLK
714 * - a list of memory clock levels and voltages labeled OD_MCLK
716 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE
718 * To manually adjust these settings, first select manual using
719 * power_dpm_force_performance_level. Enter a new value for each
720 * level by writing a string that contains "s/m level clock voltage" to
721 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz
722 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at
723 * 810 mV. When you have edited all of the states as needed, write
724 * "c" (commit) to the file to commit your changes. If you want to reset to the
725 * default power levels, write "r" (reset) to the file to reset them.
728 * < For Vega20 and newer ASICs >
730 * Reading the file will display:
732 * - minimum and maximum engine clock labeled OD_SCLK
 * - minimum (not available for Vega20 and Navi1x) and maximum memory
 *   clock labeled OD_MCLK
737 * - three <frequency, voltage> points labeled OD_VDDC_CURVE.
738 * They can be used to calibrate the sclk voltage curve.
 * - voltage offset (in mV) applied to the target voltage calculation.
 *   This is available for Sienna Cichlid, Navy Flounder and Dimgrey
 *   Cavefish. For these ASICs, the target voltage calculation can be
 *   illustrated by "voltage = voltage calculated from v/f curve +
 *   overdrive vddgfx offset"
746 * - a list of valid ranges for sclk, mclk, and voltage curve points
749 * To manually adjust these settings:
751 * - First select manual using power_dpm_force_performance_level
 * - For clock frequency setting, enter a new value by writing a
 *   string that contains "s/m index clock" to the file. The index
 *   should be 0 to set the minimum clock and 1 to set the maximum
 *   clock. E.g., "s 0 500" will update the minimum sclk to 500 MHz.
 *   "m 1 800" will update the maximum mclk to 800 MHz.
 *
 *   For the sclk voltage curve, enter the new values by writing a
 *   string that contains "vc point clock voltage" to the file. The
 *   points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will
 *   update the first point with the clock set to 300 MHz and the
 *   voltage to 600 mV. "vc 2 1000 1000" will update the third point
 *   with the clock set to 1000 MHz and the voltage to 1000 mV.
 * - To update the voltage offset applied for the gfxclk/voltage calculation,
 *   enter the new value by writing a string that contains "vo offset" to
 *   the file. This is supported by Sienna Cichlid, Navy Flounder and
 *   Dimgrey Cavefish. The offset can be a positive or negative value.
771 * - When you have edited all of the states as needed, write "c" (commit)
772 * to the file to commit your changes
774 * - If you want to reset to the default power levels, write "r" (reset)
775 * to the file to reset them
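 *
 * For example, a sketch for a Vega20-style ASIC (card0 path assumed;
 * the values are placeholders, consult OD_RANGE on your ASIC first):
 *
 * .. code-block:: bash
 *
 *	echo manual > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *	echo "s 0 500" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *	echo "m 1 800" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *	echo "c" > /sys/class/drm/card0/device/pp_od_clk_voltage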
779 static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
780 struct device_attribute *attr,
784 struct drm_device *ddev = dev_get_drvdata(dev);
785 struct amdgpu_device *adev = drm_to_adev(ddev);
787 uint32_t parameter_size = 0;
792 const char delimiter[3] = {' ', '\n', '\0'};
795 if (amdgpu_in_reset(adev))
802 type = PP_OD_EDIT_SCLK_VDDC_TABLE;
803 else if (*buf == 'p')
804 type = PP_OD_EDIT_CCLK_VDDC_TABLE;
805 else if (*buf == 'm')
806 type = PP_OD_EDIT_MCLK_VDDC_TABLE;
808 type = PP_OD_RESTORE_DEFAULT_TABLE;
809 else if (*buf == 'c')
810 type = PP_OD_COMMIT_DPM_TABLE;
811 else if (!strncmp(buf, "vc", 2))
812 type = PP_OD_EDIT_VDDC_CURVE;
813 else if (!strncmp(buf, "vo", 2))
814 type = PP_OD_EDIT_VDDGFX_OFFSET;
818 memcpy(buf_cpy, buf, count+1);
822 if ((type == PP_OD_EDIT_VDDC_CURVE) ||
823 (type == PP_OD_EDIT_VDDGFX_OFFSET))
825 while (isspace(*++tmp_str));
827 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
828 if (strlen(sub_str) == 0)
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
835 while (isspace(*tmp_str))
839 ret = pm_runtime_get_sync(ddev->dev);
841 pm_runtime_put_autosuspend(ddev->dev);
845 if (is_support_sw_smu(adev)) {
846 ret = smu_od_edit_dpm_table(&adev->smu, type,
847 parameter, parameter_size);
850 pm_runtime_mark_last_busy(ddev->dev);
851 pm_runtime_put_autosuspend(ddev->dev);
856 if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
857 ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
861 pm_runtime_mark_last_busy(ddev->dev);
862 pm_runtime_put_autosuspend(ddev->dev);
867 if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
868 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
869 parameter, parameter_size);
871 pm_runtime_mark_last_busy(ddev->dev);
872 pm_runtime_put_autosuspend(ddev->dev);
877 if (type == PP_OD_COMMIT_DPM_TABLE) {
878 if (adev->powerplay.pp_funcs->dispatch_tasks) {
879 amdgpu_dpm_dispatch_task(adev,
880 AMD_PP_TASK_READJUST_POWER_STATE,
882 pm_runtime_mark_last_busy(ddev->dev);
883 pm_runtime_put_autosuspend(ddev->dev);
886 pm_runtime_mark_last_busy(ddev->dev);
887 pm_runtime_put_autosuspend(ddev->dev);
892 pm_runtime_mark_last_busy(ddev->dev);
893 pm_runtime_put_autosuspend(ddev->dev);
898 static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
899 struct device_attribute *attr,
902 struct drm_device *ddev = dev_get_drvdata(dev);
903 struct amdgpu_device *adev = drm_to_adev(ddev);
907 if (amdgpu_in_reset(adev))
910 ret = pm_runtime_get_sync(ddev->dev);
912 pm_runtime_put_autosuspend(ddev->dev);
916 if (is_support_sw_smu(adev)) {
917 size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
918 size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
919 size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
920 size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDGFX_OFFSET, buf+size);
921 size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
922 size += smu_print_clk_levels(&adev->smu, SMU_OD_CCLK, buf+size);
923 } else if (adev->powerplay.pp_funcs->print_clock_levels) {
924 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
925 size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
926 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
927 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
929 size = snprintf(buf, PAGE_SIZE, "\n");
931 pm_runtime_mark_last_busy(ddev->dev);
932 pm_runtime_put_autosuspend(ddev->dev);
 * The amdgpu driver provides a sysfs API for adjusting which powerplay
 * features are enabled. The file pp_features is used for this, and it
 * is only available for Vega10 and later dGPUs.
 *
 * Reading back the file will show you the following:
 *
 * - the current ppfeature mask
 *
 * - a list of all supported powerplay features with their names,
 *   bitmasks and enablement status ('Y'/'N' means "enabled"/"disabled").
 *
 * To manually enable or disable a specific feature, set or clear
 * the corresponding bit in the original ppfeature mask and write the
 * new mask to the file.
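 *
 * For example, a sketch of setting a new mask (card0 path assumed; the
 * mask value is a placeholder, read the file first to get the real mask
 * and bit layout for your ASIC):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_features
 *	echo 0x00000000000000ff > /sys/class/drm/card0/device/pp_features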
953 static ssize_t amdgpu_set_pp_features(struct device *dev,
954 struct device_attribute *attr,
958 struct drm_device *ddev = dev_get_drvdata(dev);
959 struct amdgpu_device *adev = drm_to_adev(ddev);
960 uint64_t featuremask;
963 if (amdgpu_in_reset(adev))
966 ret = kstrtou64(buf, 0, &featuremask);
970 ret = pm_runtime_get_sync(ddev->dev);
972 pm_runtime_put_autosuspend(ddev->dev);
976 if (is_support_sw_smu(adev)) {
977 ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
979 pm_runtime_mark_last_busy(ddev->dev);
980 pm_runtime_put_autosuspend(ddev->dev);
983 } else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
984 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
986 pm_runtime_mark_last_busy(ddev->dev);
987 pm_runtime_put_autosuspend(ddev->dev);
991 pm_runtime_mark_last_busy(ddev->dev);
992 pm_runtime_put_autosuspend(ddev->dev);
997 static ssize_t amdgpu_get_pp_features(struct device *dev,
998 struct device_attribute *attr,
1001 struct drm_device *ddev = dev_get_drvdata(dev);
1002 struct amdgpu_device *adev = drm_to_adev(ddev);
1006 if (amdgpu_in_reset(adev))
1009 ret = pm_runtime_get_sync(ddev->dev);
1011 pm_runtime_put_autosuspend(ddev->dev);
1015 if (is_support_sw_smu(adev))
1016 size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
1017 else if (adev->powerplay.pp_funcs->get_ppfeature_status)
1018 size = amdgpu_dpm_get_ppfeature_status(adev, buf);
1020 size = snprintf(buf, PAGE_SIZE, "\n");
1022 pm_runtime_mark_last_busy(ddev->dev);
1023 pm_runtime_put_autosuspend(ddev->dev);
1029 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie
1031 * The amdgpu driver provides a sysfs API for adjusting what power levels
1032 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk,
1033 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for
 * The pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for
 * Vega10 and later ASICs.
 * The pp_dpm_fclk interface is only available for Vega20 and later ASICs.
1040 * Reading back the files will show you the available power levels within
1041 * the power state and the clock information for those levels.
1043 * To manually adjust these states, first select manual using
1044 * power_dpm_force_performance_level.
 * Secondly, enter a new value for each level by writing a space-separated
 * list of level indices to the file. E.g.,
1049 * .. code-block:: bash
1051 * echo "4 5 6" > pp_dpm_sclk
1053 * will enable sclk levels 4, 5, and 6.
 * NOTE: changing the max DPM level of dcefclk is not supported at present.
1058 static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
1059 struct device_attribute *attr,
1062 struct drm_device *ddev = dev_get_drvdata(dev);
1063 struct amdgpu_device *adev = drm_to_adev(ddev);
1067 if (amdgpu_in_reset(adev))
1070 ret = pm_runtime_get_sync(ddev->dev);
1072 pm_runtime_put_autosuspend(ddev->dev);
1076 if (is_support_sw_smu(adev))
1077 size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
1078 else if (adev->powerplay.pp_funcs->print_clock_levels)
1079 size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
1081 size = snprintf(buf, PAGE_SIZE, "\n");
1083 pm_runtime_mark_last_busy(ddev->dev);
1084 pm_runtime_put_autosuspend(ddev->dev);
 * Worst case: all 32 bits individually specified, each in octal at up to
 * 12 characters (+1 for the separator).
1093 #define AMDGPU_MASK_BUF_MAX (32 * 13)
1095 static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
1098 unsigned long level;
1099 char *sub_str = NULL;
1101 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
1102 const char delimiter[3] = {' ', '\n', '\0'};
1107 bytes = min(count, sizeof(buf_cpy) - 1);
1108 memcpy(buf_cpy, buf, bytes);
1109 buf_cpy[bytes] = '\0';
1111 while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
1112 if (strlen(sub_str)) {
1113 ret = kstrtoul(sub_str, 0, &level);
1114 if (ret || level > 31)
1116 *mask |= 1 << level;
1124 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
1125 struct device_attribute *attr,
1129 struct drm_device *ddev = dev_get_drvdata(dev);
1130 struct amdgpu_device *adev = drm_to_adev(ddev);
1134 if (amdgpu_in_reset(adev))
1137 ret = amdgpu_read_mask(buf, count, &mask);
1141 ret = pm_runtime_get_sync(ddev->dev);
1143 pm_runtime_put_autosuspend(ddev->dev);
1147 if (is_support_sw_smu(adev))
1148 ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
1149 else if (adev->powerplay.pp_funcs->force_clock_level)
1150 ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
1152 pm_runtime_mark_last_busy(ddev->dev);
1153 pm_runtime_put_autosuspend(ddev->dev);
1161 static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
1162 struct device_attribute *attr,
1165 struct drm_device *ddev = dev_get_drvdata(dev);
1166 struct amdgpu_device *adev = drm_to_adev(ddev);
1170 if (amdgpu_in_reset(adev))
1173 ret = pm_runtime_get_sync(ddev->dev);
1175 pm_runtime_put_autosuspend(ddev->dev);
1179 if (is_support_sw_smu(adev))
1180 size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
1181 else if (adev->powerplay.pp_funcs->print_clock_levels)
1182 size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
1184 size = snprintf(buf, PAGE_SIZE, "\n");
1186 pm_runtime_mark_last_busy(ddev->dev);
1187 pm_runtime_put_autosuspend(ddev->dev);
1192 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
1193 struct device_attribute *attr,
1197 struct drm_device *ddev = dev_get_drvdata(dev);
1198 struct amdgpu_device *adev = drm_to_adev(ddev);
1202 if (amdgpu_in_reset(adev))
1205 ret = amdgpu_read_mask(buf, count, &mask);
1209 ret = pm_runtime_get_sync(ddev->dev);
1211 pm_runtime_put_autosuspend(ddev->dev);
1215 if (is_support_sw_smu(adev))
1216 ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
1217 else if (adev->powerplay.pp_funcs->force_clock_level)
1218 ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
1220 pm_runtime_mark_last_busy(ddev->dev);
1221 pm_runtime_put_autosuspend(ddev->dev);
1229 static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
1230 struct device_attribute *attr,
1233 struct drm_device *ddev = dev_get_drvdata(dev);
1234 struct amdgpu_device *adev = drm_to_adev(ddev);
1238 if (amdgpu_in_reset(adev))
1241 ret = pm_runtime_get_sync(ddev->dev);
1243 pm_runtime_put_autosuspend(ddev->dev);
1247 if (is_support_sw_smu(adev))
1248 size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
1249 else if (adev->powerplay.pp_funcs->print_clock_levels)
1250 size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
1252 size = snprintf(buf, PAGE_SIZE, "\n");
1254 pm_runtime_mark_last_busy(ddev->dev);
1255 pm_runtime_put_autosuspend(ddev->dev);
1260 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
1261 struct device_attribute *attr,
1265 struct drm_device *ddev = dev_get_drvdata(dev);
1266 struct amdgpu_device *adev = drm_to_adev(ddev);
1270 if (amdgpu_in_reset(adev))
1273 ret = amdgpu_read_mask(buf, count, &mask);
1277 ret = pm_runtime_get_sync(ddev->dev);
1279 pm_runtime_put_autosuspend(ddev->dev);
1283 if (is_support_sw_smu(adev))
1284 ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
1285 else if (adev->powerplay.pp_funcs->force_clock_level)
1286 ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
1290 pm_runtime_mark_last_busy(ddev->dev);
1291 pm_runtime_put_autosuspend(ddev->dev);
1299 static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
1300 struct device_attribute *attr,
1303 struct drm_device *ddev = dev_get_drvdata(dev);
1304 struct amdgpu_device *adev = drm_to_adev(ddev);
1308 if (amdgpu_in_reset(adev))
1311 ret = pm_runtime_get_sync(ddev->dev);
1313 pm_runtime_put_autosuspend(ddev->dev);
1317 if (is_support_sw_smu(adev))
1318 size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
1319 else if (adev->powerplay.pp_funcs->print_clock_levels)
1320 size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
1322 size = snprintf(buf, PAGE_SIZE, "\n");
1324 pm_runtime_mark_last_busy(ddev->dev);
1325 pm_runtime_put_autosuspend(ddev->dev);
1330 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
1331 struct device_attribute *attr,
1335 struct drm_device *ddev = dev_get_drvdata(dev);
1336 struct amdgpu_device *adev = drm_to_adev(ddev);
1340 if (amdgpu_in_reset(adev))
1343 ret = amdgpu_read_mask(buf, count, &mask);
1347 ret = pm_runtime_get_sync(ddev->dev);
1349 pm_runtime_put_autosuspend(ddev->dev);
1353 if (is_support_sw_smu(adev))
1354 ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
1355 else if (adev->powerplay.pp_funcs->force_clock_level)
1356 ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
1360 pm_runtime_mark_last_busy(ddev->dev);
1361 pm_runtime_put_autosuspend(ddev->dev);
1369 static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
1370 struct device_attribute *attr,
1373 struct drm_device *ddev = dev_get_drvdata(dev);
1374 struct amdgpu_device *adev = drm_to_adev(ddev);
1378 if (amdgpu_in_reset(adev))
1381 ret = pm_runtime_get_sync(ddev->dev);
1383 pm_runtime_put_autosuspend(ddev->dev);
1387 if (is_support_sw_smu(adev))
1388 size = smu_print_clk_levels(&adev->smu, SMU_VCLK, buf);
1390 size = snprintf(buf, PAGE_SIZE, "\n");
1392 pm_runtime_mark_last_busy(ddev->dev);
1393 pm_runtime_put_autosuspend(ddev->dev);
1398 static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
1399 struct device_attribute *attr,
1403 struct drm_device *ddev = dev_get_drvdata(dev);
1404 struct amdgpu_device *adev = drm_to_adev(ddev);
1408 if (amdgpu_in_reset(adev))
1411 ret = amdgpu_read_mask(buf, count, &mask);
1415 ret = pm_runtime_get_sync(ddev->dev);
1417 pm_runtime_put_autosuspend(ddev->dev);
1421 if (is_support_sw_smu(adev))
1422 ret = smu_force_clk_levels(&adev->smu, SMU_VCLK, mask);
1426 pm_runtime_mark_last_busy(ddev->dev);
1427 pm_runtime_put_autosuspend(ddev->dev);
1435 static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
1436 struct device_attribute *attr,
1439 struct drm_device *ddev = dev_get_drvdata(dev);
1440 struct amdgpu_device *adev = drm_to_adev(ddev);
1444 if (amdgpu_in_reset(adev))
1447 ret = pm_runtime_get_sync(ddev->dev);
1449 pm_runtime_put_autosuspend(ddev->dev);
1453 if (is_support_sw_smu(adev))
1454 size = smu_print_clk_levels(&adev->smu, SMU_DCLK, buf);
1456 size = snprintf(buf, PAGE_SIZE, "\n");
1458 pm_runtime_mark_last_busy(ddev->dev);
1459 pm_runtime_put_autosuspend(ddev->dev);
1464 static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
1465 struct device_attribute *attr,
1469 struct drm_device *ddev = dev_get_drvdata(dev);
1470 struct amdgpu_device *adev = drm_to_adev(ddev);
1474 if (amdgpu_in_reset(adev))
1477 ret = amdgpu_read_mask(buf, count, &mask);
1481 ret = pm_runtime_get_sync(ddev->dev);
1483 pm_runtime_put_autosuspend(ddev->dev);
1487 if (is_support_sw_smu(adev))
1488 ret = smu_force_clk_levels(&adev->smu, SMU_DCLK, mask);
1492 pm_runtime_mark_last_busy(ddev->dev);
1493 pm_runtime_put_autosuspend(ddev->dev);
1501 static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
1502 struct device_attribute *attr,
1505 struct drm_device *ddev = dev_get_drvdata(dev);
1506 struct amdgpu_device *adev = drm_to_adev(ddev);
1510 if (amdgpu_in_reset(adev))
1513 ret = pm_runtime_get_sync(ddev->dev);
1515 pm_runtime_put_autosuspend(ddev->dev);
1519 if (is_support_sw_smu(adev))
1520 size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
1521 else if (adev->powerplay.pp_funcs->print_clock_levels)
1522 size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
1524 size = snprintf(buf, PAGE_SIZE, "\n");
1526 pm_runtime_mark_last_busy(ddev->dev);
1527 pm_runtime_put_autosuspend(ddev->dev);
1532 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
1533 struct device_attribute *attr,
1537 struct drm_device *ddev = dev_get_drvdata(dev);
1538 struct amdgpu_device *adev = drm_to_adev(ddev);
1542 if (amdgpu_in_reset(adev))
1545 ret = amdgpu_read_mask(buf, count, &mask);
1549 ret = pm_runtime_get_sync(ddev->dev);
1551 pm_runtime_put_autosuspend(ddev->dev);
1555 if (is_support_sw_smu(adev))
1556 ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
1557 else if (adev->powerplay.pp_funcs->force_clock_level)
1558 ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
1562 pm_runtime_mark_last_busy(ddev->dev);
1563 pm_runtime_put_autosuspend(ddev->dev);
1571 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
1572 struct device_attribute *attr,
1575 struct drm_device *ddev = dev_get_drvdata(dev);
1576 struct amdgpu_device *adev = drm_to_adev(ddev);
1580 if (amdgpu_in_reset(adev))
1583 ret = pm_runtime_get_sync(ddev->dev);
1585 pm_runtime_put_autosuspend(ddev->dev);
1589 if (is_support_sw_smu(adev))
1590 size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
1591 else if (adev->powerplay.pp_funcs->print_clock_levels)
1592 size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
1594 size = snprintf(buf, PAGE_SIZE, "\n");
1596 pm_runtime_mark_last_busy(ddev->dev);
1597 pm_runtime_put_autosuspend(ddev->dev);
1602 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
1603 struct device_attribute *attr,
1607 struct drm_device *ddev = dev_get_drvdata(dev);
1608 struct amdgpu_device *adev = drm_to_adev(ddev);
1612 if (amdgpu_in_reset(adev))
1615 ret = amdgpu_read_mask(buf, count, &mask);
1619 ret = pm_runtime_get_sync(ddev->dev);
1621 pm_runtime_put_autosuspend(ddev->dev);
1625 if (is_support_sw_smu(adev))
1626 ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
1627 else if (adev->powerplay.pp_funcs->force_clock_level)
1628 ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
1632 pm_runtime_mark_last_busy(ddev->dev);
1633 pm_runtime_put_autosuspend(ddev->dev);
1641 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
1642 struct device_attribute *attr,
1645 struct drm_device *ddev = dev_get_drvdata(dev);
1646 struct amdgpu_device *adev = drm_to_adev(ddev);
1650 if (amdgpu_in_reset(adev))
1653 ret = pm_runtime_get_sync(ddev->dev);
1655 pm_runtime_put_autosuspend(ddev->dev);
1659 if (is_support_sw_smu(adev))
1661 else if (adev->powerplay.pp_funcs->get_sclk_od)
1662 value = amdgpu_dpm_get_sclk_od(adev);
1664 pm_runtime_mark_last_busy(ddev->dev);
1665 pm_runtime_put_autosuspend(ddev->dev);
1667 return snprintf(buf, PAGE_SIZE, "%d\n", value);
1670 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
1671 struct device_attribute *attr,
1675 struct drm_device *ddev = dev_get_drvdata(dev);
1676 struct amdgpu_device *adev = drm_to_adev(ddev);
1680 if (amdgpu_in_reset(adev))
1683 ret = kstrtol(buf, 0, &value);
1688 ret = pm_runtime_get_sync(ddev->dev);
1690 pm_runtime_put_autosuspend(ddev->dev);
1694 if (is_support_sw_smu(adev)) {
1697 if (adev->powerplay.pp_funcs->set_sclk_od)
1698 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
1700 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1701 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
1703 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1704 amdgpu_pm_compute_clocks(adev);
1708 pm_runtime_mark_last_busy(ddev->dev);
1709 pm_runtime_put_autosuspend(ddev->dev);
1714 static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
1715 struct device_attribute *attr,
1718 struct drm_device *ddev = dev_get_drvdata(dev);
1719 struct amdgpu_device *adev = drm_to_adev(ddev);
1723 if (amdgpu_in_reset(adev))
1726 ret = pm_runtime_get_sync(ddev->dev);
1728 pm_runtime_put_autosuspend(ddev->dev);
1732 if (is_support_sw_smu(adev))
1734 else if (adev->powerplay.pp_funcs->get_mclk_od)
1735 value = amdgpu_dpm_get_mclk_od(adev);
1737 pm_runtime_mark_last_busy(ddev->dev);
1738 pm_runtime_put_autosuspend(ddev->dev);
1740 return snprintf(buf, PAGE_SIZE, "%d\n", value);
1743 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
1744 struct device_attribute *attr,
1748 struct drm_device *ddev = dev_get_drvdata(dev);
1749 struct amdgpu_device *adev = drm_to_adev(ddev);
1753 if (amdgpu_in_reset(adev))
1756 ret = kstrtol(buf, 0, &value);
1761 ret = pm_runtime_get_sync(ddev->dev);
1763 pm_runtime_put_autosuspend(ddev->dev);
1767 if (is_support_sw_smu(adev)) {
1770 if (adev->powerplay.pp_funcs->set_mclk_od)
1771 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
1773 if (adev->powerplay.pp_funcs->dispatch_tasks) {
1774 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
1776 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
1777 amdgpu_pm_compute_clocks(adev);
1781 pm_runtime_mark_last_busy(ddev->dev);
1782 pm_runtime_put_autosuspend(ddev->dev);
1788 * DOC: pp_power_profile_mode
1790 * The amdgpu driver provides a sysfs API for adjusting the heuristics
1791 * related to switching between power levels in a power state. The file
1792 * pp_power_profile_mode is used for this.
1794 * Reading this file outputs a list of all of the predefined power profiles
1795 * and the relevant heuristics settings for that profile.
1797 * To select a profile or create a custom profile, first select manual using
1798 * power_dpm_force_performance_level. Writing the number of a predefined
1799 * profile to pp_power_profile_mode will enable those heuristics. To
1800 * create a custom set of heuristics, write a string of numbers to the file
1801 * starting with the number of the custom profile along with a setting
 * for each heuristic parameter. Due to differences across ASIC families
 * the heuristic parameters vary from family to family.
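 *
 * For example, a sketch of selecting a predefined profile (card0 path
 * assumed; profile numbering differs between ASICs, so read the file
 * first to find the right index):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/pp_power_profile_mode
 *	echo 1 > /sys/class/drm/card0/device/pp_power_profile_mode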
1807 static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
1808 struct device_attribute *attr,
1811 struct drm_device *ddev = dev_get_drvdata(dev);
1812 struct amdgpu_device *adev = drm_to_adev(ddev);
1816 if (amdgpu_in_reset(adev))
1819 ret = pm_runtime_get_sync(ddev->dev);
1821 pm_runtime_put_autosuspend(ddev->dev);
1825 if (is_support_sw_smu(adev))
1826 size = smu_get_power_profile_mode(&adev->smu, buf);
1827 else if (adev->powerplay.pp_funcs->get_power_profile_mode)
1828 size = amdgpu_dpm_get_power_profile_mode(adev, buf);
1830 size = snprintf(buf, PAGE_SIZE, "\n");
1832 pm_runtime_mark_last_busy(ddev->dev);
1833 pm_runtime_put_autosuspend(ddev->dev);
1839 static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
1840 struct device_attribute *attr,
1845 struct drm_device *ddev = dev_get_drvdata(dev);
1846 struct amdgpu_device *adev = drm_to_adev(ddev);
1847 uint32_t parameter_size = 0;
1849 char *sub_str, buf_cpy[128];
1853 long int profile_mode = 0;
1854 const char delimiter[3] = {' ', '\n', '\0'};
1856 if (amdgpu_in_reset(adev))
1861 ret = kstrtol(tmp, 0, &profile_mode);
1865 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1866 if (count < 2 || count > 127)
1868 while (isspace(*++buf))
1870 memcpy(buf_cpy, buf, count-i);
1872 while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
1873 if (strlen(sub_str) == 0)
		ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
1879 while (isspace(*tmp_str))
1883 parameter[parameter_size] = profile_mode;
1885 ret = pm_runtime_get_sync(ddev->dev);
1887 pm_runtime_put_autosuspend(ddev->dev);
1891 if (is_support_sw_smu(adev))
1892 ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
1893 else if (adev->powerplay.pp_funcs->set_power_profile_mode)
1894 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
1896 pm_runtime_mark_last_busy(ddev->dev);
1897 pm_runtime_put_autosuspend(ddev->dev);
1906 * DOC: gpu_busy_percent
1908 * The amdgpu driver provides a sysfs API for reading how busy the GPU
1909 * is as a percentage. The file gpu_busy_percent is used for this.
1910 * The SMU firmware computes a percentage of load based on the
1911 * aggregate activity level in the IP cores.
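 *
 * For example (card0 path assumed):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/gpu_busy_percent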
1913 static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
1914 struct device_attribute *attr,
1917 struct drm_device *ddev = dev_get_drvdata(dev);
1918 struct amdgpu_device *adev = drm_to_adev(ddev);
1919 int r, value, size = sizeof(value);
1921 if (amdgpu_in_reset(adev))
1924 r = pm_runtime_get_sync(ddev->dev);
1926 pm_runtime_put_autosuspend(ddev->dev);
1930 /* read the IP busy sensor */
1931 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
1932 (void *)&value, &size);
1934 pm_runtime_mark_last_busy(ddev->dev);
1935 pm_runtime_put_autosuspend(ddev->dev);
1940 return snprintf(buf, PAGE_SIZE, "%d\n", value);
1944 * DOC: mem_busy_percent
1946 * The amdgpu driver provides a sysfs API for reading how busy the VRAM
1947 * is as a percentage. The file mem_busy_percent is used for this.
1948 * The SMU firmware computes a percentage of load based on the
1949 * aggregate activity level in the IP cores.
1951 static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
1952 struct device_attribute *attr,
1955 struct drm_device *ddev = dev_get_drvdata(dev);
1956 struct amdgpu_device *adev = drm_to_adev(ddev);
1957 int r, value, size = sizeof(value);
1959 if (amdgpu_in_reset(adev))
1962 r = pm_runtime_get_sync(ddev->dev);
1964 pm_runtime_put_autosuspend(ddev->dev);
1968 /* read the IP busy sensor */
1969 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD,
1970 (void *)&value, &size);
1972 pm_runtime_mark_last_busy(ddev->dev);
1973 pm_runtime_put_autosuspend(ddev->dev);
1978 return snprintf(buf, PAGE_SIZE, "%d\n", value);
1984 * The amdgpu driver provides a sysfs API for estimating how much data
1985 * has been received and sent by the GPU in the last second through PCIe.
1986 * The file pcie_bw is used for this.
1987 * The Perf counters count the number of received and sent messages and return
1988 * those values, as well as the maximum payload size of a PCIe packet (mps).
1989 * Note that it is not possible to easily and quickly obtain the size of each
 * packet transmitted, so we output the max payload size (mps) to allow
 * for a quick estimation of the PCIe bandwidth usage.
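 *
 * The output is "count0 count1 mps". A rough upper-bound estimate of the
 * bytes moved in the last second (a sketch, assuming every packet carried
 * a full payload; card0 path assumed):
 *
 * .. code-block:: bash
 *
 *	read rx tx mps < /sys/class/drm/card0/device/pcie_bw
 *	echo $(( (rx + tx) * mps ))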
1993 static ssize_t amdgpu_get_pcie_bw(struct device *dev,
1994 struct device_attribute *attr,
1997 struct drm_device *ddev = dev_get_drvdata(dev);
1998 struct amdgpu_device *adev = drm_to_adev(ddev);
1999 uint64_t count0 = 0, count1 = 0;
2002 if (amdgpu_in_reset(adev))
2005 if (adev->flags & AMD_IS_APU)
2008 if (!adev->asic_funcs->get_pcie_usage)
2011 ret = pm_runtime_get_sync(ddev->dev);
2013 pm_runtime_put_autosuspend(ddev->dev);
2017 amdgpu_asic_get_pcie_usage(adev, &count0, &count1);
2019 pm_runtime_mark_last_busy(ddev->dev);
2020 pm_runtime_put_autosuspend(ddev->dev);
2022 return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
2023 count0, count1, pcie_get_mps(adev->pdev));
 * The amdgpu driver provides a sysfs API for providing a unique ID for
 * the GPU. The file unique_id is used for this. It provides a unique ID
 * that will persist from machine to machine.
 *
 * NOTE: This will only work for GFX9 and newer. This file will be absent
 * on unsupported ASICs (GFX8 and older).
2036 static ssize_t amdgpu_get_unique_id(struct device *dev,
2037 struct device_attribute *attr,
2040 struct drm_device *ddev = dev_get_drvdata(dev);
2041 struct amdgpu_device *adev = drm_to_adev(ddev);
2043 if (amdgpu_in_reset(adev))
2046 if (adev->unique_id)
2047 return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
2053 * DOC: thermal_throttling_logging
 * Thermal throttling pulls down the clock frequency and thus the performance.
 * It's a useful mechanism to protect the chip from overheating. Since it
 * impacts performance, the user controls whether throttling events are
 * logged and, if so, how often.
 *
 * Reading back the file shows you the status (enabled or disabled) and
 * the interval (in seconds) between each thermal logging.
 *
 * Writing an integer to the file sets a new logging interval, in seconds.
 * The value should be between 1 and 3600. If the value is less than 1,
 * thermal logging is disabled. Values greater than 3600 are ignored.
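 *
 * For example (card0 path assumed):
 *
 * .. code-block:: bash
 *
 *	# log throttling events at most once every 60 seconds
 *	echo 60 > /sys/class/drm/card0/device/thermal_throttling_logging
 *	# disable thermal throttling logging
 *	echo 0 > /sys/class/drm/card0/device/thermal_throttling_logging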
2067 static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
2068 struct device_attribute *attr,
2071 struct drm_device *ddev = dev_get_drvdata(dev);
2072 struct amdgpu_device *adev = drm_to_adev(ddev);
2074 return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
2075 adev_to_drm(adev)->unique,
2076 atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
2077 adev->throttling_logging_rs.interval / HZ + 1);
2080 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
2081 struct device_attribute *attr,
2085 struct drm_device *ddev = dev_get_drvdata(dev);
2086 struct amdgpu_device *adev = drm_to_adev(ddev);
2087 long throttling_logging_interval;
2088 unsigned long flags;
2091 ret = kstrtol(buf, 0, &throttling_logging_interval);
2095 if (throttling_logging_interval > 3600)
2098 if (throttling_logging_interval > 0) {
2099 raw_spin_lock_irqsave(&adev->throttling_logging_rs.lock, flags);
2101 * Reset the ratelimit timer internals.
2102 * This can effectively restart the timer.
2104 adev->throttling_logging_rs.interval =
2105 (throttling_logging_interval - 1) * HZ;
2106 adev->throttling_logging_rs.begin = 0;
2107 adev->throttling_logging_rs.printed = 0;
2108 adev->throttling_logging_rs.missed = 0;
2109 raw_spin_unlock_irqrestore(&adev->throttling_logging_rs.lock, flags);
2111 atomic_set(&adev->throttling_logging_enabled, 1);
2113 atomic_set(&adev->throttling_logging_enabled, 0);
 * The amdgpu driver provides a sysfs API for retrieving current GPU
 * metrics data. The file gpu_metrics is used for this. Reading the
 * file will dump all the current GPU metrics data.
 *
 * The data includes temperature, frequency, engine utilization,
 * power consumption, throttler status, fan speed and CPU core
 * statistics (available for APUs only). That is, it gives a snapshot
 * of all sensors at the same time.
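 *
 * The file contains a versioned binary structure, so a raw dump sketch
 * (card0 path assumed) looks like:
 *
 * .. code-block:: bash
 *
 *	hexdump -C /sys/class/drm/card0/device/gpu_metrics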
2131 static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
2132 struct device_attribute *attr,
2135 struct drm_device *ddev = dev_get_drvdata(dev);
2136 struct amdgpu_device *adev = drm_to_adev(ddev);
2141 if (amdgpu_in_reset(adev))
2144 ret = pm_runtime_get_sync(ddev->dev);
2146 pm_runtime_put_autosuspend(ddev->dev);
2150 if (is_support_sw_smu(adev))
2151 size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics);
2152 else if (adev->powerplay.pp_funcs->get_gpu_metrics)
2153 size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
2158 if (size >= PAGE_SIZE)
2159 size = PAGE_SIZE - 1;
2161 memcpy(buf, gpu_metrics, size);
2164 pm_runtime_mark_last_busy(ddev->dev);
2165 pm_runtime_put_autosuspend(ddev->dev);
2170 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
2171 AMDGPU_DEVICE_ATTR_RW(power_dpm_state, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2172 AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level, ATTR_FLAG_BASIC),
2173 AMDGPU_DEVICE_ATTR_RO(pp_num_states, ATTR_FLAG_BASIC),
2174 AMDGPU_DEVICE_ATTR_RO(pp_cur_state, ATTR_FLAG_BASIC),
2175 AMDGPU_DEVICE_ATTR_RW(pp_force_state, ATTR_FLAG_BASIC),
2176 AMDGPU_DEVICE_ATTR_RW(pp_table, ATTR_FLAG_BASIC),
2177 AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2178 AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2179 AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2180 AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2181 AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2182 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
2183 AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
2184 AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
2185 AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
2186 AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
2187 AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
2188 AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
2189 AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
2190 AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
2191 AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
2192 AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
2193 AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
2194 AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC),
2195 AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC),
2198 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2199 uint32_t mask, enum amdgpu_device_attr_states *states)
2201 struct device_attribute *dev_attr = &attr->dev_attr;
2202 const char *attr_name = dev_attr->attr.name;
2203 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
2204 enum amd_asic_type asic_type = adev->asic_type;
2206 if (!(attr->flags & mask)) {
2207 *states = ATTR_STATE_UNSUPPORTED;
2211 #define DEVICE_ATTR_IS(_name) (!strcmp(attr_name, #_name))
2213 if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
2214 if (asic_type < CHIP_VEGA10)
2215 *states = ATTR_STATE_UNSUPPORTED;
2216 } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
2217 if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
2218 *states = ATTR_STATE_UNSUPPORTED;
2219 } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
2220 if (asic_type < CHIP_VEGA20)
2221 *states = ATTR_STATE_UNSUPPORTED;
2222 } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
2223 *states = ATTR_STATE_UNSUPPORTED;
2224 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
2225 (is_support_sw_smu(adev) && adev->smu.is_apu) ||
2226 (!is_support_sw_smu(adev) && hwmgr->od_enabled))
2227 *states = ATTR_STATE_SUPPORTED;
2228 } else if (DEVICE_ATTR_IS(mem_busy_percent)) {
2229 if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
2230 *states = ATTR_STATE_UNSUPPORTED;
2231 } else if (DEVICE_ATTR_IS(pcie_bw)) {
2232 /* PCIe Perf counters won't work on APU nodes */
2233 if (adev->flags & AMD_IS_APU)
2234 *states = ATTR_STATE_UNSUPPORTED;
2235 } else if (DEVICE_ATTR_IS(unique_id)) {
2236 if (asic_type != CHIP_VEGA10 &&
2237 asic_type != CHIP_VEGA20 &&
2238 asic_type != CHIP_ARCTURUS)
2239 *states = ATTR_STATE_UNSUPPORTED;
2240 } else if (DEVICE_ATTR_IS(pp_features)) {
2241 if (adev->flags & AMD_IS_APU || asic_type < CHIP_VEGA10)
2242 *states = ATTR_STATE_UNSUPPORTED;
2243 } else if (DEVICE_ATTR_IS(gpu_metrics)) {
2244 if (asic_type < CHIP_VEGA12)
2245 *states = ATTR_STATE_UNSUPPORTED;
2246 } else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
2247 if (!(asic_type == CHIP_VANGOGH))
2248 *states = ATTR_STATE_UNSUPPORTED;
2249 } else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
2250 if (asic_type != CHIP_VANGOGH)
2251 *states = ATTR_STATE_UNSUPPORTED;
2254 if (asic_type == CHIP_ARCTURUS) {
2255 /* Arcturus does not support standalone mclk/socclk/fclk level setting */
2256 if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
2257 DEVICE_ATTR_IS(pp_dpm_socclk) ||
2258 DEVICE_ATTR_IS(pp_dpm_fclk)) {
2259 dev_attr->attr.mode &= ~S_IWUGO;
2260 dev_attr->store = NULL;
2264 #undef DEVICE_ATTR_IS
2270 static int amdgpu_device_attr_create(struct amdgpu_device *adev,
2271 struct amdgpu_device_attr *attr,
2272 uint32_t mask, struct list_head *attr_list)
2275 struct device_attribute *dev_attr = &attr->dev_attr;
2276 const char *name = dev_attr->attr.name;
2277 enum amdgpu_device_attr_states attr_states = ATTR_STATE_SUPPORTED;
2278 struct amdgpu_device_attr_entry *attr_entry;
2280 int (*attr_update)(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
2281 uint32_t mask, enum amdgpu_device_attr_states *states) = default_attr_update;
2285 attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
2287 ret = attr_update(adev, attr, mask, &attr_states);
2289 dev_err(adev->dev, "failed to update device file %s, ret = %d\n",
2294 if (attr_states == ATTR_STATE_UNSUPPORTED)
2297 ret = device_create_file(adev->dev, dev_attr);
2299 dev_err(adev->dev, "failed to create device file %s, ret = %d\n",
2303 attr_entry = kmalloc(sizeof(*attr_entry), GFP_KERNEL);
2307 attr_entry->attr = attr;
2308 INIT_LIST_HEAD(&attr_entry->entry);
2310 list_add_tail(&attr_entry->entry, attr_list);
2315 static void amdgpu_device_attr_remove(struct amdgpu_device *adev, struct amdgpu_device_attr *attr)
2317 struct device_attribute *dev_attr = &attr->dev_attr;
2319 device_remove_file(adev->dev, dev_attr);
2322 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2323 struct list_head *attr_list);
2325 static int amdgpu_device_attr_create_groups(struct amdgpu_device *adev,
2326 struct amdgpu_device_attr *attrs,
2329 struct list_head *attr_list)
2334 for (i = 0; i < counts; i++) {
2335 ret = amdgpu_device_attr_create(adev, &attrs[i], mask, attr_list);
2343 amdgpu_device_attr_remove_groups(adev, attr_list);
2348 static void amdgpu_device_attr_remove_groups(struct amdgpu_device *adev,
2349 struct list_head *attr_list)
2351 struct amdgpu_device_attr_entry *entry, *entry_tmp;
2353 if (list_empty(attr_list))
2356 list_for_each_entry_safe(entry, entry_tmp, attr_list, entry) {
2357 amdgpu_device_attr_remove(adev, entry->attr);
2358 list_del(&entry->entry);
2363 static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
2364 struct device_attribute *attr,
2367 struct amdgpu_device *adev = dev_get_drvdata(dev);
2368 int channel = to_sensor_dev_attr(attr)->index;
2369 int r, temp = 0, size = sizeof(temp);
2371 if (amdgpu_in_reset(adev))
2374 if (channel >= PP_TEMP_MAX)
2377 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2379 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2384 case PP_TEMP_JUNCTION:
2385 /* get current junction temperature */
2386 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP,
2387 (void *)&temp, &size);
2390 /* get current edge temperature */
2391 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_EDGE_TEMP,
2392 (void *)&temp, &size);
2395 /* get current memory temperature */
2396 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP,
2397 (void *)&temp, &size);
2404 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2405 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2410 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2413 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
2414 struct device_attribute *attr,
2417 struct amdgpu_device *adev = dev_get_drvdata(dev);
2418 int hyst = to_sensor_dev_attr(attr)->index;
2422 temp = adev->pm.dpm.thermal.min_temp;
2424 temp = adev->pm.dpm.thermal.max_temp;
2426 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2429 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
2430 struct device_attribute *attr,
2433 struct amdgpu_device *adev = dev_get_drvdata(dev);
2434 int hyst = to_sensor_dev_attr(attr)->index;
2438 temp = adev->pm.dpm.thermal.min_hotspot_temp;
2440 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
2442 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2445 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
2446 struct device_attribute *attr,
2449 struct amdgpu_device *adev = dev_get_drvdata(dev);
2450 int hyst = to_sensor_dev_attr(attr)->index;
2454 temp = adev->pm.dpm.thermal.min_mem_temp;
2456 temp = adev->pm.dpm.thermal.max_mem_crit_temp;
2458 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2461 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
2462 struct device_attribute *attr,
2465 int channel = to_sensor_dev_attr(attr)->index;
2467 if (channel >= PP_TEMP_MAX)
2470 return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
2473 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
2474 struct device_attribute *attr,
2477 struct amdgpu_device *adev = dev_get_drvdata(dev);
2478 int channel = to_sensor_dev_attr(attr)->index;
2481 if (channel >= PP_TEMP_MAX)
2485 case PP_TEMP_JUNCTION:
2486 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp;
2489 temp = adev->pm.dpm.thermal.max_edge_emergency_temp;
2492 temp = adev->pm.dpm.thermal.max_mem_emergency_temp;
2496 return snprintf(buf, PAGE_SIZE, "%d\n", temp);
2499 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
2500 struct device_attribute *attr,
2503 struct amdgpu_device *adev = dev_get_drvdata(dev);
2507 if (amdgpu_in_reset(adev))
2510 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2512 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2516 if (is_support_sw_smu(adev)) {
2517 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2519 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2520 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2521 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2525 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2528 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2529 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2531 return sprintf(buf, "%i\n", pwm_mode);
2534 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
2535 struct device_attribute *attr,
2539 struct amdgpu_device *adev = dev_get_drvdata(dev);
2543 if (amdgpu_in_reset(adev))
2546 err = kstrtoint(buf, 10, &value);
2550 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2552 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2556 if (is_support_sw_smu(adev)) {
2557 smu_set_fan_control_mode(&adev->smu, value);
2559 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2560 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2561 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2565 amdgpu_dpm_set_fan_control_mode(adev, value);
2568 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2569 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2574 static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev,
2575 struct device_attribute *attr,
2578 return sprintf(buf, "%i\n", 0);
2581 static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev,
2582 struct device_attribute *attr,
2585 return sprintf(buf, "%i\n", 255);
2588 static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
2589 struct device_attribute *attr,
2590 const char *buf, size_t count)
2592 struct amdgpu_device *adev = dev_get_drvdata(dev);
2597 if (amdgpu_in_reset(adev))
2600 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2602 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2606 if (is_support_sw_smu(adev))
2607 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2609 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2611 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2612 pr_info("manual fan speed control should be enabled first\n");
2613 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2614 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2618 err = kstrtou32(buf, 10, &value);
2620 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2621 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
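/* map the 0-255 sysfs pwm value onto the 0-100 percent range the fan interfaces expect */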
2625 value = (value * 100) / 255;
2627 if (is_support_sw_smu(adev))
2628 err = smu_set_fan_speed_percent(&adev->smu, value);
2629 else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
2630 err = amdgpu_dpm_set_fan_speed_percent(adev, value);
2634 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2635 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2643 static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
2644 struct device_attribute *attr,
2647 struct amdgpu_device *adev = dev_get_drvdata(dev);
2651 if (amdgpu_in_reset(adev))
2654 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2656 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2660 if (is_support_sw_smu(adev))
2661 err = smu_get_fan_speed_percent(&adev->smu, &speed);
2662 else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
2663 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
2667 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2668 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
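/* scale the 0-100 percent reading back to the 0-255 pwm range */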
2673 speed = (speed * 255) / 100;
2675 return sprintf(buf, "%i\n", speed);
2678 static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
2679 struct device_attribute *attr,
2682 struct amdgpu_device *adev = dev_get_drvdata(dev);
2686 if (amdgpu_in_reset(adev))
2689 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2691 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2695 if (is_support_sw_smu(adev))
2696 err = smu_get_fan_speed_rpm(&adev->smu, &speed);
2697 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2698 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
2702 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2703 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2708 return sprintf(buf, "%i\n", speed);
2711 static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
2712 struct device_attribute *attr,
2715 struct amdgpu_device *adev = dev_get_drvdata(dev);
2717 u32 size = sizeof(min_rpm);
2720 if (amdgpu_in_reset(adev))
2723 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2725 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2729 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
2730 (void *)&min_rpm, &size);
2732 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2733 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2738 return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
2741 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
2742 struct device_attribute *attr,
2745 struct amdgpu_device *adev = dev_get_drvdata(dev);
2747 u32 size = sizeof(max_rpm);
2750 if (amdgpu_in_reset(adev))
2753 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2755 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2759 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
2760 (void *)&max_rpm, &size);
2762 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2763 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2768 return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
2771 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
2772 struct device_attribute *attr,
2775 struct amdgpu_device *adev = dev_get_drvdata(dev);
2779 if (amdgpu_in_reset(adev))
2782 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2784 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2788 if (is_support_sw_smu(adev))
2789 err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
2790 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
2791 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
2795 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2796 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2801 return sprintf(buf, "%i\n", rpm);
2804 static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
2805 struct device_attribute *attr,
2806 const char *buf, size_t count)
2808 struct amdgpu_device *adev = dev_get_drvdata(dev);
2813 if (amdgpu_in_reset(adev))
2816 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2818 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2822 if (is_support_sw_smu(adev))
2823 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2825 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2827 if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
2828 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2829 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2833 err = kstrtou32(buf, 10, &value);
2835 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2836 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2840 if (is_support_sw_smu(adev))
2841 err = smu_set_fan_speed_rpm(&adev->smu, value);
2842 else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
2843 err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
2847 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2848 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2856 static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
2857 struct device_attribute *attr,
2860 struct amdgpu_device *adev = dev_get_drvdata(dev);
2864 if (amdgpu_in_reset(adev))
2867 ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2869 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2873 if (is_support_sw_smu(adev)) {
2874 pwm_mode = smu_get_fan_control_mode(&adev->smu);
2876 if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
2877 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2878 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2882 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
2885 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2886 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2888 return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
2891 static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
2892 struct device_attribute *attr,
2896 struct amdgpu_device *adev = dev_get_drvdata(dev);
2901 if (amdgpu_in_reset(adev))
2904 err = kstrtoint(buf, 10, &value);
2909 pwm_mode = AMD_FAN_CTRL_AUTO;
2910 else if (value == 1)
2911 pwm_mode = AMD_FAN_CTRL_MANUAL;
2915 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2917 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2921 if (is_support_sw_smu(adev)) {
2922 smu_set_fan_control_mode(&adev->smu, pwm_mode);
2924 if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
2925 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2926 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2929 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
2932 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2933 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2938 static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
2939 struct device_attribute *attr,
2942 struct amdgpu_device *adev = dev_get_drvdata(dev);
2944 int r, size = sizeof(vddgfx);
2946 if (amdgpu_in_reset(adev))
2949 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2951 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2955 /* get the voltage */
2956 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
2957 (void *)&vddgfx, &size);
2959 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
2960 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2965 return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
2968 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
2969 struct device_attribute *attr,
2972 return snprintf(buf, PAGE_SIZE, "vddgfx\n");
2975 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
2976 struct device_attribute *attr,
2979 struct amdgpu_device *adev = dev_get_drvdata(dev);
2981 int r, size = sizeof(vddnb);
2983 if (amdgpu_in_reset(adev))
2986 /* only APUs have vddnb */
2987 if (!(adev->flags & AMD_IS_APU))
2990 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
2992 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
2996 /* get the voltage */
2997 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
2998 (void *)&vddnb, &size);
3000 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3001 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3006 return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
3009 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
3010 struct device_attribute *attr,
3013 return snprintf(buf, PAGE_SIZE, "vddnb\n");
3016 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
3017 struct device_attribute *attr,
3020 struct amdgpu_device *adev = dev_get_drvdata(dev);
3022 int r, size = sizeof(u32);
3025 if (amdgpu_in_reset(adev))
3028 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3030 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3034 /* get the power */
3035 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
3036 (void *)&query, &size);
3038 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3039 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3044 /* convert to microwatts */
3045 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
3047 return snprintf(buf, PAGE_SIZE, "%u\n", uw);
3050 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
3051 struct device_attribute *attr,
3054 return sprintf(buf, "%i\n", 0);
3057 static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
3058 struct device_attribute *attr,
3061 struct amdgpu_device *adev = dev_get_drvdata(dev);
3062 int limit_type = to_sensor_dev_attr(attr)->index;
3063 uint32_t limit = limit_type << 24;
3067 if (amdgpu_in_reset(adev))
3070 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3072 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3076 if (is_support_sw_smu(adev)) {
3077 smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_MAX);
3078 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
3079 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
3080 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
3081 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
3083 size = snprintf(buf, PAGE_SIZE, "\n");
3086 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3087 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3092 static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
3093 struct device_attribute *attr,
3096 struct amdgpu_device *adev = dev_get_drvdata(dev);
3097 int limit_type = to_sensor_dev_attr(attr)->index;
3098 uint32_t limit = limit_type << 24;
3102 if (amdgpu_in_reset(adev))
3105 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3107 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3111 if (is_support_sw_smu(adev)) {
3112 smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_CURRENT);
3113 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
3114 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
3115 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
3116 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
3118 size = snprintf(buf, PAGE_SIZE, "\n");
3121 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3122 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3127 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
3128 struct device_attribute *attr,
3131 int limit_type = to_sensor_dev_attr(attr)->index;
3133 return snprintf(buf, PAGE_SIZE, "%s\n",
3134 limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
3137 static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
3138 struct device_attribute *attr,
3142 struct amdgpu_device *adev = dev_get_drvdata(dev);
3143 int limit_type = to_sensor_dev_attr(attr)->index;
3147 if (amdgpu_in_reset(adev))
3150 if (amdgpu_sriov_vf(adev))
3153 err = kstrtou32(buf, 10, &value);
3157 value = value / 1000000; /* convert to Watt */
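/* pack the PPT limit selector (fastPPT vs. slowPPT on ASICs that have both) into the top byte */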
3158 value |= limit_type << 24;
3160 err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3162 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3166 if (is_support_sw_smu(adev))
3167 err = smu_set_power_limit(&adev->smu, value);
3168 else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
3169 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
3173 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3174 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3182 static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
3183 struct device_attribute *attr,
3186 struct amdgpu_device *adev = dev_get_drvdata(dev);
3188 int r, size = sizeof(sclk);
3190 if (amdgpu_in_reset(adev))
3193 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3195 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3200 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK,
3201 (void *)&sclk, &size);
3203 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3204 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
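/* the sensor reports the clock in 10 kHz units; hwmon expects Hz */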
3209 return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
3212 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
3213 struct device_attribute *attr,
3216 return snprintf(buf, PAGE_SIZE, "sclk\n");
3219 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
3220 struct device_attribute *attr,
3223 struct amdgpu_device *adev = dev_get_drvdata(dev);
3225 int r, size = sizeof(mclk);
3227 if (amdgpu_in_reset(adev))
3230 r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
3232 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3237 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK,
3238 (void *)&mclk, &size);
3240 pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
3241 pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
3246 return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
3249 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
3250 struct device_attribute *attr,
3253 return snprintf(buf, PAGE_SIZE, "mclk\n");
3259 * The amdgpu driver exposes the following sensor interfaces:
3261 * - GPU temperature (via the on-die sensor)
3265 * - Northbridge voltage (APUs only)
3271 * - GPU gfx/compute engine clock
3273 * - GPU memory clock (dGPU only)
3275 * hwmon interfaces for GPU temperature:
3277 * - temp[1-3]_input: the on die GPU temperature in millidegrees Celsius
3278 * - temp2_input and temp3_input are supported on SOC15 dGPUs only
3280 * - temp[1-3]_label: temperature channel label
3281 * - temp2_label and temp3_label are supported on SOC15 dGPUs only
3283 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius
3284 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only
3286 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius
3287 * - temp2_crit_hyst and temp3_crit_hyst are supported on SOC15 dGPUs only
3289 * - temp[1-3]_emergency: temperature emergency max value (ASIC shutdown) in millidegrees Celsius
3290 * - these are supported on SOC15 dGPUs only
3292 * hwmon interfaces for GPU voltage:
3294 * - in0_input: the voltage on the GPU in millivolts
3296 * - in1_input: the voltage on the Northbridge in millivolts
3298 * hwmon interfaces for GPU power:
3300 * - power1_average: average power used by the GPU in microWatts
3302 * - power1_cap_min: minimum cap supported in microWatts
3304 * - power1_cap_max: maximum cap supported in microWatts
3306 * - power1_cap: selected power cap in microWatts
3308 * hwmon interfaces for GPU fan:
3310 * - pwm1: pulse width modulation fan level (0-255)
3312 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control)
3314 * - pwm1_min: pulse width modulation fan control minimum level (0)
3316 * - pwm1_max: pulse width modulation fan control maximum level (255)
3318 * - fan1_min: minimum fan speed in revolutions per minute (RPM)
3320 * - fan1_max: maximum fan speed in revolutions per minute (RPM)
3322 * - fan1_input: fan speed in RPM
3324 * - fan[1-\*]_target: desired fan speed in revolutions per minute (RPM)
3326 * - fan[1-\*]_enable: enable or disable the sensors. 1: enable, 0: disable
3328 * hwmon interfaces for GPU clocks:
3330 * - freq1_input: the gfx/compute clock in hertz
3332 * - freq2_input: the memory clock in hertz
3334 * You can use hwmon tools like sensors to view this information on your system.
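 *
 * As a minimal userspace sketch (the hwmon0 index and sysfs path are
 * assumptions that vary per system, not something this driver fixes),
 * reading the edge temperature documented above looks like::
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
 *		long temp;
 *
 *		if (!f)
 *			return 1;
 *		// reported in millidegrees Celsius, as documented above
 *		if (fscanf(f, "%ld", &temp) == 1)
 *			printf("edge: %.3f C\n", temp / 1000.0);
 *		fclose(f);
 *		return 0;
 *	}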
3338 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE);
3339 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0);
3340 static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1);
3341 static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE);
3342 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION);
3343 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0);
3344 static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1);
3345 static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION);
3346 static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM);
3347 static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0);
3348 static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1);
3349 static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM);
3350 static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE);
3351 static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION);
3352 static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM);
3353 static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0);
3354 static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0);
3355 static SENSOR_DEVICE_ATTR(pwm1_min, S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0);
3356 static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0);
3357 static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0);
3358 static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0);
3359 static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0);
3360 static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0);
3361 static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0);
3362 static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0);
3363 static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0);
3364 static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0);
3365 static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0);
3366 static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0);
3367 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
3368 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
3369 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
3370 static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
3371 static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
3372 static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
3373 static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
3374 static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
3375 static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
3376 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
3377 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
3378 static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0);
3379 static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0);
3381 static struct attribute *hwmon_attributes[] = {
3382 &sensor_dev_attr_temp1_input.dev_attr.attr,
3383 &sensor_dev_attr_temp1_crit.dev_attr.attr,
3384 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr,
3385 &sensor_dev_attr_temp2_input.dev_attr.attr,
3386 &sensor_dev_attr_temp2_crit.dev_attr.attr,
3387 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr,
3388 &sensor_dev_attr_temp3_input.dev_attr.attr,
3389 &sensor_dev_attr_temp3_crit.dev_attr.attr,
3390 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr,
3391 &sensor_dev_attr_temp1_emergency.dev_attr.attr,
3392 &sensor_dev_attr_temp2_emergency.dev_attr.attr,
3393 &sensor_dev_attr_temp3_emergency.dev_attr.attr,
3394 &sensor_dev_attr_temp1_label.dev_attr.attr,
3395 &sensor_dev_attr_temp2_label.dev_attr.attr,
3396 &sensor_dev_attr_temp3_label.dev_attr.attr,
3397 &sensor_dev_attr_pwm1.dev_attr.attr,
3398 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
3399 &sensor_dev_attr_pwm1_min.dev_attr.attr,
3400 &sensor_dev_attr_pwm1_max.dev_attr.attr,
3401 &sensor_dev_attr_fan1_input.dev_attr.attr,
3402 &sensor_dev_attr_fan1_min.dev_attr.attr,
3403 &sensor_dev_attr_fan1_max.dev_attr.attr,
3404 &sensor_dev_attr_fan1_target.dev_attr.attr,
3405 &sensor_dev_attr_fan1_enable.dev_attr.attr,
3406 &sensor_dev_attr_in0_input.dev_attr.attr,
3407 &sensor_dev_attr_in0_label.dev_attr.attr,
3408 &sensor_dev_attr_in1_input.dev_attr.attr,
3409 &sensor_dev_attr_in1_label.dev_attr.attr,
3410 &sensor_dev_attr_power1_average.dev_attr.attr,
3411 &sensor_dev_attr_power1_cap_max.dev_attr.attr,
3412 &sensor_dev_attr_power1_cap_min.dev_attr.attr,
3413 &sensor_dev_attr_power1_cap.dev_attr.attr,
3414 &sensor_dev_attr_power1_label.dev_attr.attr,
3415 &sensor_dev_attr_power2_average.dev_attr.attr,
3416 &sensor_dev_attr_power2_cap_max.dev_attr.attr,
3417 &sensor_dev_attr_power2_cap_min.dev_attr.attr,
3418 &sensor_dev_attr_power2_cap.dev_attr.attr,
3419 &sensor_dev_attr_power2_label.dev_attr.attr,
3420 &sensor_dev_attr_freq1_input.dev_attr.attr,
3421 &sensor_dev_attr_freq1_label.dev_attr.attr,
3422 &sensor_dev_attr_freq2_input.dev_attr.attr,
3423 &sensor_dev_attr_freq2_label.dev_attr.attr,
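/*
 * .is_visible callback for the hwmon attribute group: returns 0 to hide
 * an attribute, or a (possibly reduced) mode, so userspace only sees
 * files the current ASIC, SR-IOV mode, and powerplay backend can serve.
 */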
3427 static umode_t hwmon_attributes_visible(struct kobject *kobj,
3428 struct attribute *attr, int index)
3430 struct device *dev = kobj_to_dev(kobj);
3431 struct amdgpu_device *adev = dev_get_drvdata(dev);
3432 umode_t effective_mode = attr->mode;
3434 /* under multi-vf mode, none of the hwmon attributes are supported */
3435 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
3438 /* there is no fan under pp one vf mode */
3439 if (amdgpu_sriov_is_pp_one_vf(adev) &&
3440 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3441 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3442 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3443 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3444 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3445 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3446 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3447 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3448 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3451 /* Skip fan attributes if fan is not present */
3452 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3453 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3454 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3455 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3456 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3457 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3458 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3459 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3460 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3463 /* Skip fan attributes on APU */
3464 if ((adev->flags & AMD_IS_APU) &&
3465 (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3466 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3467 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3468 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3469 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3470 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3471 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3472 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3473 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3476 /* Skip crit temp on APU */
3477 if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) &&
3478 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3479 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr))
3482 /* Skip limit attributes if DPM is not enabled */
3483 if (!adev->pm.dpm_enabled &&
3484 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
3485 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
3486 attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
3487 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
3488 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3489 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
3490 attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
3491 attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
3492 attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3493 attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
3494 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
3497 if (!is_support_sw_smu(adev)) {
3498 /* mask fan attributes if we have no bindings for this asic to expose */
3499 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
3500 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
3501 (!adev->powerplay.pp_funcs->get_fan_control_mode &&
3502 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
3503 effective_mode &= ~S_IRUGO;
3505 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
3506 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
3507 (!adev->powerplay.pp_funcs->set_fan_control_mode &&
3508 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
3509 effective_mode &= ~S_IWUSR;
3512 if (((adev->family == AMDGPU_FAMILY_SI) ||
3513 ((adev->flags & AMD_IS_APU) &&
3514 (adev->asic_type != CHIP_VANGOGH))) && /* not implemented yet */
3515 (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
3516 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr ||
3517 attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
3520 if (((adev->family == AMDGPU_FAMILY_SI) ||
3521 ((adev->flags & AMD_IS_APU) &&
3522 (adev->asic_type < CHIP_RENOIR))) && /* not implemented yet */
3523 (attr == &sensor_dev_attr_power1_average.dev_attr.attr))
3526 if (!is_support_sw_smu(adev)) {
3527 /* hide max/min values if we can't both query and manage the fan */
3528 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
3529 !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
3530 (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3531 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3532 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
3533 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
3536 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
3537 !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
3538 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
3539 attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
3543 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
3544 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
3545 (attr == &sensor_dev_attr_in0_input.dev_attr.attr ||
3546 attr == &sensor_dev_attr_in0_label.dev_attr.attr))
3549 /* only APUs have vddnb */
3550 if (!(adev->flags & AMD_IS_APU) &&
3551 (attr == &sensor_dev_attr_in1_input.dev_attr.attr ||
3552 attr == &sensor_dev_attr_in1_label.dev_attr.attr))
3555 /* no mclk on APUs */
3556 if ((adev->flags & AMD_IS_APU) &&
3557 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr ||
3558 attr == &sensor_dev_attr_freq2_label.dev_attr.attr))
3561 /* only SOC15 dGPUs support hotspot and mem temperatures */
3562 if (((adev->flags & AMD_IS_APU) ||
3563 adev->asic_type < CHIP_VEGA10) &&
3564 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr ||
3565 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr ||
3566 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr ||
3567 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr ||
3568 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr ||
3569 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr ||
3570 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr ||
3571 attr == &sensor_dev_attr_temp2_input.dev_attr.attr ||
3572 attr == &sensor_dev_attr_temp3_input.dev_attr.attr ||
3573 attr == &sensor_dev_attr_temp2_label.dev_attr.attr ||
3574 attr == &sensor_dev_attr_temp3_label.dev_attr.attr))
3577 /* only Vangogh has fast PPT limit and power labels */
3578 if ((adev->asic_type != CHIP_VANGOGH) &&
3579 (attr == &sensor_dev_attr_power2_average.dev_attr.attr ||
3580 attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
3581 attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
3582 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
3583 attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
3584 attr == &sensor_dev_attr_power1_label.dev_attr.attr))
3587 return effective_mode;
3590 static const struct attribute_group hwmon_attrgroup = {
3591 .attrs = hwmon_attributes,
3592 .is_visible = hwmon_attributes_visible,
3595 static const struct attribute_group *hwmon_groups[] = {
3600 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
3605 if (adev->pm.sysfs_initialized)
3608 if (adev->pm.dpm_enabled == 0)
3611 INIT_LIST_HEAD(&adev->pm.pm_attr_list);
3613 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
3616 if (IS_ERR(adev->pm.int_hwmon_dev)) {
3617 ret = PTR_ERR(adev->pm.int_hwmon_dev);
3619 "Unable to register hwmon device: %d\n", ret);
3623 switch (amdgpu_virt_get_sriov_vf_mode(adev)) {
3624 case SRIOV_VF_MODE_ONE_VF:
3625 mask = ATTR_FLAG_ONEVF;
3627 case SRIOV_VF_MODE_MULTI_VF:
3630 case SRIOV_VF_MODE_BARE_METAL:
3632 mask = ATTR_FLAG_MASK_ALL;
3636 ret = amdgpu_device_attr_create_groups(adev,
3637 amdgpu_device_attrs,
3638 ARRAY_SIZE(amdgpu_device_attrs),
3640 &adev->pm.pm_attr_list);
3644 adev->pm.sysfs_initialized = true;
3649 void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
3651 if (adev->pm.dpm_enabled == 0)
3654 if (adev->pm.int_hwmon_dev)
3655 hwmon_device_unregister(adev->pm.int_hwmon_dev);
3657 amdgpu_device_attr_remove_groups(adev, &adev->pm.pm_attr_list);
3663 #if defined(CONFIG_DEBUG_FS)
3665 static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
3666 struct amdgpu_device *adev) {
3671 if (is_support_cclk_dpm(adev)) {
3672 p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t),
3675 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
3676 (void *)p_val, &size)) {
3677 for (i = 0; i < adev->smu.cpu_core_num; i++)
3678 seq_printf(m, "\t%u MHz (CPU%d)\n",
3686 static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev)
3689 uint64_t value64 = 0;
3694 size = sizeof(value);
3695 seq_printf(m, "GFX Clocks and Power:\n");
3697 amdgpu_debugfs_prints_cpu_info(m, adev);
3699 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size))
3700 seq_printf(m, "\t%u MHz (MCLK)\n", value/100);
3701 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size))
3702 seq_printf(m, "\t%u MHz (SCLK)\n", value/100);
3703 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size))
3704 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100);
3705 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size))
3706 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100);
3707 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size))
3708 seq_printf(m, "\t%u mV (VDDGFX)\n", value);
3709 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size))
3710 seq_printf(m, "\t%u mV (VDDNB)\n", value);
3711 size = sizeof(uint32_t);
3712 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size))
3713 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff);
3714 size = sizeof(value);
3715 seq_printf(m, "\n");
3718 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size))
3719 seq_printf(m, "GPU Temperature: %u C\n", value/1000);
3722 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size))
3723 seq_printf(m, "GPU Load: %u %%\n", value);
3725 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
3726 seq_printf(m, "MEM Load: %u %%\n", value);
3728 seq_printf(m, "\n");
3730 /* SMC feature mask */
3731 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size))
3732 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64);
3734 if (adev->asic_type > CHIP_VEGA20) {
3736 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) {
3738 seq_printf(m, "VCN: Disabled\n");
3740 seq_printf(m, "VCN: Enabled\n");
3741 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3742 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3743 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3744 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3747 seq_printf(m, "\n");
3750 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) {
3752 seq_printf(m, "UVD: Disabled\n");
3754 seq_printf(m, "UVD: Enabled\n");
3755 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size))
3756 seq_printf(m, "\t%u MHz (DCLK)\n", value/100);
3757 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size))
3758 seq_printf(m, "\t%u MHz (VCLK)\n", value/100);
3761 seq_printf(m, "\n");
3764 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) {
3766 seq_printf(m, "VCE: Disabled\n");
3768 seq_printf(m, "VCE: Enabled\n");
3769 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size))
3770 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100);
3778 static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
3782 for (i = 0; clocks[i].flag; i++)
3783 seq_printf(m, "\t%s: %s\n", clocks[i].name,
3784 (flags & clocks[i].flag) ? "On" : "Off");
3787 static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
3789 struct drm_info_node *node = (struct drm_info_node *) m->private;
3790 struct drm_device *dev = node->minor->dev;
3791 struct amdgpu_device *adev = drm_to_adev(dev);
3795 if (amdgpu_in_reset(adev))
3798 r = pm_runtime_get_sync(dev->dev);
3800 pm_runtime_put_autosuspend(dev->dev);
3804 if (!adev->pm.dpm_enabled) {
3805 seq_printf(m, "dpm not enabled\n");
3806 pm_runtime_mark_last_busy(dev->dev);
3807 pm_runtime_put_autosuspend(dev->dev);
3811 if (!is_support_sw_smu(adev) &&
3812 adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
3813 mutex_lock(&adev->pm.mutex);
3814 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
3815 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
3817 seq_printf(m, "Debugfs support not implemented for this asic\n");
3818 mutex_unlock(&adev->pm.mutex);
3821 r = amdgpu_debugfs_pm_info_pp(m, adev);
3826 amdgpu_device_ip_get_clockgating_state(adev, &flags);
3828 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags);
3829 amdgpu_parse_cg_state(m, flags);
3830 seq_printf(m, "\n");
3833 pm_runtime_mark_last_busy(dev->dev);
3834 pm_runtime_put_autosuspend(dev->dev);
3839 static const struct drm_info_list amdgpu_pm_info_list[] = {
3840 {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
3844 int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
3846 #if defined(CONFIG_DEBUG_FS)
3847 return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));