drm/amdgpu: attr to control SS2.0 bias level (v2)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 5fa65f1..2e1286f 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
  *          Alex Deucher <alexdeucher@gmail.com>
  */
 
-#include <drm/drm_debugfs.h>
-
 #include "amdgpu.h"
 #include "amdgpu_drv.h"
 #include "amdgpu_pm.h"
 #include "amdgpu_dpm.h"
-#include "amdgpu_smu.h"
 #include "atom.h"
 #include <linux/pci.h>
 #include <linux/hwmon.h>
@@ -125,11 +122,14 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum amd_pm_state_type pm;
        int ret;
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -137,12 +137,7 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               if (adev->smu.ppt_funcs->get_current_power_state)
-                       pm = smu_get_current_power_state(&adev->smu);
-               else
-                       pm = adev->pm.dpm.user_state;
-       } else if (adev->powerplay.pp_funcs->get_current_power_state) {
+       if (pp_funcs->get_current_power_state) {
                pm = amdgpu_dpm_get_current_power_state(adev);
        } else {
                pm = adev->pm.dpm.user_state;
@@ -151,9 +146,9 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
-                       (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
+       return sysfs_emit(buf, "%s\n",
+                         (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
+                         (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance");
 }
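
The snprintf(buf, PAGE_SIZE, ...) calls replaced above (and throughout the rest of this patch) all follow the same pattern; a minimal sketch of a sysfs show() callback using sysfs_emit(), illustrative only and not part of this patch:

/* Illustrative sketch (not from this patch): sysfs_emit() bounds the
 * output to PAGE_SIZE and warns if the buffer is not page aligned, so
 * the PAGE_SIZE bookkeeping no longer has to be open-coded per caller.
 */
static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%s\n", "example");
}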
 
 static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
@@ -168,6 +163,8 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (strncmp("battery", buf, strlen("battery")) == 0)
                state = POWER_STATE_TYPE_BATTERY;
@@ -274,6 +271,8 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -281,9 +280,7 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev))
-               level = smu_get_performance_level(&adev->smu);
-       else if (adev->powerplay.pp_funcs->get_performance_level)
+       if (adev->powerplay.pp_funcs->get_performance_level)
                level = amdgpu_dpm_get_performance_level(adev);
        else
                level = adev->pm.dpm.forced_level;
@@ -291,16 +288,17 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
-                       (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
-                       (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
-                       (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
-                       (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
-                       (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
-                       (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
-                       (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
-                       (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
-                       "unknown");
+       return sysfs_emit(buf, "%s\n",
+                         (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
+                         (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
+                         (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" :
+                         (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" :
+                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" :
+                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" :
+                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" :
+                         (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? "profile_peak" :
+                         (level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) ? "perf_determinism" :
+                         "unknown");
 }
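
A userspace sketch exercising the new perf_determinism level through this attribute (illustrative only; the card0 path is an assumption and depends on the system):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        /* Assumed path; the DRM card index varies between systems. */
        const char *path =
                "/sys/class/drm/card0/device/power_dpm_force_performance_level";
        char level[32] = {0};
        int fd;

        /* Read the current level, e.g. "auto" or "perf_determinism". */
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return 1;
        if (read(fd, level, sizeof(level) - 1) > 0)
                printf("current level: %s", level);
        close(fd);

        /* Request the new deterministic-performance level (needs
         * sufficient privileges and hardware support).
         */
        fd = open(path, O_WRONLY);
        if (fd < 0)
                return 1;
        write(fd, "perf_determinism", strlen("perf_determinism"));
        close(fd);
        return 0;
}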
 
 static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
@@ -310,12 +308,15 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        enum amd_dpm_forced_level level;
        enum amd_dpm_forced_level current_level = 0xff;
        int ret = 0;
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (strncmp("low", buf, strlen("low")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -335,6 +336,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
                level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
        } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) {
                level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+       } else if (strncmp("perf_determinism", buf, strlen("perf_determinism")) == 0) {
+               level = AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM;
        }  else {
                return -EINVAL;
        }
@@ -345,9 +348,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev))
-               current_level = smu_get_performance_level(&adev->smu);
-       else if (adev->powerplay.pp_funcs->get_performance_level)
+       if (pp_funcs->get_performance_level)
                current_level = amdgpu_dpm_get_performance_level(adev);
 
        if (current_level == level) {
@@ -377,14 +378,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
                return -EINVAL;
        }
 
-       if (is_support_sw_smu(adev)) {
-               ret = smu_force_performance_level(&adev->smu, level);
-               if (ret) {
-                       pm_runtime_mark_last_busy(ddev->dev);
-                       pm_runtime_put_autosuspend(ddev->dev);
-                       return -EINVAL;
-               }
-       } else if (adev->powerplay.pp_funcs->force_performance_level) {
+       if (pp_funcs->force_performance_level) {
                mutex_lock(&adev->pm.mutex);
                if (adev->pm.dpm.thermal_active) {
                        mutex_unlock(&adev->pm.mutex);
@@ -415,11 +409,14 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        struct pp_states_info data;
        int i, buf_len, ret;
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -427,11 +424,7 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               ret = smu_get_power_num_states(&adev->smu, &data);
-               if (ret)
-                       return ret;
-       } else if (adev->powerplay.pp_funcs->get_pp_num_states) {
+       if (pp_funcs->get_pp_num_states) {
                amdgpu_dpm_get_pp_num_states(adev, &data);
        } else {
                memset(&data, 0, sizeof(data));
@@ -457,13 +450,15 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
-       struct pp_states_info data;
-       struct smu_context *smu = &adev->smu;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       struct pp_states_info data = {0};
        enum amd_pm_state_type pm = 0;
        int i = 0, ret = 0;
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -471,13 +466,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               pm = smu_get_current_power_state(smu);
-               ret = smu_get_power_num_states(smu, &data);
-               if (ret)
-                       return ret;
-       } else if (adev->powerplay.pp_funcs->get_current_power_state
-                && adev->powerplay.pp_funcs->get_pp_num_states) {
+       if (pp_funcs->get_current_power_state
+                && pp_funcs->get_pp_num_states) {
                pm = amdgpu_dpm_get_current_power_state(adev);
                amdgpu_dpm_get_pp_num_states(adev, &data);
        }
@@ -493,7 +483,7 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
        if (i == data.nums)
                i = -EINVAL;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", i);
+       return sysfs_emit(buf, "%d\n", i);
 }
 
 static ssize_t amdgpu_get_pp_force_state(struct device *dev,
@@ -505,11 +495,13 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (adev->pp_force_state_enabled)
                return amdgpu_get_pp_cur_state(dev, attr, buf);
        else
-               return snprintf(buf, PAGE_SIZE, "\n");
+               return sysfs_emit(buf, "\n");
 }
 
 static ssize_t amdgpu_set_pp_force_state(struct device *dev,
@@ -525,6 +517,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (strlen(buf) == 1)
                adev->pp_force_state_enabled = false;
@@ -585,6 +579,8 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -592,13 +588,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
-               pm_runtime_mark_last_busy(ddev->dev);
-               pm_runtime_put_autosuspend(ddev->dev);
-               if (size < 0)
-                       return size;
-       } else if (adev->powerplay.pp_funcs->get_pp_table) {
+       if (adev->powerplay.pp_funcs->get_pp_table) {
                size = amdgpu_dpm_get_pp_table(adev, &table);
                pm_runtime_mark_last_busy(ddev->dev);
                pm_runtime_put_autosuspend(ddev->dev);
@@ -629,6 +619,8 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -636,15 +628,12 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
-               if (ret) {
-                       pm_runtime_mark_last_busy(ddev->dev);
-                       pm_runtime_put_autosuspend(ddev->dev);
-                       return ret;
-               }
-       } else if (adev->powerplay.pp_funcs->set_pp_table)
-               amdgpu_dpm_set_pp_table(adev, buf, count);
+       ret = amdgpu_dpm_set_pp_table(adev, buf, count);
+       if (ret) {
+               pm_runtime_mark_last_busy(ddev->dev);
+               pm_runtime_put_autosuspend(ddev->dev);
+               return ret;
+       }
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
@@ -746,6 +735,23 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
  * - a list of valid ranges for sclk, mclk, and voltage curve points
  *   labeled OD_RANGE
  *
+ * < For APUs >
+ *
+ * Reading the file will display:
+ *
+ * - minimum and maximum engine clock labeled OD_SCLK
+ *
+ * - a list of valid ranges for sclk labeled OD_RANGE
+ *
+ * < For VanGogh >
+ *
+ * Reading the file will display:
+ *
+ * - minimum and maximum engine clock labeled OD_SCLK
+ * - minimum and maximum core clocks labeled OD_CCLK
+ *
+ * - a list of valid ranges for sclk and cclk labeled OD_RANGE
+ *
  * To manually adjust these settings:
  *
  * - First select manual using power_dpm_force_performance_level
@@ -754,7 +760,10 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
  *   string that contains "s/m index clock" to the file. The index
  *   should be 0 if to set minimum clock. And 1 if to set maximum
  *   clock. E.g., "s 0 500" will update minimum sclk to be 500 MHz.
- *   "m 1 800" will update maximum mclk to be 800Mhz.
+ *   "m 1 800" will update maximum mclk to be 800Mhz. For core
+ *   clocks on VanGogh, the string contains "p core index clock".
+ *   E.g., "p 2 0 800" would set the minimum core clock on core
+ *   2 to 800Mhz.
  *
  *   For sclk voltage curve, enter the new values by writing a
  *   string that contains "vc point clock voltage" to the file. The
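
A userspace sketch of the write syntax documented above (illustrative; the card0 path is an assumption, and manual must first be selected via power_dpm_force_performance_level, as noted):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Assumed path; the DRM card index varies between systems. */
#define OD_FILE "/sys/class/drm/card0/device/pp_od_clk_voltage"

static int od_write(const char *cmd)
{
        int fd = open(OD_FILE, O_WRONLY);
        ssize_t n;

        if (fd < 0)
                return -1;
        n = write(fd, cmd, strlen(cmd));
        close(fd);
        return n < 0 ? -1 : 0;
}

int main(void)
{
        od_write("s 0 500");   /* minimum sclk -> 500 MHz */
        od_write("m 1 800");   /* maximum mclk -> 800 MHz */
        od_write("p 2 0 800"); /* VanGogh: minimum cclk on core 2 -> 800 MHz */
        return 0;
}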
@@ -794,6 +803,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (count > 127)
                return -EINVAL;
@@ -842,53 +853,42 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               ret = smu_od_edit_dpm_table(&adev->smu, type,
-                                           parameter, parameter_size);
-
+       if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
+               ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
+                                                       parameter,
+                                                       parameter_size);
                if (ret) {
                        pm_runtime_mark_last_busy(ddev->dev);
                        pm_runtime_put_autosuspend(ddev->dev);
                        return -EINVAL;
                }
-       } else {
-
-               if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
-                       ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
-                                                               parameter,
-                                                               parameter_size);
-                       if (ret) {
-                               pm_runtime_mark_last_busy(ddev->dev);
-                               pm_runtime_put_autosuspend(ddev->dev);
-                               return -EINVAL;
-                       }
-               }
+       }
 
-               if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
-                       ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
-                                               parameter, parameter_size);
-                       if (ret) {
-                               pm_runtime_mark_last_busy(ddev->dev);
-                               pm_runtime_put_autosuspend(ddev->dev);
-                               return -EINVAL;
-                       }
+       if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
+               ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
+                                                   parameter, parameter_size);
+               if (ret) {
+                       pm_runtime_mark_last_busy(ddev->dev);
+                       pm_runtime_put_autosuspend(ddev->dev);
+                       return -EINVAL;
                }
+       }
 
-               if (type == PP_OD_COMMIT_DPM_TABLE) {
-                       if (adev->powerplay.pp_funcs->dispatch_tasks) {
-                               amdgpu_dpm_dispatch_task(adev,
-                                               AMD_PP_TASK_READJUST_POWER_STATE,
-                                               NULL);
-                               pm_runtime_mark_last_busy(ddev->dev);
-                               pm_runtime_put_autosuspend(ddev->dev);
-                               return count;
-                       } else {
-                               pm_runtime_mark_last_busy(ddev->dev);
-                               pm_runtime_put_autosuspend(ddev->dev);
-                               return -EINVAL;
-                       }
+       if (type == PP_OD_COMMIT_DPM_TABLE) {
+               if (adev->powerplay.pp_funcs->dispatch_tasks) {
+                       amdgpu_dpm_dispatch_task(adev,
+                                                AMD_PP_TASK_READJUST_POWER_STATE,
+                                                NULL);
+                       pm_runtime_mark_last_busy(ddev->dev);
+                       pm_runtime_put_autosuspend(ddev->dev);
+                       return count;
+               } else {
+                       pm_runtime_mark_last_busy(ddev->dev);
+                       pm_runtime_put_autosuspend(ddev->dev);
+                       return -EINVAL;
                }
        }
+
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
@@ -906,6 +906,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -913,18 +915,13 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf);
-               size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size);
-               size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size);
-               size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDGFX_OFFSET, buf+size);
-               size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size);
-               size += smu_print_clk_levels(&adev->smu, SMU_OD_CCLK, buf+size);
-       } else if (adev->powerplay.pp_funcs->print_clock_levels) {
+       if (adev->powerplay.pp_funcs->print_clock_levels) {
                size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
                size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
+               size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
                size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
+               size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
        } else {
                size = snprintf(buf, PAGE_SIZE, "\n");
        }
@@ -962,6 +959,8 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = kstrtou64(buf, 0, &featuremask);
        if (ret)
@@ -973,14 +972,7 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask);
-               if (ret) {
-                       pm_runtime_mark_last_busy(ddev->dev);
-                       pm_runtime_put_autosuspend(ddev->dev);
-                       return -EINVAL;
-               }
-       } else if (adev->powerplay.pp_funcs->set_ppfeature_status) {
+       if (adev->powerplay.pp_funcs->set_ppfeature_status) {
                ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
                if (ret) {
                        pm_runtime_mark_last_busy(ddev->dev);
@@ -1005,6 +997,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1012,9 +1006,7 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev))
-               size = smu_sys_get_pp_feature_mask(&adev->smu, buf);
-       else if (adev->powerplay.pp_funcs->get_ppfeature_status)
+       if (adev->powerplay.pp_funcs->get_ppfeature_status)
                size = amdgpu_dpm_get_ppfeature_status(adev, buf);
        else
                size = snprintf(buf, PAGE_SIZE, "\n");
@@ -1055,8 +1047,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
  * NOTE: change to the dcefclk max dpm level is not supported now
  */
 
-static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
-               struct device_attribute *attr,
+static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
+               enum pp_clock_type type,
                char *buf)
 {
        struct drm_device *ddev = dev_get_drvdata(dev);
@@ -1066,6 +1058,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1073,10 +1067,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev))
-               size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf);
-       else if (adev->powerplay.pp_funcs->print_clock_levels)
-               size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+       if (adev->powerplay.pp_funcs->print_clock_levels)
+               size = amdgpu_dpm_print_clock_levels(adev, type, buf);
        else
                size = snprintf(buf, PAGE_SIZE, "\n");
 
@@ -1121,8 +1113,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
        return 0;
 }
 
-static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
-               struct device_attribute *attr,
+static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
+               enum pp_clock_type type,
                const char *buf,
                size_t count)
 {
@@ -1133,6 +1125,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = amdgpu_read_mask(buf, count, &mask);
        if (ret)
@@ -1144,10 +1138,10 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev))
-               ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask);
-       else if (adev->powerplay.pp_funcs->force_clock_level)
-               ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
+       if (adev->powerplay.pp_funcs->force_clock_level)
+               ret = amdgpu_dpm_force_clock_level(adev, type, mask);
+       else
+               ret = 0;
 
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
@@ -1158,35 +1152,26 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
        return count;
 }
 
-static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
+static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       ssize_t size;
-       int ret;
-
-       if (amdgpu_in_reset(adev))
-               return -EPERM;
-
-       ret = pm_runtime_get_sync(ddev->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
-
-       if (is_support_sw_smu(adev))
-               size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf);
-       else if (adev->powerplay.pp_funcs->print_clock_levels)
-               size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
-       else
-               size = snprintf(buf, PAGE_SIZE, "\n");
+       return amdgpu_get_pp_dpm_clock(dev, PP_SCLK, buf);
+}
 
-       pm_runtime_mark_last_busy(ddev->dev);
-       pm_runtime_put_autosuspend(ddev->dev);
+static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
+               struct device_attribute *attr,
+               const char *buf,
+               size_t count)
+{
+       return amdgpu_set_pp_dpm_clock(dev, PP_SCLK, buf, count);
+}
 
-       return size;
+static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
+               struct device_attribute *attr,
+               char *buf)
+{
+       return amdgpu_get_pp_dpm_clock(dev, PP_MCLK, buf);
 }
 
 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
@@ -1194,67 +1179,14 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
                const char *buf,
                size_t count)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       uint32_t mask = 0;
-       int ret;
-
-       if (amdgpu_in_reset(adev))
-               return -EPERM;
-
-       ret = amdgpu_read_mask(buf, count, &mask);
-       if (ret)
-               return ret;
-
-       ret = pm_runtime_get_sync(ddev->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
-
-       if (is_support_sw_smu(adev))
-               ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask);
-       else if (adev->powerplay.pp_funcs->force_clock_level)
-               ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
-
-       pm_runtime_mark_last_busy(ddev->dev);
-       pm_runtime_put_autosuspend(ddev->dev);
-
-       if (ret)
-               return -EINVAL;
-
-       return count;
+       return amdgpu_set_pp_dpm_clock(dev, PP_MCLK, buf, count);
 }
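
With the shared helpers in place, each remaining per-clock handler below reduces to the same two-line wrapper. An illustrative pair for a hypothetical clock type shows the pattern (not part of this patch; PP_EXAMPLECLK is a placeholder, not a real pp_clock_type value):

static ssize_t amdgpu_get_pp_dpm_exampleclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
        return amdgpu_get_pp_dpm_clock(dev, PP_EXAMPLECLK, buf);
}

static ssize_t amdgpu_set_pp_dpm_exampleclk(struct device *dev,
                struct device_attribute *attr,
                const char *buf,
                size_t count)
{
        return amdgpu_set_pp_dpm_clock(dev, PP_EXAMPLECLK, buf, count);
}

From userspace these files accept a space-separated list of level indices, which amdgpu_read_mask() above converts to the bitmask passed to amdgpu_dpm_force_clock_level().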
 
 static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       ssize_t size;
-       int ret;
-
-       if (amdgpu_in_reset(adev))
-               return -EPERM;
-
-       ret = pm_runtime_get_sync(ddev->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
-
-       if (is_support_sw_smu(adev))
-               size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf);
-       else if (adev->powerplay.pp_funcs->print_clock_levels)
-               size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
-       else
-               size = snprintf(buf, PAGE_SIZE, "\n");
-
-       pm_runtime_mark_last_busy(ddev->dev);
-       pm_runtime_put_autosuspend(ddev->dev);
-
-       return size;
+       return amdgpu_get_pp_dpm_clock(dev, PP_SOCCLK, buf);
 }
 
 static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
@@ -1262,69 +1194,14 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
                const char *buf,
                size_t count)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       int ret;
-       uint32_t mask = 0;
-
-       if (amdgpu_in_reset(adev))
-               return -EPERM;
-
-       ret = amdgpu_read_mask(buf, count, &mask);
-       if (ret)
-               return ret;
-
-       ret = pm_runtime_get_sync(ddev->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
-
-       if (is_support_sw_smu(adev))
-               ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask);
-       else if (adev->powerplay.pp_funcs->force_clock_level)
-               ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
-       else
-               ret = 0;
-
-       pm_runtime_mark_last_busy(ddev->dev);
-       pm_runtime_put_autosuspend(ddev->dev);
-
-       if (ret)
-               return -EINVAL;
-
-       return count;
+       return amdgpu_set_pp_dpm_clock(dev, PP_SOCCLK, buf, count);
 }
 
 static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       ssize_t size;
-       int ret;
-
-       if (amdgpu_in_reset(adev))
-               return -EPERM;
-
-       ret = pm_runtime_get_sync(ddev->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
-
-       if (is_support_sw_smu(adev))
-               size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf);
-       else if (adev->powerplay.pp_funcs->print_clock_levels)
-               size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
-       else
-               size = snprintf(buf, PAGE_SIZE, "\n");
-
-       pm_runtime_mark_last_busy(ddev->dev);
-       pm_runtime_put_autosuspend(ddev->dev);
-
-       return size;
+       return amdgpu_get_pp_dpm_clock(dev, PP_FCLK, buf);
 }
 
 static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
@@ -1332,67 +1209,14 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
                const char *buf,
                size_t count)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       int ret;
-       uint32_t mask = 0;
-
-       if (amdgpu_in_reset(adev))
-               return -EPERM;
-
-       ret = amdgpu_read_mask(buf, count, &mask);
-       if (ret)
-               return ret;
-
-       ret = pm_runtime_get_sync(ddev->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
-
-       if (is_support_sw_smu(adev))
-               ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask);
-       else if (adev->powerplay.pp_funcs->force_clock_level)
-               ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
-       else
-               ret = 0;
-
-       pm_runtime_mark_last_busy(ddev->dev);
-       pm_runtime_put_autosuspend(ddev->dev);
-
-       if (ret)
-               return -EINVAL;
-
-       return count;
+       return amdgpu_set_pp_dpm_clock(dev, PP_FCLK, buf, count);
 }
 
 static ssize_t amdgpu_get_pp_dpm_vclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       ssize_t size;
-       int ret;
-
-       if (amdgpu_in_reset(adev))
-               return -EPERM;
-
-       ret = pm_runtime_get_sync(ddev->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
-
-       if (is_support_sw_smu(adev))
-               size = smu_print_clk_levels(&adev->smu, SMU_VCLK, buf);
-       else
-               size = snprintf(buf, PAGE_SIZE, "\n");
-
-       pm_runtime_mark_last_busy(ddev->dev);
-       pm_runtime_put_autosuspend(ddev->dev);
-
-       return size;
+       return amdgpu_get_pp_dpm_clock(dev, PP_VCLK, buf);
 }
 
 static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
@@ -1400,65 +1224,14 @@ static ssize_t amdgpu_set_pp_dpm_vclk(struct device *dev,
                const char *buf,
                size_t count)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       int ret;
-       uint32_t mask = 0;
-
-       if (amdgpu_in_reset(adev))
-               return -EPERM;
-
-       ret = amdgpu_read_mask(buf, count, &mask);
-       if (ret)
-               return ret;
-
-       ret = pm_runtime_get_sync(ddev->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
-
-       if (is_support_sw_smu(adev))
-               ret = smu_force_clk_levels(&adev->smu, SMU_VCLK, mask);
-       else
-               ret = 0;
-
-       pm_runtime_mark_last_busy(ddev->dev);
-       pm_runtime_put_autosuspend(ddev->dev);
-
-       if (ret)
-               return -EINVAL;
-
-       return count;
+       return amdgpu_set_pp_dpm_clock(dev, PP_VCLK, buf, count);
 }
 
 static ssize_t amdgpu_get_pp_dpm_dclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       ssize_t size;
-       int ret;
-
-       if (amdgpu_in_reset(adev))
-               return -EPERM;
-
-       ret = pm_runtime_get_sync(ddev->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
-
-       if (is_support_sw_smu(adev))
-               size = smu_print_clk_levels(&adev->smu, SMU_DCLK, buf);
-       else
-               size = snprintf(buf, PAGE_SIZE, "\n");
-
-       pm_runtime_mark_last_busy(ddev->dev);
-       pm_runtime_put_autosuspend(ddev->dev);
-
-       return size;
+       return amdgpu_get_pp_dpm_clock(dev, PP_DCLK, buf);
 }
 
 static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
@@ -1466,67 +1239,14 @@ static ssize_t amdgpu_set_pp_dpm_dclk(struct device *dev,
                const char *buf,
                size_t count)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       int ret;
-       uint32_t mask = 0;
-
-       if (amdgpu_in_reset(adev))
-               return -EPERM;
-
-       ret = amdgpu_read_mask(buf, count, &mask);
-       if (ret)
-               return ret;
-
-       ret = pm_runtime_get_sync(ddev->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
-
-       if (is_support_sw_smu(adev))
-               ret = smu_force_clk_levels(&adev->smu, SMU_DCLK, mask);
-       else
-               ret = 0;
-
-       pm_runtime_mark_last_busy(ddev->dev);
-       pm_runtime_put_autosuspend(ddev->dev);
-
-       if (ret)
-               return -EINVAL;
-
-       return count;
+       return amdgpu_set_pp_dpm_clock(dev, PP_DCLK, buf, count);
 }
 
 static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       ssize_t size;
-       int ret;
-
-       if (amdgpu_in_reset(adev))
-               return -EPERM;
-
-       ret = pm_runtime_get_sync(ddev->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
-
-       if (is_support_sw_smu(adev))
-               size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf);
-       else if (adev->powerplay.pp_funcs->print_clock_levels)
-               size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
-       else
-               size = snprintf(buf, PAGE_SIZE, "\n");
-
-       pm_runtime_mark_last_busy(ddev->dev);
-       pm_runtime_put_autosuspend(ddev->dev);
-
-       return size;
+       return amdgpu_get_pp_dpm_clock(dev, PP_DCEFCLK, buf);
 }
 
 static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
@@ -1534,108 +1254,22 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
                const char *buf,
                size_t count)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       int ret;
-       uint32_t mask = 0;
-
-       if (amdgpu_in_reset(adev))
-               return -EPERM;
-
-       ret = amdgpu_read_mask(buf, count, &mask);
-       if (ret)
-               return ret;
-
-       ret = pm_runtime_get_sync(ddev->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
-
-       if (is_support_sw_smu(adev))
-               ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask);
-       else if (adev->powerplay.pp_funcs->force_clock_level)
-               ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
-       else
-               ret = 0;
-
-       pm_runtime_mark_last_busy(ddev->dev);
-       pm_runtime_put_autosuspend(ddev->dev);
-
-       if (ret)
-               return -EINVAL;
-
-       return count;
+       return amdgpu_set_pp_dpm_clock(dev, PP_DCEFCLK, buf, count);
 }
 
 static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
                struct device_attribute *attr,
                char *buf)
 {
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       ssize_t size;
-       int ret;
-
-       if (amdgpu_in_reset(adev))
-               return -EPERM;
-
-       ret = pm_runtime_get_sync(ddev->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
-
-       if (is_support_sw_smu(adev))
-               size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf);
-       else if (adev->powerplay.pp_funcs->print_clock_levels)
-               size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
-       else
-               size = snprintf(buf, PAGE_SIZE, "\n");
-
-       pm_runtime_mark_last_busy(ddev->dev);
-       pm_runtime_put_autosuspend(ddev->dev);
-
-       return size;
+       return amdgpu_get_pp_dpm_clock(dev, PP_PCIE, buf);
 }
 
 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
                struct device_attribute *attr,
-               const char *buf,
-               size_t count)
-{
-       struct drm_device *ddev = dev_get_drvdata(dev);
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       int ret;
-       uint32_t mask = 0;
-
-       if (amdgpu_in_reset(adev))
-               return -EPERM;
-
-       ret = amdgpu_read_mask(buf, count, &mask);
-       if (ret)
-               return ret;
-
-       ret = pm_runtime_get_sync(ddev->dev);
-       if (ret < 0) {
-               pm_runtime_put_autosuspend(ddev->dev);
-               return ret;
-       }
-
-       if (is_support_sw_smu(adev))
-               ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask);
-       else if (adev->powerplay.pp_funcs->force_clock_level)
-               ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
-       else
-               ret = 0;
-
-       pm_runtime_mark_last_busy(ddev->dev);
-       pm_runtime_put_autosuspend(ddev->dev);
-
-       if (ret)
-               return -EINVAL;
-
-       return count;
+               const char *buf,
+               size_t count)
+{
+       return amdgpu_set_pp_dpm_clock(dev, PP_PCIE, buf, count);
 }
 
 static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
@@ -1649,6 +1283,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1664,7 +1300,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+       return sysfs_emit(buf, "%d\n", value);
 }
 
 static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
@@ -1679,6 +1315,8 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = kstrtol(buf, 0, &value);
 
@@ -1722,6 +1360,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1737,7 +1377,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+       return sysfs_emit(buf, "%d\n", value);
 }
 
 static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
@@ -1752,6 +1392,8 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = kstrtol(buf, 0, &value);
 
@@ -1815,6 +1457,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -1822,9 +1466,7 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev))
-               size = smu_get_power_profile_mode(&adev->smu, buf);
-       else if (adev->powerplay.pp_funcs->get_power_profile_mode)
+       if (adev->powerplay.pp_funcs->get_power_profile_mode)
                size = amdgpu_dpm_get_power_profile_mode(adev, buf);
        else
                size = snprintf(buf, PAGE_SIZE, "\n");
@@ -1855,6 +1497,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        tmp[0] = *(buf);
        tmp[1] = '\0';
@@ -1888,9 +1532,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev))
-               ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true);
-       else if (adev->powerplay.pp_funcs->set_power_profile_mode)
+       if (adev->powerplay.pp_funcs->set_power_profile_mode)
                ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
 
        pm_runtime_mark_last_busy(ddev->dev);
@@ -1920,6 +1562,8 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(ddev->dev);
        if (r < 0) {
@@ -1937,7 +1581,7 @@ static ssize_t amdgpu_get_gpu_busy_percent(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+       return sysfs_emit(buf, "%d\n", value);
 }
 
 /**
@@ -1958,6 +1602,8 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(ddev->dev);
        if (r < 0) {
@@ -1975,7 +1621,7 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", value);
+       return sysfs_emit(buf, "%d\n", value);
 }
 
 /**
@@ -2001,6 +1647,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (adev->flags & AMD_IS_APU)
                return -ENODATA;
@@ -2019,8 +1667,8 @@ static ssize_t amdgpu_get_pcie_bw(struct device *dev,
        pm_runtime_mark_last_busy(ddev->dev);
        pm_runtime_put_autosuspend(ddev->dev);
 
-       return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n",
-                       count0, count1, pcie_get_mps(adev->pdev));
+       return sysfs_emit(buf, "%llu %llu %i\n",
+                         count0, count1, pcie_get_mps(adev->pdev));
 }
 
 /**
@@ -2042,9 +1690,11 @@ static ssize_t amdgpu_get_unique_id(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (adev->unique_id)
-               return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id);
+               return sysfs_emit(buf, "%016llx\n", adev->unique_id);
 
        return 0;
 }
@@ -2071,10 +1721,10 @@ static ssize_t amdgpu_get_thermal_throttling_logging(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = drm_to_adev(ddev);
 
-       return snprintf(buf, PAGE_SIZE, "%s: thermal throttling logging %s, with interval %d seconds\n",
-                       adev_to_drm(adev)->unique,
-                       atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
-                       adev->throttling_logging_rs.interval / HZ + 1);
+       return sysfs_emit(buf, "%s: thermal throttling logging %s, with interval %d seconds\n",
+                         adev_to_drm(adev)->unique,
+                         atomic_read(&adev->throttling_logging_enabled) ? "enabled" : "disabled",
+                         adev->throttling_logging_rs.interval / HZ + 1);
 }
 
 static ssize_t amdgpu_set_thermal_throttling_logging(struct device *dev,
@@ -2140,6 +1790,8 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(ddev->dev);
        if (ret < 0) {
@@ -2147,9 +1799,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev))
-               size = smu_sys_get_gpu_metrics(&adev->smu, &gpu_metrics);
-       else if (adev->powerplay.pp_funcs->get_gpu_metrics)
+       if (adev->powerplay.pp_funcs->get_gpu_metrics)
                size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
 
        if (size <= 0)
@@ -2167,9 +1817,193 @@ out:
        return size;
 }
 
+/**
+ * DOC: smartshift_apu_power
+ *
+ * The amdgpu driver provides a sysfs API for reporting the APU power
+ * share if the platform supports smartshift. The value is expressed as
+ * a percentage of the STAPM limit (the total APU power limit). For
+ * example, if APU power is 130% of the STAPM limit, the APU is using
+ * 30% of the dGPU's headroom.
+ */
+
+static ssize_t amdgpu_get_smartshift_apu_power(struct device *dev, struct device_attribute *attr,
+                                              char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(ddev);
+       uint32_t ss_power, size;
+       int r = 0;
+
+       if (amdgpu_in_reset(adev))
+               return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
+
+       r = pm_runtime_get_sync(ddev->dev);
+       if (r < 0) {
+               pm_runtime_put_autosuspend(ddev->dev);
+               return r;
+       }
+
+       r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
+                                  (void *)&ss_power, &size);
+       if (r)
+               goto out;
+
+       r = sysfs_emit(buf, "%u%%\n", ss_power);
+
+out:
+       pm_runtime_mark_last_busy(ddev->dev);
+       pm_runtime_put_autosuspend(ddev->dev);
+       return r;
+}
+
+/**
+ * DOC: smartshift_dgpu_power
+ *
+ * The amdgpu driver provides a sysfs API for reporting the dGPU power
+ * share if the device is in HG mode and supports smartshift. The value
+ * is expressed as a percentage of the STAPM limit (the total APU power
+ * limit). For example, if dGPU power is 20% above the STAPM limit
+ * (120%), the dGPU is using 20% of the APU's power headroom.
+ */
+
+static ssize_t amdgpu_get_smartshift_dgpu_power(struct device *dev, struct device_attribute *attr,
+                                               char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(ddev);
+       uint32_t ss_power, size;
+       int r = 0;
+
+       if (amdgpu_in_reset(adev))
+               return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
+
+       r = pm_runtime_get_sync(ddev->dev);
+       if (r < 0) {
+               pm_runtime_put_autosuspend(ddev->dev);
+               return r;
+       }
+
+       r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
+                                  (void *)&ss_power, &size);
+
+       if (r)
+               goto out;
+
+       r = sysfs_emit(buf, "%u%%\n", ss_power);
+
+out:
+       pm_runtime_mark_last_busy(ddev->dev);
+       pm_runtime_put_autosuspend(ddev->dev);
+       return r;
+}
+
+/**
+ * DOC: smartshift_bias
+ *
+ * The amdgpu driver provides a sysfs API for reporting the
+ * smartshift (SS2.0) bias level. The value ranges from -100 to 100;
+ * the default is 0. -100 sets maximum preference to the APU and
+ * 100 sets maximum preference to the dGPU.
+ */
+
+static ssize_t amdgpu_get_smartshift_bias(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       int r = 0;
+
+       r = sysfs_emit(buf, "%d\n", amdgpu_smartshift_bias);
+
+       return r;
+}
+
+static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
+                                         struct device_attribute *attr,
+                                         const char *buf, size_t count)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(ddev);
+       int r = 0;
+       int bias = 0;
+
+       if (amdgpu_in_reset(adev))
+               return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
+
+       r = pm_runtime_get_sync(ddev->dev);
+       if (r < 0) {
+               pm_runtime_put_autosuspend(ddev->dev);
+               return r;
+       }
+
+       r = kstrtoint(buf, 10, &bias);
+       if (r)
+               goto out;
+
+       if (bias > AMDGPU_SMARTSHIFT_MAX_BIAS)
+               bias = AMDGPU_SMARTSHIFT_MAX_BIAS;
+       else if (bias < AMDGPU_SMARTSHIFT_MIN_BIAS)
+               bias = AMDGPU_SMARTSHIFT_MIN_BIAS;
+
+       amdgpu_smartshift_bias = bias;
+       r = count;
+
+       /* TODO: update bias level with SMU message */
+
+out:
+       pm_runtime_mark_last_busy(ddev->dev);
+       pm_runtime_put_autosuspend(ddev->dev);
+       return r;
+}
+
+
+static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+                               uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+       uint32_t ss_power, size;
+
+       if (!amdgpu_acpi_is_power_shift_control_supported())
+               *states = ATTR_STATE_UNSUPPORTED;
+       else if ((adev->flags & AMD_IS_PX) &&
+                !amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+               *states = ATTR_STATE_UNSUPPORTED;
+       else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
+                (void *)&ss_power, &size))
+               *states = ATTR_STATE_UNSUPPORTED;
+       else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
+                (void *)&ss_power, &size))
+               *states = ATTR_STATE_UNSUPPORTED;
+
+       return 0;
+}
+
+static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+                              uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+       uint32_t ss_power, size;
+
+       if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev)))
+               *states = ATTR_STATE_UNSUPPORTED;
+       else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE,
+                (void *)&ss_power, &size))
+               *states = ATTR_STATE_UNSUPPORTED;
+       else if (amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE,
+                (void *)&ss_power, &size))
+               *states = ATTR_STATE_UNSUPPORTED;
+
+       return 0;
+}
+
 static struct amdgpu_device_attr amdgpu_device_attrs[] = {
        AMDGPU_DEVICE_ATTR_RW(power_dpm_state,                          ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-       AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,        ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RW(power_dpm_force_performance_level,        ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
        AMDGPU_DEVICE_ATTR_RO(pp_num_states,                            ATTR_FLAG_BASIC),
        AMDGPU_DEVICE_ATTR_RO(pp_cur_state,                             ATTR_FLAG_BASIC),
        AMDGPU_DEVICE_ATTR_RW(pp_force_state,                           ATTR_FLAG_BASIC),
@@ -2193,6 +2027,12 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
        AMDGPU_DEVICE_ATTR_RO(unique_id,                                ATTR_FLAG_BASIC),
        AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging,               ATTR_FLAG_BASIC),
        AMDGPU_DEVICE_ATTR_RO(gpu_metrics,                              ATTR_FLAG_BASIC),
+       AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power,                     ATTR_FLAG_BASIC,
+                             .attr_update = ss_power_attr_update),
+       AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power,                    ATTR_FLAG_BASIC,
+                             .attr_update = ss_power_attr_update),
+       AMDGPU_DEVICE_ATTR_RW(smartshift_bias,                          ATTR_FLAG_BASIC,
+                             .attr_update = ss_bias_attr_update),
 };
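
A userspace sketch consuming the three new attributes registered above (illustrative only; the card0 path is an assumption):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#define SS_DIR "/sys/class/drm/card0/device/"   /* assumed card index */

static void ss_show(const char *name)
{
        char path[128], val[32] = {0};
        int fd;

        snprintf(path, sizeof(path), SS_DIR "%s", name);
        fd = open(path, O_RDONLY);
        if (fd < 0)
                return;
        if (read(fd, val, sizeof(val) - 1) > 0)
                printf("%s: %s", name, val);   /* share as a percentage */
        close(fd);
}

int main(void)
{
        int fd;

        ss_show("smartshift_apu_power");
        ss_show("smartshift_dgpu_power");

        /* Bias toward the dGPU; the driver clamps values to [-100, 100]. */
        fd = open(SS_DIR "smartshift_bias", O_WRONLY);
        if (fd >= 0) {
                write(fd, "50", 2);
                close(fd);
        }
        return 0;
}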
 
 static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
@@ -2214,7 +2054,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
                if (asic_type < CHIP_VEGA10)
                        *states = ATTR_STATE_UNSUPPORTED;
        } else if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
-               if (asic_type < CHIP_VEGA10 || asic_type == CHIP_ARCTURUS)
+               if (asic_type < CHIP_VEGA10 ||
+                   asic_type == CHIP_ARCTURUS ||
+                   asic_type == CHIP_ALDEBARAN)
                        *states = ATTR_STATE_UNSUPPORTED;
        } else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
                if (asic_type < CHIP_VEGA20)
@@ -2261,6 +2103,14 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
                }
        }
 
+       if (DEVICE_ATTR_IS(pp_dpm_dcefclk)) {
+               /* SMU MP1 does not support dcefclk level setting */
+               if (asic_type >= CHIP_NAVI10) {
+                       dev_attr->attr.mode &= ~S_IWUGO;
+                       dev_attr->store = NULL;
+               }
+       }
+
 #undef DEVICE_ATTR_IS
 
        return 0;
@@ -2282,7 +2132,7 @@ static int amdgpu_device_attr_create(struct amdgpu_device *adev,
 
        BUG_ON(!attr);
 
-       attr_update = attr->attr_update ? attr_update : default_attr_update;
+       attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
 
        ret = attr_update(adev, attr, mask, &attr_states);
        if (ret) {
@@ -2370,6 +2220,8 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (channel >= PP_TEMP_MAX)
                return -EINVAL;
@@ -2407,7 +2259,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
@@ -2423,7 +2275,7 @@ static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev,
        else
                temp = adev->pm.dpm.thermal.max_temp;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
@@ -2439,7 +2291,7 @@ static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev,
        else
                temp = adev->pm.dpm.thermal.max_hotspot_crit_temp;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
@@ -2455,7 +2307,7 @@ static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev,
        else
                temp = adev->pm.dpm.thermal.max_mem_crit_temp;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
@@ -2467,7 +2319,7 @@ static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev,
        if (channel >= PP_TEMP_MAX)
                return -EINVAL;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label);
+       return sysfs_emit(buf, "%s\n", temp_label[channel].label);
 }
 
 static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
@@ -2493,7 +2345,7 @@ static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev,
                break;
        }
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+       return sysfs_emit(buf, "%d\n", temp);
 }
 
 static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
@@ -2506,6 +2358,8 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (ret < 0) {
@@ -2513,22 +2367,18 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               pwm_mode = smu_get_fan_control_mode(&adev->smu);
-       } else {
-               if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
-                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-                       return -EINVAL;
-               }
-
-               pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+       if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
+               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+               return -EINVAL;
        }
 
+       pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
-       return sprintf(buf, "%i\n", pwm_mode);
+       return sprintf(buf, "%u\n", pwm_mode);
 }
 
 static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
@@ -2542,6 +2392,8 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = kstrtoint(buf, 10, &value);
        if (err)
@@ -2553,18 +2405,14 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               smu_set_fan_control_mode(&adev->smu, value);
-       } else {
-               if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
-                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-                       return -EINVAL;
-               }
-
-               amdgpu_dpm_set_fan_control_mode(adev, value);
+       if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
+               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+               return -EINVAL;
        }
 
+       amdgpu_dpm_set_fan_control_mode(adev, value);
+
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
@@ -2596,6 +2444,8 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2603,11 +2453,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
                return err;
        }
 
-       if (is_support_sw_smu(adev))
-               pwm_mode = smu_get_fan_control_mode(&adev->smu);
-       else
-               pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
-
+       pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
        if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
                pr_info("manual fan speed control should be enabled first\n");
                pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -2624,9 +2470,7 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 
        value = (value * 100) / 255;
 
-       if (is_support_sw_smu(adev))
-               err = smu_set_fan_speed_percent(&adev->smu, value);
-       else if (adev->powerplay.pp_funcs->set_fan_speed_percent)
+       if (adev->powerplay.pp_funcs->set_fan_speed_percent)
                err = amdgpu_dpm_set_fan_speed_percent(adev, value);
        else
                err = -EINVAL;
@@ -2650,6 +2494,8 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2657,9 +2503,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
                return err;
        }
 
-       if (is_support_sw_smu(adev))
-               err = smu_get_fan_speed_percent(&adev->smu, &speed);
-       else if (adev->powerplay.pp_funcs->get_fan_speed_percent)
+       if (adev->powerplay.pp_funcs->get_fan_speed_percent)
                err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
        else
                err = -EINVAL;
@@ -2685,6 +2529,8 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2692,9 +2538,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
                return err;
        }
 
-       if (is_support_sw_smu(adev))
-               err = smu_get_fan_speed_rpm(&adev->smu, &speed);
-       else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
+       if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
                err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
        else
                err = -EINVAL;
@@ -2719,6 +2563,8 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2735,7 +2581,7 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm);
+       return sysfs_emit(buf, "%d\n", min_rpm);
 }
 
 static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
@@ -2749,6 +2595,8 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2765,7 +2613,7 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm);
+       return sysfs_emit(buf, "%d\n", max_rpm);
 }
 
 static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
@@ -2778,6 +2626,8 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2785,9 +2635,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
                return err;
        }
 
-       if (is_support_sw_smu(adev))
-               err = smu_get_fan_speed_rpm(&adev->smu, &rpm);
-       else if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
+       if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
                err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
        else
                err = -EINVAL;
@@ -2812,6 +2660,8 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (err < 0) {
@@ -2819,10 +2669,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
                return err;
        }
 
-       if (is_support_sw_smu(adev))
-               pwm_mode = smu_get_fan_control_mode(&adev->smu);
-       else
-               pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+       pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
 
        if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
                pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
@@ -2837,9 +2684,7 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
                return err;
        }
 
-       if (is_support_sw_smu(adev))
-               err = smu_set_fan_speed_rpm(&adev->smu, value);
-       else if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
+       if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
                err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
        else
                err = -EINVAL;
@@ -2863,6 +2708,8 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        ret = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (ret < 0) {
@@ -2870,18 +2717,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
                return ret;
        }
 
-       if (is_support_sw_smu(adev)) {
-               pwm_mode = smu_get_fan_control_mode(&adev->smu);
-       } else {
-               if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
-                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-                       return -EINVAL;
-               }
-
-               pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+       if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
+               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+               return -EINVAL;
        }
 
+       pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 
@@ -2900,6 +2743,8 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        err = kstrtoint(buf, 10, &value);
        if (err)
@@ -2918,16 +2763,12 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
                return err;
        }
 
-       if (is_support_sw_smu(adev)) {
-               smu_set_fan_control_mode(&adev->smu, pwm_mode);
-       } else {
-               if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
-                       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
-                       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
-                       return -EINVAL;
-               }
-               amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+       if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
+               pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+               return -EINVAL;
        }
+       amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
 
        pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
        pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2945,6 +2786,8 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -2962,14 +2805,14 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx);
+       return sysfs_emit(buf, "%d\n", vddgfx);
 }
 
 static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev,
                                              struct device_attribute *attr,
                                              char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "vddgfx\n");
+       return sysfs_emit(buf, "vddgfx\n");
 }
 
 static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
@@ -2982,6 +2825,8 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        /* only APUs have vddnb */
        if  (!(adev->flags & AMD_IS_APU))
@@ -3003,14 +2848,14 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%d\n", vddnb);
+       return sysfs_emit(buf, "%d\n", vddnb);
 }
 
 static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev,
                                              struct device_attribute *attr,
                                              char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "vddnb\n");
+       return sysfs_emit(buf, "vddnb\n");
 }
 
 static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
@@ -3024,6 +2869,8 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -3044,7 +2891,7 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
        /* convert to microwatts */
        uw = (query >> 8) * 1000000 + (query & 0xff) * 1000;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", uw);
+       return sysfs_emit(buf, "%u\n", uw);
 }
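A worked example of the microwatt conversion above, with the raw sensor value assumed purely for illustration:

/*
 *   query = 0x2A80
 *   uw    = (0x2A80 >> 8) * 1000000 + (0x2A80 & 0xff) * 1000
 *         = 42 * 1000000 + 128 * 1000
 *         = 42128000   (reported to hwmon as 42128000 uW, i.e. ~42.13 W)
 */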
 
 static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev,
@@ -3059,13 +2906,17 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
                                         char *buf)
 {
        struct amdgpu_device *adev = dev_get_drvdata(dev);
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int limit_type = to_sensor_dev_attr(attr)->index;
        uint32_t limit = limit_type << 24;
+       uint32_t max_limit = 0;
        ssize_t size;
        int r;
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -3076,9 +2927,10 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
        if (is_support_sw_smu(adev)) {
                smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_MAX);
                size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
-       } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
-               adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
-               size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+       } else if (pp_funcs && pp_funcs->get_power_limit) {
+               pp_funcs->get_power_limit(adev->powerplay.pp_handle,
+                               &limit, &max_limit, true);
+               size = snprintf(buf, PAGE_SIZE, "%u\n", max_limit * 1000000);
        } else {
                size = snprintf(buf, PAGE_SIZE, "\n");
        }
@@ -3094,6 +2946,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
                                         char *buf)
 {
        struct amdgpu_device *adev = dev_get_drvdata(dev);
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int limit_type = to_sensor_dev_attr(attr)->index;
        uint32_t limit = limit_type << 24;
        ssize_t size;
@@ -3101,6 +2954,8 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -3111,8 +2966,9 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
        if (is_support_sw_smu(adev)) {
                smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_CURRENT);
                size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
-       } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
-               adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
+       } else if (pp_funcs && pp_funcs->get_power_limit) {
+               pp_funcs->get_power_limit(adev->powerplay.pp_handle,
+                               &limit, NULL, false);
                size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
        } else {
                size = snprintf(buf, PAGE_SIZE, "\n");
@@ -3124,13 +2980,51 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
        return size;
 }
 
+static ssize_t amdgpu_hwmon_show_power_cap_default(struct device *dev,
+                                        struct device_attribute *attr,
+                                        char *buf)
+{
+       struct amdgpu_device *adev = dev_get_drvdata(dev);
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       int limit_type = to_sensor_dev_attr(attr)->index;
+       uint32_t limit = limit_type << 24;
+       ssize_t size;
+       int r;
+
+       if (amdgpu_in_reset(adev))
+               return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
+
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+       if (r < 0) {
+               pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+               return r;
+       }
+
+       if (is_support_sw_smu(adev)) {
+               smu_get_power_limit(&adev->smu, &limit, SMU_PPT_LIMIT_DEFAULT);
+               size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+       } else if (pp_funcs && pp_funcs->get_power_limit) {
+               pp_funcs->get_power_limit(adev->powerplay.pp_handle,
+                               &limit, NULL, true);
+               size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+       } else {
+               size = snprintf(buf, PAGE_SIZE, "\n");
+       }
+
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+       return size;
+}
+
 static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
                                         struct device_attribute *attr,
                                         char *buf)
 {
        int limit_type = to_sensor_dev_attr(attr)->index;
 
-       return snprintf(buf, PAGE_SIZE, "%s\n",
+       return sysfs_emit(buf, "%s\n",
                limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
 }
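For illustration only: a hedged userspace sketch reading the power1_cap* attributes, including the new power1_cap_default, all reported in microwatts. The hwmon index is an assumption; a real tool would match the hwmon "name" attribute first.

/* Hypothetical userspace sketch: print the current, maximum and default
 * power caps in microwatts.  Assumes the amdgpu hwmon device is hwmon0.
 */
#include <stdio.h>

static long read_uw(const char *path)
{
	long val = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("cap:     %ld uW\n", read_uw("/sys/class/hwmon/hwmon0/power1_cap"));
	printf("cap max: %ld uW\n", read_uw("/sys/class/hwmon/hwmon0/power1_cap_max"));
	printf("default: %ld uW\n", read_uw("/sys/class/hwmon/hwmon0/power1_cap_default"));
	return 0;
}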
 
@@ -3140,12 +3034,15 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
                size_t count)
 {
        struct amdgpu_device *adev = dev_get_drvdata(dev);
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int limit_type = to_sensor_dev_attr(attr)->index;
        int err;
        u32 value;
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        if (amdgpu_sriov_vf(adev))
                return -EINVAL;
@@ -3163,10 +3060,8 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
                return err;
        }
 
-       if (is_support_sw_smu(adev))
-               err = smu_set_power_limit(&adev->smu, value);
-       else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit)
-               err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
+       if (pp_funcs && pp_funcs->set_power_limit)
+               err = pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
        else
                err = -EINVAL;
 
@@ -3189,6 +3084,8 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -3206,14 +3103,14 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
+       return sysfs_emit(buf, "%u\n", sclk * 10 * 1000);
 }
 
 static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "sclk\n");
+       return sysfs_emit(buf, "sclk\n");
 }
 
 static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
@@ -3226,6 +3123,8 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
        if (r < 0) {
@@ -3243,14 +3142,14 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
        if (r)
                return r;
 
-       return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
+       return sysfs_emit(buf, "%u\n", mclk * 10 * 1000);
 }
 
 static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
                                            struct device_attribute *attr,
                                            char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "mclk\n");
+       return sysfs_emit(buf, "mclk\n");
 }
 
 /**
@@ -3315,9 +3214,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
  *
  * - pwm1_max: pulse width modulation fan control maximum level (255)
  *
- * - fan1_min: an minimum value Unit: revolution/min (RPM)
+ * - fan1_min: a minimum value Unit: revolution/min (RPM)
  *
- * - fan1_max: an maxmum value Unit: revolution/max (RPM)
+ * - fan1_max: a maximum value Unit: revolution/min (RPM)
  *
  * - fan1_input: fan speed in RPM
  *
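For illustration only: a hedged userspace sketch of the pwm1 flow enforced earlier in this file, where pwm1 writes are rejected unless manual fan control has been selected through pwm1_enable. The hwmon index and the assumption that a value of 1 means manual mode (AMD_FAN_CTRL_MANUAL) are not taken from this patch.

/* Hypothetical userspace sketch: enable manual fan control, then set a
 * duty cycle.  pwm1 accepts 0..255, which the driver scales to percent.
 */
#include <stdio.h>

static int write_attr(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Manual mode first, otherwise the pwm1 write returns an error. */
	if (write_attr("/sys/class/hwmon/hwmon0/pwm1_enable", "1"))
		return 1;
	/* Roughly 50% duty cycle. */
	return write_attr("/sys/class/hwmon/hwmon0/pwm1", "128") ? 1 : 0;
}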
@@ -3367,11 +3266,13 @@ static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg,
 static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0);
 static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0);
 static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0);
+static SENSOR_DEVICE_ATTR(power1_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 0);
 static SENSOR_DEVICE_ATTR(power1_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 0);
 static SENSOR_DEVICE_ATTR(power2_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 1);
 static SENSOR_DEVICE_ATTR(power2_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 1);
 static SENSOR_DEVICE_ATTR(power2_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 1);
 static SENSOR_DEVICE_ATTR(power2_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 1);
+static SENSOR_DEVICE_ATTR(power2_cap_default, S_IRUGO, amdgpu_hwmon_show_power_cap_default, NULL, 1);
 static SENSOR_DEVICE_ATTR(power2_label, S_IRUGO, amdgpu_hwmon_show_power_label, NULL, 1);
 static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0);
 static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0);
@@ -3411,11 +3312,13 @@ static struct attribute *hwmon_attributes[] = {
        &sensor_dev_attr_power1_cap_max.dev_attr.attr,
        &sensor_dev_attr_power1_cap_min.dev_attr.attr,
        &sensor_dev_attr_power1_cap.dev_attr.attr,
+       &sensor_dev_attr_power1_cap_default.dev_attr.attr,
        &sensor_dev_attr_power1_label.dev_attr.attr,
        &sensor_dev_attr_power2_average.dev_attr.attr,
        &sensor_dev_attr_power2_cap_max.dev_attr.attr,
        &sensor_dev_attr_power2_cap_min.dev_attr.attr,
        &sensor_dev_attr_power2_cap.dev_attr.attr,
+       &sensor_dev_attr_power2_cap_default.dev_attr.attr,
        &sensor_dev_attr_power2_label.dev_attr.attr,
        &sensor_dev_attr_freq1_input.dev_attr.attr,
        &sensor_dev_attr_freq1_label.dev_attr.attr,
@@ -3514,7 +3417,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
              (adev->asic_type != CHIP_VANGOGH))) &&    /* not implemented yet */
            (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
             attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
-            attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
+            attr == &sensor_dev_attr_power1_cap.dev_attr.attr ||
+            attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr))
                return 0;
 
        if (((adev->family == AMDGPU_FAMILY_SI) ||
@@ -3580,6 +3484,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
                 attr == &sensor_dev_attr_power2_cap_max.dev_attr.attr ||
             attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
                 attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
+                attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
                 attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
                 attr == &sensor_dev_attr_power1_label.dev_attr.attr))
                return 0;
@@ -3784,16 +3689,17 @@ static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags)
                           (flags & clocks[i].flag) ? "On" : "Off");
 }
 
-static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
+static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
 {
-       struct drm_info_node *node = (struct drm_info_node *) m->private;
-       struct drm_device *dev = node->minor->dev;
-       struct amdgpu_device *adev = drm_to_adev(dev);
+       struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+       struct drm_device *dev = adev_to_drm(adev);
        u32 flags = 0;
        int r;
 
        if (amdgpu_in_reset(adev))
                return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
 
        r = pm_runtime_get_sync(dev->dev);
        if (r < 0) {
@@ -3836,16 +3742,62 @@ out:
        return r;
 }
 
-static const struct drm_info_list amdgpu_pm_info_list[] = {
-       {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL},
+DEFINE_SHOW_ATTRIBUTE(amdgpu_debugfs_pm_info);
+
+/*
+ * amdgpu_pm_prv_buffer_read - Read memory region allocated to FW
+ *
+ * Reads debug memory region allocated to PMFW
+ */
+static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
+                                        size_t size, loff_t *pos)
+{
+       struct amdgpu_device *adev = file_inode(f)->i_private;
+       const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+       void *pp_handle = adev->powerplay.pp_handle;
+       size_t smu_prv_buf_size;
+       void *smu_prv_buf;
+
+       if (amdgpu_in_reset(adev))
+               return -EPERM;
+       if (adev->in_suspend && !adev->in_runpm)
+               return -EPERM;
+
+       if (pp_funcs && pp_funcs->get_smu_prv_buf_details)
+               pp_funcs->get_smu_prv_buf_details(pp_handle, &smu_prv_buf,
+                                                 &smu_prv_buf_size);
+       else
+               return -ENOSYS;
+
+       if (!smu_prv_buf || !smu_prv_buf_size)
+               return -EINVAL;
+
+       return simple_read_from_buffer(buf, size, pos, smu_prv_buf,
+                                      smu_prv_buf_size);
+}
+
+static const struct file_operations amdgpu_debugfs_pm_prv_buffer_fops = {
+       .owner = THIS_MODULE,
+       .open = simple_open,
+       .read = amdgpu_pm_prv_buffer_read,
+       .llseek = default_llseek,
 };
+
 #endif
 
-int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
+void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
-       return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
-#else
-       return 0;
+       struct drm_minor *minor = adev_to_drm(adev)->primary;
+       struct dentry *root = minor->debugfs_root;
+
+       debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
+                           &amdgpu_debugfs_pm_info_fops);
+
+       if (adev->pm.smu_prv_buffer_size > 0)
+               debugfs_create_file_size("amdgpu_pm_prv_buffer", 0444, root,
+                                        adev,
+                                        &amdgpu_debugfs_pm_prv_buffer_fops,
+                                        adev->pm.smu_prv_buffer_size);
 #endif
 }
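For illustration only: a hedged userspace sketch reading the debugfs entries registered in amdgpu_debugfs_pm_init(). The debugfs mount point and DRM minor number are assumptions, and reading these files normally requires root.

/* Hypothetical userspace sketch: dump amdgpu_pm_info and, when present,
 * the PMFW private buffer.  Assumes debugfs at /sys/kernel/debug and
 * DRM minor 0.
 */
#include <stdio.h>

static void dump_file(const char *path)
{
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(void)
{
	dump_file("/sys/kernel/debug/dri/0/amdgpu_pm_info");
	/* Only created when adev->pm.smu_prv_buffer_size > 0; the contents
	 * are raw PMFW debug data, so printing them as text is only for
	 * demonstration.
	 */
	dump_file("/sys/kernel/debug/dri/0/amdgpu_pm_prv_buffer");
	return 0;
}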