drm/amd/powerplay: update swSMU VCN/JPEG PG logic
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 0c21e5d..82659b7 100644
  *
  */
 
+#define SWSMU_CODE_LAYER_L2
+
 #include <linux/firmware.h>
 #include <linux/pci.h>
+#include <linux/i2c.h>
 #include "amdgpu.h"
 #include "amdgpu_smu.h"
-#include "smu_internal.h"
 #include "atomfirmware.h"
 #include "amdgpu_atomfirmware.h"
 #include "amdgpu_atombios.h"
@@ -42,6 +44,7 @@
 #include "thm/thm_11_0_2_sh_mask.h"
 
 #include "asic_reg/mp/mp_11_0_sh_mask.h"
+#include "smu_cmn.h"
 
 /*
  * DO NOT use these for err/warn/info/debug messages.
@@ -53,6 +56,8 @@
 #undef pr_info
 #undef pr_debug
 
+/*
+ * The i2c_adapter handed to the I2C callbacks below is embedded in
+ * struct amdgpu_device as pm.smu_i2c; container_of() maps it back to
+ * the owning device.
+ */
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
+
 #define FEATURE_MASK(feature) (1ULL << feature)
 #define SMC_DPM_FEATURE ( \
        FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
        FEATURE_MASK(FEATURE_DPM_LINK_BIT)       | \
        FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT))
 
-#define MSG_MAP(msg, index, valid_in_vf) \
-       [SMU_MSG_##msg] = {1, (index), (valid_in_vf)}
-
-static struct smu_11_0_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
+static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,                  1),
        MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetSmuVersion,                1),
        MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,           1),
@@ -138,7 +140,7 @@ static struct smu_11_0_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(GetVoltageByDpmOverdrive,       PPSMC_MSG_GetVoltageByDpmOverdrive,     0),
 };
 
-static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
+static struct cmn2asic_mapping navi10_clk_map[SMU_CLK_COUNT] = {
        CLK_MAP(GFXCLK, PPCLK_GFXCLK),
        CLK_MAP(SCLK,   PPCLK_GFXCLK),
        CLK_MAP(SOCCLK, PPCLK_SOCCLK),
@@ -153,7 +155,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = {
        CLK_MAP(PHYCLK, PPCLK_PHYCLK),
 };
 
-static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
+static struct cmn2asic_mapping navi10_feature_mask_map[SMU_FEATURE_COUNT] = {
        FEA_MAP(DPM_PREFETCHER),
        FEA_MAP(DPM_GFXCLK),
        FEA_MAP(DPM_GFX_PACE),
@@ -199,7 +201,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUN
        FEA_MAP(APCC_DFLL),
 };
 
-static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
+static struct cmn2asic_mapping navi10_table_map[SMU_TABLE_COUNT] = {
        TAB_MAP(PPTABLE),
        TAB_MAP(WATERMARKS),
        TAB_MAP(AVFS),
@@ -214,12 +216,12 @@ static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = {
        TAB_MAP(PACE),
 };
 
-static struct smu_11_0_cmn2aisc_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
+static struct cmn2asic_mapping navi10_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
        PWR_MAP(AC),
        PWR_MAP(DC),
 };
 
-static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
+static struct cmn2asic_mapping navi10_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,       WORKLOAD_PPLIB_DEFAULT_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,         WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,          WORKLOAD_PPLIB_POWER_SAVING_BIT),
@@ -229,100 +231,6 @@ static struct smu_11_0_cmn2aisc_mapping navi10_workload_map[PP_SMC_POWER_PROFILE
        WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,               WORKLOAD_PPLIB_CUSTOM_BIT),
 };
 
-static int navi10_get_smu_msg_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_msg_mapping mapping;
-
-       if (index >= SMU_MSG_MAX_COUNT)
-               return -EINVAL;
-
-       mapping = navi10_message_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       if (amdgpu_sriov_vf(smc->adev) && !mapping.valid_in_vf)
-               return -EACCES;
-
-       return mapping.map_to;
-}
-
-static int navi10_get_smu_clk_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_CLK_COUNT)
-               return -EINVAL;
-
-       mapping = navi10_clk_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int navi10_get_smu_feature_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_FEATURE_COUNT)
-               return -EINVAL;
-
-       mapping = navi10_feature_mask_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int navi10_get_smu_table_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_TABLE_COUNT)
-               return -EINVAL;
-
-       mapping = navi10_table_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-static int navi10_get_pwr_src_index(struct smu_context *smc, uint32_t index)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (index >= SMU_POWER_SOURCE_COUNT)
-               return -EINVAL;
-
-       mapping = navi10_pwr_src_map[index];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
-
-static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
-{
-       struct smu_11_0_cmn2aisc_mapping mapping;
-
-       if (profile > PP_SMC_POWER_PROFILE_CUSTOM)
-               return -EINVAL;
-
-       mapping = navi10_workload_map[profile];
-       if (!(mapping.valid_mapping)) {
-               return -EINVAL;
-       }
-
-       return mapping.map_to;
-}
-
 static bool is_asic_secure(struct smu_context *smu)
 {
        struct amdgpu_device *adev = smu->adev;
@@ -544,15 +452,23 @@ static int navi10_setup_pptable(struct smu_context *smu)
        return ret;
 }
 
-static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
+static int navi10_tables_init(struct smu_context *smu)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *tables = smu_table->tables;
+       struct amdgpu_device *adev = smu->adev;
 
        SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
-       SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+       if (adev->asic_type == CHIP_NAVI12)
+               SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_NV12_t),
+                              PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+       else
+               SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+                              PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
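+       /* Table used to forward software I2C (EEPROM) requests to the SMU. */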
+       SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -562,42 +478,83 @@ static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
                       sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
                       AMDGPU_GEM_DOMAIN_VRAM);
 
-       smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+       smu_table->metrics_table = kzalloc(adev->asic_type == CHIP_NAVI12 ?
+                                          sizeof(SmuMetrics_NV12_t) :
+                                          sizeof(SmuMetrics_t), GFP_KERNEL);
        if (!smu_table->metrics_table)
-               return -ENOMEM;
+               goto err0_out;
        smu_table->metrics_time = 0;
 
+       smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_0);
+       smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+       if (!smu_table->gpu_metrics_table)
+               goto err1_out;
+
        smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
        if (!smu_table->watermarks_table)
-               return -ENOMEM;
+               goto err2_out;
 
        return 0;
+
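+/*
+ * Error labels unwind in reverse allocation order, freeing only what was
+ * successfully allocated before the failure.
+ */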
+err2_out:
+       kfree(smu_table->gpu_metrics_table);
+err1_out:
+       kfree(smu_table->metrics_table);
+err0_out:
+       return -ENOMEM;
 }
 
-static int navi10_get_smu_metrics_data(struct smu_context *smu,
-                                      MetricsMember_t member,
-                                      uint32_t *value)
+static int navi10_get_metrics_table_locked(struct smu_context *smu,
+                                          SmuMetrics_t *metrics_table,
+                                          bool bypass_cache)
 {
        struct smu_table_context *smu_table= &smu->smu_table;
-       SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
        int ret = 0;
 
-       mutex_lock(&smu->metrics_lock);
-       if (!smu_table->metrics_time ||
-            time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
-               ret = smu_update_table(smu,
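+       /*
+        * Refresh the cached metrics table at most once per millisecond,
+        * unless the caller explicitly bypasses the cache.
+        */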
+       if (bypass_cache ||
+           !smu_table->metrics_time ||
+           time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(1))) {
+               ret = smu_cmn_update_table(smu,
                                       SMU_TABLE_SMU_METRICS,
                                       0,
                                       smu_table->metrics_table,
                                       false);
                if (ret) {
                        dev_info(smu->adev->dev, "Failed to export SMU metrics table!\n");
-                       mutex_unlock(&smu->metrics_lock);
                        return ret;
                }
                smu_table->metrics_time = jiffies;
        }
 
+       if (metrics_table)
+               memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
+
+       return 0;
+}
+
+static int navi10_get_smu_metrics_data(struct smu_context *smu,
+                                      MetricsMember_t member,
+                                      uint32_t *value)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       /*
+        * This works for NV12 as well: although NV12 uses a different
+        * SmuMetrics structure from the other NV1X ASICs, both share the
+        * same offsets for the leading members (the ones used here).
+        */
+       SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+       int ret = 0;
+
+       mutex_lock(&smu->metrics_lock);
+
+       ret = navi10_get_metrics_table_locked(smu,
+                                             NULL,
+                                             false);
+       if (ret) {
+               mutex_unlock(&smu->metrics_lock);
+               return ret;
+       }
+
        switch (member) {
        case METRICS_CURR_GFXCLK:
                *value = metrics->CurrClock[PPCLK_GFXCLK];
@@ -675,9 +632,6 @@ static int navi10_allocate_dpm_context(struct smu_context *smu)
 {
        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
 
-       if (smu_dpm->dpm_context)
-               return -EINVAL;
-
        smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
                                       GFP_KERNEL);
        if (!smu_dpm->dpm_context)
@@ -688,6 +642,21 @@ static int navi10_allocate_dpm_context(struct smu_context *smu)
        return 0;
 }
 
+static int navi10_init_smc_tables(struct smu_context *smu)
+{
+       int ret = 0;
+
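+       /*
+        * Set up the ASIC-specific tables and DPM context before running
+        * the common SMU v11 table init.
+        */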
+       ret = navi10_tables_init(smu);
+       if (ret)
+               return ret;
+
+       ret = navi10_allocate_dpm_context(smu);
+       if (ret)
+               return ret;
+
+       return smu_v11_0_init_smc_tables(smu);
+}
+
 static int navi10_set_default_dpm_table(struct smu_context *smu)
 {
        struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
@@ -697,7 +666,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* socclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.soc_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_SOCCLK,
                                                     dpm_table);
@@ -715,7 +684,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* gfxclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.gfx_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_GFXCLK,
                                                     dpm_table);
@@ -733,7 +702,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* uclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.uclk_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_UCLK,
                                                     dpm_table);
@@ -751,7 +720,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* vclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.vclk_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_VCLK,
                                                     dpm_table);
@@ -769,7 +738,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* dclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.dclk_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_DCLK,
                                                     dpm_table);
@@ -787,7 +756,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* dcefclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.dcef_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_DCEFCLK,
                                                     dpm_table);
@@ -805,7 +774,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* pixelclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.pixel_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_PIXCLK,
                                                     dpm_table);
@@ -823,7 +792,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* displayclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.display_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_DISPCLK,
                                                     dpm_table);
@@ -841,7 +810,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
        /* phyclk dpm table setup */
        dpm_table = &dpm_context->dpm_tables.phy_table;
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                ret = smu_v11_0_set_single_dpm_table(smu,
                                                     SMU_PHYCLK,
                                                     dpm_table);
@@ -862,25 +831,21 @@ static int navi10_set_default_dpm_table(struct smu_context *smu)
 
 static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
                /* vcn dpm on is a prerequisite for vcn power gate messages */
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-                       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
                        if (ret)
                                return ret;
                }
-               power_gate->vcn_gated = false;
        } else {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
-                       ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
                        if (ret)
                                return ret;
                }
-               power_gate->vcn_gated = true;
        }
 
        return ret;
@@ -888,24 +853,20 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
 
 static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
 {
-       struct smu_power_context *smu_power = &smu->smu_power;
-       struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;
 
        if (enable) {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-                       ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
                        if (ret)
                                return ret;
                }
-               power_gate->jpeg_gated = false;
        } else {
-               if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
-                       ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
+               if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
+                       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
                        if (ret)
                                return ret;
                }
-               power_gate->jpeg_gated = true;
        }
 
        return ret;
@@ -918,7 +879,9 @@ static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
        MetricsMember_t member_type;
        int clk_id = 0;
 
-       clk_id = smu_clk_get_index(smu, clk_type);
+       clk_id = smu_cmn_to_asic_specific_index(smu,
+                                               CMN2ASIC_MAPPING_CLK,
+                                               clk_type);
        if (clk_id < 0)
                return clk_id;
 
@@ -956,7 +919,9 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
        DpmDescriptor_t *dpm_desc = NULL;
        uint32_t clk_index = 0;
 
-       clk_index = smu_clk_get_index(smu, clk_type);
+       clk_index = smu_cmn_to_asic_specific_index(smu,
+                                                  CMN2ASIC_MAPPING_CLK,
+                                                  clk_type);
        dpm_desc = &pptable->DpmDescriptor[clk_index];
 
        /* 0 - Fine grained DPM, 1 - Discrete DPM */
@@ -990,7 +955,6 @@ static int navi10_print_clk_levels(struct smu_context *smu,
        uint32_t gen_speed, lane_width;
        struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
        struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
-       struct amdgpu_device *adev = smu->adev;
        PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
        OverDriveTable_t *od_table =
                (OverDriveTable_t *)table_context->overdrive_table;
@@ -1044,12 +1008,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
                }
                break;
        case SMU_PCIE:
-               gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
-                            PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
-                       >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
-               lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
-                             PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
-                       >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+               gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
+               lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
                for (i = 0; i < NUM_LINK_LEVELS; i++)
                        size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
                                        (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
@@ -1337,11 +1297,11 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
        int ret = 0;
        uint32_t max_freq = 0;
 
-       ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
+       ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
        if (ret)
                return ret;
 
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_UCLK, NULL, &max_freq);
                if (ret)
                        return ret;
@@ -1358,9 +1318,9 @@ static int navi10_display_config_changed(struct smu_context *smu)
        int ret = 0;
 
        if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
-           smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
-           smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
-               ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
+           smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+           smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+               ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
                                                  smu->display_config->num_display,
                                                  NULL);
                if (ret)
@@ -1413,7 +1373,7 @@ static bool navi10_is_dpm_running(struct smu_context *smu)
        int ret = 0;
        uint32_t feature_mask[2];
        unsigned long feature_enabled;
-       ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+       ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
        feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
                           ((uint64_t)feature_mask[1] << 32));
        return !!(feature_enabled & SMC_DPM_FEATURE);
@@ -1484,11 +1444,13 @@ static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
 
        for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
                /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-               workload_type = smu_workload_get_type(smu, i);
+               workload_type = smu_cmn_to_asic_specific_index(smu,
+                                                              CMN2ASIC_MAPPING_WORKLOAD,
+                                                              i);
                if (workload_type < 0)
                        return -EINVAL;
 
-               result = smu_update_table(smu,
+               result = smu_cmn_update_table(smu,
                                          SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
                                          (void *)(&activity_monitor), false);
                if (result) {
@@ -1559,7 +1521,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
 
        if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
 
-               ret = smu_update_table(smu,
+               ret = smu_cmn_update_table(smu,
                                       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
                                       (void *)(&activity_monitor), false);
                if (ret) {
@@ -1603,7 +1565,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
                        break;
                }
 
-               ret = smu_update_table(smu,
+               ret = smu_cmn_update_table(smu,
                                       SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
                                       (void *)(&activity_monitor), true);
                if (ret) {
@@ -1613,10 +1575,12 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
        }
 
        /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
-       workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
+       workload_type = smu_cmn_to_asic_specific_index(smu,
+                                                      CMN2ASIC_MAPPING_WORKLOAD,
+                                                      smu->power_profile_mode);
        if (workload_type < 0)
                return -EINVAL;
-       smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+       smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
                                    1 << workload_type, NULL);
 
        return ret;
@@ -1632,14 +1596,14 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
        min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
        min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
 
-       if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+       if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
                clock_req.clock_type = amd_pp_dcef_clock;
                clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
 
                ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
                if (!ret) {
-                       if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
-                               ret = smu_send_smc_msg_with_param(smu,
+                       if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
+                               ret = smu_cmn_send_smc_msg_with_param(smu,
                                                                  SMU_MSG_SetMinDeepSleepDcefclk,
                                                                  min_clocks.dcef_clock_in_sr/100,
                                                                  NULL);
@@ -1653,7 +1617,7 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
                }
        }
 
-       if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
+       if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
                ret = smu_v11_0_set_hard_freq_limited_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
                if (ret) {
                        dev_err(smu->adev->dev, "[%s] Set hard min uclk failed!", __func__);
@@ -1665,68 +1629,66 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
 }
 
 static int navi10_set_watermarks_table(struct smu_context *smu,
-                                      void *watermarks, struct
-                                      dm_pp_wm_sets_with_clock_ranges_soc15
-                                      *clock_ranges)
+                                      struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
 {
-       int i;
+       Watermarks_t *table = smu->smu_table.watermarks_table;
        int ret = 0;
-       Watermarks_t *table = watermarks;
+       int i;
 
-       if (!table || !clock_ranges)
-               return -EINVAL;
+       if (clock_ranges) {
+               if (clock_ranges->num_wm_dmif_sets > 4 ||
+                   clock_ranges->num_wm_mcif_sets > 4)
+                       return -EINVAL;
 
-       if (clock_ranges->num_wm_dmif_sets > 4 ||
-           clock_ranges->num_wm_mcif_sets > 4)
-               return -EINVAL;
+               for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+                       table->WatermarkRow[1][i].MinClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[1][i].MaxClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[1][i].MinUclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[1][i].MaxUclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[1][i].WmSetting = (uint8_t)
+                                       clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+               }
 
-       for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
-               table->WatermarkRow[1][i].MinClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
-                       1000));
-               table->WatermarkRow[1][i].MaxClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
-                       1000));
-               table->WatermarkRow[1][i].MinUclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
-                       1000));
-               table->WatermarkRow[1][i].MaxUclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
-                       1000));
-               table->WatermarkRow[1][i].WmSetting = (uint8_t)
-                               clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
-       }
+               for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+                       table->WatermarkRow[0][i].MinClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[0][i].MaxClock =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[0][i].MinUclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[0][i].MaxUclk =
+                               cpu_to_le16((uint16_t)
+                               (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+                               1000));
+                       table->WatermarkRow[0][i].WmSetting = (uint8_t)
+                                       clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+               }
 
-       for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
-               table->WatermarkRow[0][i].MinClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
-                       1000));
-               table->WatermarkRow[0][i].MaxClock =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
-                       1000));
-               table->WatermarkRow[0][i].MinUclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
-                       1000));
-               table->WatermarkRow[0][i].MaxUclk =
-                       cpu_to_le16((uint16_t)
-                       (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
-                       1000));
-               table->WatermarkRow[0][i].WmSetting = (uint8_t)
-                               clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+               smu->watermarks_bitmap |= WATERMARKS_EXIST;
        }
 
-       smu->watermarks_bitmap |= WATERMARKS_EXIST;
-
        /* pass data to smu controller */
-       if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
-               ret = smu_write_watermarks_table(smu);
+       if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+            !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+               ret = smu_cmn_write_watermarks_table(smu);
                if (ret) {
                        dev_err(smu->adev->dev, "Failed to update WMTABLE!");
                        return ret;
@@ -1961,7 +1923,7 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
                        ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) :
                                (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ?
                                        pptable->PcieLaneCount[i] : pcie_width_cap);
-               ret = smu_send_smc_msg_with_param(smu,
+               ret = smu_cmn_send_smc_msg_with_param(smu,
                                          SMU_MSG_OverridePcieParameters,
                                          smu_pcie_arg,
                                          NULL);
@@ -2013,7 +1975,7 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
        uint32_t value = 0;
        int ret;
 
-       ret = smu_send_smc_msg_with_param(smu,
+       ret = smu_cmn_send_smc_msg_with_param(smu,
                                          SMU_MSG_GetVoltageByDpm,
                                          param,
                                          &value);
@@ -2047,7 +2009,7 @@ static int navi10_set_default_od_settings(struct smu_context *smu)
                (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
        int ret = 0;
 
-       ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, false);
+       ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, false);
        if (ret) {
                dev_err(smu->adev->dev, "Failed to get overdrive table!\n");
                return ret;
@@ -2181,18 +2143,11 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
                break;
        case PP_OD_COMMIT_DPM_TABLE:
                navi10_dump_od_table(smu, od_table);
-               ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
+               ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
                if (ret) {
                        dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
                        return ret;
                }
-               // no lock needed because smu_od_edit_dpm_table has it
-               ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
-                       AMD_PP_TASK_READJUST_POWER_STATE,
-                       false);
-               if (ret) {
-                       return ret;
-               }
                break;
        case PP_OD_EDIT_VDDC_CURVE:
                if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
@@ -2261,7 +2216,7 @@ static int navi10_run_btc(struct smu_context *smu)
 {
        int ret = 0;
 
-       ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
+       ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
        if (ret)
                dev_err(smu->adev->dev, "RunBtc failed!\n");
 
@@ -2273,9 +2228,9 @@ static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
        int result = 0;
 
        if (!enable)
-               result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
+               result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
        else
-               result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
+               result = smu_cmn_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
 
        return result;
 }
@@ -2304,7 +2259,7 @@ static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
        if (!navi10_need_umc_cdr_12gbps_workaround(smu->adev))
                return 0;
 
-       ret = smu_get_smc_version(smu, NULL, &smu_version);
+       ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
        if (ret)
                return ret;
 
@@ -2341,19 +2296,301 @@ static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
        return navi10_dummy_pstate_control(smu, true);
 }
 
+static void navi10_fill_i2c_req(SwI2cRequest_t  *req, bool write,
+                                 uint8_t address, uint32_t numbytes,
+                                 uint8_t *data)
+{
+       int i;
+
+       BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
+
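+       /*
+        * Request header: I2C controller port, firmware speed setting,
+        * target address and the number of per-byte commands that follow.
+        */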
+       req->I2CcontrollerPort = 0;
+       req->I2CSpeed = 2;
+       req->SlaveAddress = address;
+       req->NumCmds = numbytes;
+
+       for (i = 0; i < numbytes; i++) {
+               SwI2cCmd_t *cmd =  &req->SwI2cCmds[i];
+
+               /* The first 2 bytes are always writes, carrying the 2-byte EEPROM address */
+               if (i < 2)
+                       cmd->Cmd = 1;
+               else
+                       cmd->Cmd = write;
+
+               /* Add RESTART for a read, once the address bytes have been sent */
+               cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0;
+
+               /* Add STOP in the end */
+               /* Add STOP at the end */
+
+               /* Fill with data regardless of read or write, to simplify the code */
+               cmd->RegisterAddr = data[i];
+       }
+}
+
+static int navi10_i2c_read_data(struct i2c_adapter *control,
+                                              uint8_t address,
+                                              uint8_t *data,
+                                              uint32_t numbytes)
+{
+       uint32_t  i, ret = 0;
+       SwI2cRequest_t req;
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct smu_table_context *smu_table = &adev->smu.smu_table;
+       struct smu_table *table = &smu_table->driver_table;
+
+       memset(&req, 0, sizeof(req));
+       navi10_fill_i2c_req(&req, false, address, numbytes, data);
+
+       mutex_lock(&adev->smu.mutex);
+       /* Now read data starting with that address */
+       ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req,
+                                  true);
+       mutex_unlock(&adev->smu.mutex);
+
+       if (!ret) {
+               SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr;
+
+               /* Assume the SMU fills res.SwI2cCmds[i].Data with the bytes read */
+               for (i = 0; i < numbytes; i++)
+                       data[i] = res->SwI2cCmds[i].Data;
+
+               dev_dbg(adev->dev, "navi10_i2c_read_data, address = %x, bytes = %d, data :",
+                                 (uint16_t)address, numbytes);
+
+               print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+                              8, 1, data, numbytes, false);
+       } else
+               dev_err(adev->dev, "navi10_i2c_read_data - error occurred :%x", ret);
+
+       return ret;
+}
+
+static int navi10_i2c_write_data(struct i2c_adapter *control,
+                                               uint8_t address,
+                                               uint8_t *data,
+                                               uint32_t numbytes)
+{
+       uint32_t ret;
+       SwI2cRequest_t req;
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+
+       memset(&req, 0, sizeof(req));
+       navi10_fill_i2c_req(&req, true, address, numbytes, data);
+
+       mutex_lock(&adev->smu.mutex);
+       ret = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true);
+       mutex_unlock(&adev->smu.mutex);
+
+       if (!ret) {
+               dev_dbg(adev->dev, "navi10_i2c_write(), address = %x, bytes = %d , data: ",
+                                        (uint16_t)address, numbytes);
+
+               print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE,
+                              8, 1, data, numbytes, false);
+               /*
+                * According to the EEPROM spec, up to 10 ms is required for the
+                * EEPROM to flush its internal RX buffer after a STOP is issued at
+                * the end of a write transaction. During this time the EEPROM will
+                * not respond to any further commands, so wait a bit longer.
+                */
+               msleep(10);
+
+       } else
+               dev_err(adev->dev, "navi10_i2c_write- error occurred :%x", ret);
+
+       return ret;
+}
+
+static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
+                             struct i2c_msg *msgs, int num)
+{
+       uint32_t  i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0;
+       uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 };
+
+       for (i = 0; i < num; i++) {
+               /*
+                * The SMU interface accepts at most MAX_SW_I2C_COMMANDS bytes at a
+                * time, so the data has to be split into chunks, with each chunk
+                * sent separately.
+                */
+               data_size = msgs[i].len - 2;
+               data_chunk_size = MAX_SW_I2C_COMMANDS - 2;
+               next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff);
+               data_ptr = msgs[i].buf + 2;
+
+               for (j = 0; j < data_size / data_chunk_size; j++) {
+                       /* Insert the EEPROM destination address, bits 0-15 */
+                       data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+                       data_chunk[1] = (next_eeprom_addr & 0xff);
+
+                       if (msgs[i].flags & I2C_M_RD) {
+                               ret = navi10_i2c_read_data(i2c_adap,
+                                                            (uint8_t)msgs[i].addr,
+                                                            data_chunk, MAX_SW_I2C_COMMANDS);
+
+                               memcpy(data_ptr, data_chunk + 2, data_chunk_size);
+                       } else {
+
+                               memcpy(data_chunk + 2, data_ptr, data_chunk_size);
+
+                               ret = navi10_i2c_write_data(i2c_adap,
+                                                             (uint8_t)msgs[i].addr,
+                                                             data_chunk, MAX_SW_I2C_COMMANDS);
+                       }
+
+                       if (ret) {
+                               num = -EIO;
+                               goto fail;
+                       }
+
+                       next_eeprom_addr += data_chunk_size;
+                       data_ptr += data_chunk_size;
+               }
+
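+               /* Send any remaining bytes that do not fill a whole chunk,
+                * still prefixed by the 2-byte EEPROM offset.
+                */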
+               if (data_size % data_chunk_size) {
+                       data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff);
+                       data_chunk[1] = (next_eeprom_addr & 0xff);
+
+                       if (msgs[i].flags & I2C_M_RD) {
+                               ret = navi10_i2c_read_data(i2c_adap,
+                                                            (uint8_t)msgs[i].addr,
+                                                            data_chunk, (data_size % data_chunk_size) + 2);
+
+                               memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size);
+                       } else {
+                               memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size);
+
+                               ret = navi10_i2c_write_data(i2c_adap,
+                                                             (uint8_t)msgs[i].addr,
+                                                             data_chunk, (data_size % data_chunk_size) + 2);
+                       }
+
+                       if (ret) {
+                               num = -EIO;
+                               goto fail;
+                       }
+               }
+       }
+
+fail:
+       return num;
+}
+
+static u32 navi10_i2c_func(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+
+static const struct i2c_algorithm navi10_i2c_algo = {
+       .master_xfer = navi10_i2c_xfer,
+       .functionality = navi10_i2c_func,
+};
+
+static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+{
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       int res;
+
+       control->owner = THIS_MODULE;
+       control->class = I2C_CLASS_SPD;
+       control->dev.parent = &adev->pdev->dev;
+       control->algo = &navi10_i2c_algo;
+       snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+
+       res = i2c_add_adapter(control);
+       if (res)
+               DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+
+       return res;
+}
+
+static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+{
+       i2c_del_adapter(control);
+}
+
+static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
+                                     void **table)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct gpu_metrics_v1_0 *gpu_metrics =
+               (struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table;
+       struct amdgpu_device *adev = smu->adev;
+       SmuMetrics_NV12_t nv12_metrics = { 0 };
+       SmuMetrics_t metrics;
+       int ret = 0;
+
+       mutex_lock(&smu->metrics_lock);
+
+       ret = navi10_get_metrics_table_locked(smu,
+                                             &metrics,
+                                             true);
+       if (ret) {
+               mutex_unlock(&smu->metrics_lock);
+               return ret;
+       }
+
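+       /* NV12 carries extra fields (energy accumulator, VCN clocks/activity)
+        * in a larger metrics layout; copy it while still holding the lock.
+        */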
+       if (adev->asic_type == CHIP_NAVI12)
+               memcpy(&nv12_metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t));
+
+       mutex_unlock(&smu->metrics_lock);
+
+       smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
+
+       gpu_metrics->temperature_edge = metrics.TemperatureEdge;
+       gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
+       gpu_metrics->temperature_mem = metrics.TemperatureMem;
+       gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
+       gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
+       gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
+
+       gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
+       gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
+
+       gpu_metrics->average_socket_power = metrics.AverageSocketPower;
+
+       gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequency;
+       gpu_metrics->average_socclk_frequency = metrics.AverageSocclkFrequency;
+       gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequency;
+
+       if (adev->asic_type == CHIP_NAVI12) {
+               gpu_metrics->energy_accumulator = nv12_metrics.EnergyAccumulator;
+               gpu_metrics->average_vclk0_frequency = nv12_metrics.AverageVclkFrequency;
+               gpu_metrics->average_dclk0_frequency = nv12_metrics.AverageDclkFrequency;
+               gpu_metrics->average_mm_activity = nv12_metrics.VcnActivityPercentage;
+       }
+
+       gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
+       gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
+       gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
+       gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK];
+       gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK];
+
+       gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+
+       gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
+
+       gpu_metrics->pcie_link_width =
+                       smu_v11_0_get_current_pcie_link_width(smu);
+       gpu_metrics->pcie_link_speed =
+                       smu_v11_0_get_current_pcie_link_speed(smu);
+
+       *table = (void *)gpu_metrics;
+
+       return sizeof(struct gpu_metrics_v1_0);
+}
+
 static const struct pptable_funcs navi10_ppt_funcs = {
-       .tables_init = navi10_tables_init,
-       .alloc_dpm_context = navi10_allocate_dpm_context,
-       .get_smu_msg_index = navi10_get_smu_msg_index,
-       .get_smu_clk_index = navi10_get_smu_clk_index,
-       .get_smu_feature_index = navi10_get_smu_feature_index,
-       .get_smu_table_index = navi10_get_smu_table_index,
-       .get_smu_power_index = navi10_get_pwr_src_index,
-       .get_workload_type = navi10_get_workload_type,
        .get_allowed_feature_mask = navi10_get_allowed_feature_mask,
        .set_default_dpm_table = navi10_set_default_dpm_table,
        .dpm_set_vcn_enable = navi10_dpm_set_vcn_enable,
        .dpm_set_jpeg_enable = navi10_dpm_set_jpeg_enable,
+       .i2c_init = navi10_i2c_control_init,
+       .i2c_fini = navi10_i2c_control_fini,
        .print_clk_levels = navi10_print_clk_levels,
        .force_clk_levels = navi10_force_clk_levels,
        .populate_umd_state_clk = navi10_populate_umd_state_clk,
@@ -2377,7 +2614,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .init_microcode = smu_v11_0_init_microcode,
        .load_microcode = smu_v11_0_load_microcode,
        .fini_microcode = smu_v11_0_fini_microcode,
-       .init_smc_tables = smu_v11_0_init_smc_tables,
+       .init_smc_tables = navi10_init_smc_tables,
        .fini_smc_tables = smu_v11_0_fini_smc_tables,
        .init_power = smu_v11_0_init_power,
        .fini_power = smu_v11_0_fini_power,
@@ -2385,15 +2622,18 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .setup_pptable = navi10_setup_pptable,
        .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
        .check_fw_version = smu_v11_0_check_fw_version,
-       .write_pptable = smu_v11_0_write_pptable,
+       .write_pptable = smu_cmn_write_pptable,
        .set_driver_table_location = smu_v11_0_set_driver_table_location,
        .set_tool_table_location = smu_v11_0_set_tool_table_location,
        .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
        .system_features_control = smu_v11_0_system_features_control,
-       .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+       .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+       .send_smc_msg = smu_cmn_send_smc_msg,
        .init_display_count = smu_v11_0_init_display_count,
        .set_allowed_mask = smu_v11_0_set_allowed_mask,
-       .get_enabled_mask = smu_v11_0_get_enabled_mask,
+       .get_enabled_mask = smu_cmn_get_enabled_mask,
+       .feature_is_enabled = smu_cmn_feature_is_enabled,
+       .disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
        .notify_display_change = smu_v11_0_notify_display_change,
        .set_power_limit = smu_v11_0_set_power_limit,
        .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
@@ -2422,9 +2662,18 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .run_btc = navi10_run_btc,
        .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround,
        .set_power_source = smu_v11_0_set_power_source,
+       .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+       .set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+       .get_gpu_metrics = navi10_get_gpu_metrics,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
 {
        smu->ppt_funcs = &navi10_ppt_funcs;
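+       /* Mapping tables consumed by the common smu_cmn helpers to translate
+        * generic SMU enums into Navi10-specific message/clock/feature/table,
+        * power-source and workload indices.
+        */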
+       smu->message_map = navi10_message_map;
+       smu->clock_map = navi10_clk_map;
+       smu->feature_map = navi10_feature_mask_map;
+       smu->table_map = navi10_table_map;
+       smu->pwr_src_map = navi10_pwr_src_map;
+       smu->workload_map = navi10_workload_map;
 }