drm/amdgpu/powerplay: provide the interface to disable uclk switch for DAL
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index 350e7a6..e4d0b5b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -24,7 +24,6 @@
 #include <linux/string.h>
 #include <linux/acpi.h>
 
-#include <drm/drmP.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
 #include "dm_services.h"
@@ -149,6 +148,23 @@ static void get_default_clock_levels(
        }
 }
 
+static enum smu_clk_type dc_to_smu_clock_type(
+               enum dm_pp_clock_type dm_pp_clk_type)
+{
+#define DCCLK_MAP_SMUCLK(dcclk, smuclk) \
+       [dcclk] = smuclk
+
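+       /* Designated-initializer table mapping dm_pp clock types to the
+        * corresponding SMU clock types.
+        */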
+       static int dc_clk_type_map[] = {
+               DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_DISPLAY_CLK,  SMU_DISPCLK),
+               DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_ENGINE_CLK,   SMU_GFXCLK),
+               DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_MEMORY_CLK,   SMU_MCLK),
+               DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_DCEFCLK,      SMU_DCEFCLK),
+               DCCLK_MAP_SMUCLK(DM_PP_CLOCK_TYPE_SOCCLK,       SMU_SOCCLK),
+       };
+
+       return dc_clk_type_map[dm_pp_clk_type];
+}
+
 static enum amd_pp_clock_type dc_to_pp_clock_type(
                enum dm_pp_clock_type dm_pp_clk_type)
 {
@@ -292,7 +308,8 @@ static void pp_to_dc_clock_levels_with_voltage(
                        DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
 
        for (i = 0; i < clk_level_info->num_levels; i++) {
-               DRM_INFO("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz);
+               DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz,
+                        pp_clks->data[i].voltage_in_mv);
                clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
                clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv;
        }
@@ -317,7 +334,7 @@ bool dm_pp_get_clock_levels_by_type(
                }
        } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
                if (smu_get_clock_by_type(&adev->smu,
-                                         dc_to_pp_clock_type(clk_type),
+                                         dc_to_smu_clock_type(clk_type),
                                          &pp_clks)) {
                        get_default_clock_levels(clk_type, dc_clks);
                        return true;
@@ -630,16 +647,293 @@ void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
        pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
 }
 
+enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp,
+               struct pp_smu_wm_range_sets *ranges)
+{
+       const struct dc_context *ctx = pp->dm;
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+       struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges;
+       struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks =
+                       wm_with_clock_ranges.wm_dmif_clocks_ranges;
+       struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks =
+                       wm_with_clock_ranges.wm_mcif_clocks_ranges;
+       int32_t i;
+
+       wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
+       wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
+
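+       /* Translate the DC reader (DMIF) and writer (MCIF) watermark sets into
+        * the dm_pp soc15 watermark layout expected by the SMU, converting MHz
+        * to kHz; any wm_inst above 3 falls back to watermark set A.
+        */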
+       for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
+               if (ranges->reader_wm_sets[i].wm_inst > 3)
+                       wm_dce_clocks[i].wm_set_id = WM_SET_A;
+               else
+                       wm_dce_clocks[i].wm_set_id =
+                                       ranges->reader_wm_sets[i].wm_inst;
+               wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz =
+                       ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000;
+               wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz =
+                       ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000;
+               wm_dce_clocks[i].wm_max_mem_clk_in_khz =
+                       ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000;
+               wm_dce_clocks[i].wm_min_mem_clk_in_khz =
+                       ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000;
+       }
+
+       for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) {
+               if (ranges->writer_wm_sets[i].wm_inst > 3)
+                       wm_soc_clocks[i].wm_set_id = WM_SET_A;
+               else
+                       wm_soc_clocks[i].wm_set_id =
+                                       ranges->writer_wm_sets[i].wm_inst;
+               wm_soc_clocks[i].wm_max_socclk_clk_in_khz =
+                       ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000;
+               wm_soc_clocks[i].wm_min_socclk_clk_in_khz =
+                       ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000;
+               wm_soc_clocks[i].wm_max_mem_clk_in_khz =
+                       ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000;
+               wm_soc_clocks[i].wm_min_mem_clk_in_khz =
+                       ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
+       }
+
+       if (!smu->funcs)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       /* 0: success, or smu.funcs->set_watermarks_for_clock_ranges is NULL;
+        * 1: failure
+        */
+       if (smu_set_watermarks_for_clock_ranges(smu, &wm_with_clock_ranges))
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
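+/* Enable the PME workaround via the SMU's Azalia D3 PME handler. */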
+enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp)
+{
+       const struct dc_context *ctx = pp->dm;
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+
+       if (!smu->funcs)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       /* 0: success, or smu.funcs->set_azalia_d3_pme is NULL; 1: failure */
+       if (smu_set_azalia_d3_pme(smu))
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
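+/* Notify the SMU of the number of active displays. */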
+enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count)
+{
+       const struct dc_context *ctx = pp->dm;
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+
+       if (!smu->funcs)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       /* 0: success, or smu.funcs->set_display_count is NULL; 1: failure */
+       if (smu_set_display_count(smu, count))
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
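+/* Set the minimum DCEF clock (in MHz) allowed during deep sleep via the SMU. */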
+enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz)
+{
+       const struct dc_context *ctx = pp->dm;
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+
+       if (!smu->funcs)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       /* 0: success, or smu.funcs->set_deep_sleep_dcefclk is NULL; 1: failure */
+       if (smu_set_deep_sleep_dcefclk(smu, mhz))
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
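+/* Request a hard minimum DCEF clock from the SMU, with the frequency in MHz. */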
+enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq(
+               struct pp_smu *pp, int mhz)
+{
+       const struct dc_context *ctx = pp->dm;
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+       struct pp_display_clock_request clock_req;
+
+       if (!smu->funcs)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       clock_req.clock_type = amd_pp_dcef_clock;
+       clock_req.clock_freq_in_khz = mhz * 1000;
+
+       /* 0: success, or smu.funcs->display_clock_voltage_request is NULL;
+        * 1: failure
+        */
+       if (smu_display_clock_voltage_request(smu, &clock_req))
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
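+/* Request a hard minimum memory clock (uclk) from the SMU, with the
+ * frequency in MHz.
+ */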
+enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz)
+{
+       const struct dc_context *ctx = pp->dm;
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+       struct pp_display_clock_request clock_req;
+
+       if (!smu->funcs)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       clock_req.clock_type = amd_pp_mem_clock;
+       clock_req.clock_freq_in_khz = mhz * 1000;
+
+       /* 0: success, or smu.funcs->display_clock_voltage_request is NULL;
+        * 1: failure
+        */
+       if (smu_display_clock_voltage_request(smu, &clock_req))
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
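+/* Tell the SMU whether the display stack supports the p-state handshake;
+ * when it does not, memory clock (uclk) switching is disabled.
+ */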
+enum pp_smu_status pp_nv_set_pstate_handshake_support(
+       struct pp_smu *pp, BOOLEAN pstate_handshake_supported)
+{
+       const struct dc_context *ctx = pp->dm;
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+
+       if (smu_display_disable_memory_clock_switch(smu, !pstate_handshake_supported))
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
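+/* Issue a clock/voltage request to the SMU for the given display clock
+ * (DISPCLK, PHYCLK or PIXELCLK) at a frequency in MHz.
+ */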
+enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp,
+               enum pp_smu_nv_clock_id clock_id, int mhz)
+{
+       const struct dc_context *ctx = pp->dm;
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+       struct pp_display_clock_request clock_req;
+
+       if (!smu->funcs)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       switch (clock_id) {
+       case PP_SMU_NV_DISPCLK:
+               clock_req.clock_type = amd_pp_disp_clock;
+               break;
+       case PP_SMU_NV_PHYCLK:
+               clock_req.clock_type = amd_pp_phy_clock;
+               break;
+       case PP_SMU_NV_PIXELCLK:
+               clock_req.clock_type = amd_pp_pixel_clock;
+               break;
+       default:
+               break;
+       }
+       clock_req.clock_freq_in_khz = mhz * 1000;
+
+       /* 0: success, or smu.funcs->display_clock_voltage_request is NULL;
+        * 1: failure
+        */
+       if (smu_display_clock_voltage_request(smu, &clock_req))
+               return PP_SMU_RESULT_FAIL;
+
+       return PP_SMU_RESULT_OK;
+}
+
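+/* Query the maximum sustainable clocks from the SMU for use by DC. */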
+enum pp_smu_status pp_nv_get_maximum_sustainable_clocks(
+               struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks)
+{
+       const struct dc_context *ctx = pp->dm;
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+
+       if (!smu->funcs)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       if (!smu->funcs->get_max_sustainable_clocks_by_dc)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks))
+               return PP_SMU_RESULT_OK;
+
+       return PP_SMU_RESULT_FAIL;
+}
+
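+/* Query the available memory clock (uclk) DPM states from the SMU. */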
+enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp,
+               unsigned int *clock_values_in_khz, unsigned int *num_states)
+{
+       const struct dc_context *ctx = pp->dm;
+       struct amdgpu_device *adev = ctx->driver_context;
+       struct smu_context *smu = &adev->smu;
+
+       if (!smu->ppt_funcs)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       if (!smu->ppt_funcs->get_uclk_dpm_states)
+               return PP_SMU_RESULT_UNSUPPORTED;
+
+       if (!smu->ppt_funcs->get_uclk_dpm_states(smu,
+                       clock_values_in_khz, num_states))
+               return PP_SMU_RESULT_OK;
+
+       return PP_SMU_RESULT_FAIL;
+}
+
 void dm_pp_get_funcs(
                struct dc_context *ctx,
                struct pp_smu_funcs *funcs)
 {
-       funcs->rv_funcs.pp_smu.dm = ctx;
-       funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
-       funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
-       funcs->rv_funcs.set_display_count = pp_rv_set_active_display_count;
-       funcs->rv_funcs.set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk;
-       funcs->rv_funcs.set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq;
-       funcs->rv_funcs.set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq;
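+       /* Hook up the pp_smu callbacks that match this ASIC's DCN version. */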
+       switch (ctx->dce_version) {
+       case DCN_VERSION_1_0:
+       case DCN_VERSION_1_01:
+               funcs->ctx.ver = PP_SMU_VER_RV;
+               funcs->rv_funcs.pp_smu.dm = ctx;
+               funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
+               funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
+               funcs->rv_funcs.set_display_count =
+                               pp_rv_set_active_display_count;
+               funcs->rv_funcs.set_min_deep_sleep_dcfclk =
+                               pp_rv_set_min_deep_sleep_dcfclk;
+               funcs->rv_funcs.set_hard_min_dcfclk_by_freq =
+                               pp_rv_set_hard_min_dcefclk_by_freq;
+               funcs->rv_funcs.set_hard_min_fclk_by_freq =
+                               pp_rv_set_hard_min_fclk_by_freq;
+               break;
+#ifdef CONFIG_DRM_AMD_DC_DCN2_0
+       case DCN_VERSION_2_0:
+               funcs->ctx.ver = PP_SMU_VER_NV;
+               funcs->nv_funcs.pp_smu.dm = ctx;
+               funcs->nv_funcs.set_display_count = pp_nv_set_display_count;
+               funcs->nv_funcs.set_hard_min_dcfclk_by_freq =
+                               pp_nv_set_hard_min_dcefclk_by_freq;
+               funcs->nv_funcs.set_min_deep_sleep_dcfclk =
+                               pp_nv_set_min_deep_sleep_dcfclk;
+               funcs->nv_funcs.set_voltage_by_freq =
+                               pp_nv_set_voltage_by_freq;
+               funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges;
+
+               /* TODO: set_pme_wa_enable causes a 4k@60Hz display to not light up */
+               funcs->nv_funcs.set_pme_wa_enable = NULL;
+               /* TODO: debug the warning message */
+               funcs->nv_funcs.set_hard_min_uclk_by_freq =
+                               pp_nv_set_hard_min_uclk_by_freq;
+               /* TODO: compare data with the Windows driver */
+               funcs->nv_funcs.get_maximum_sustainable_clocks =
+                               pp_nv_get_maximum_sustainable_clocks;
+               /* TODO: compare data with the Windows driver */
+               funcs->nv_funcs.get_uclk_dpm_states =
+                               pp_nv_get_uclk_dpm_states;
+               funcs->nv_funcs.set_pstate_handshake_support =
+                               pp_nv_set_pstate_handshake_support;
+               break;
+#endif
+       default:
+               DRM_ERROR("smu version is not supported!\n");
+               break;
+       }
 }
-