Merge drm/drm-next into drm-misc-next
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
index 42fa2e1..0cb9346 100644 (file)
@@ -143,6 +143,9 @@ MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
 
+#define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
+
 /* Number of bytes in PSP header for firmware. */
 #define PSP_HEADER_BYTES 0x100
 
@@ -411,6 +414,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
        struct amdgpu_crtc *amdgpu_crtc;
        struct common_irq_params *irq_params = interrupt_params;
        struct amdgpu_device *adev = irq_params->adev;
+       struct drm_device *dev = adev_to_drm(adev);
        unsigned long flags;
        struct drm_pending_vblank_event *e;
        u32 vpos, hpos, v_blank_start, v_blank_end;
@@ -421,18 +425,17 @@ static void dm_pflip_high_irq(void *interrupt_params)
        /* IRQ could occur when in initial stage */
        /* TODO work and BO cleanup */
        if (amdgpu_crtc == NULL) {
-               DC_LOG_PFLIP("CRTC is null, returning.\n");
+               drm_dbg_state(dev, "CRTC is null, returning.\n");
                return;
        }
 
        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
 
        if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
-               DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
-                            amdgpu_crtc->pflip_status,
-                            AMDGPU_FLIP_SUBMITTED,
-                            amdgpu_crtc->crtc_id,
-                            amdgpu_crtc);
+               drm_dbg_state(dev,
+                             "amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n",
+                             amdgpu_crtc->pflip_status, AMDGPU_FLIP_SUBMITTED,
+                             amdgpu_crtc->crtc_id, amdgpu_crtc);
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                return;
        }
@@ -498,9 +501,9 @@ static void dm_pflip_high_irq(void *interrupt_params)
        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
-       DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
-                    amdgpu_crtc->crtc_id, amdgpu_crtc,
-                    vrr_active, (int) !e);
+       drm_dbg_state(dev,
+                     "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
+                     amdgpu_crtc->crtc_id, amdgpu_crtc, vrr_active, (int)!e);
 }
 
 static void dm_vupdate_high_irq(void *interrupt_params)
@@ -530,9 +533,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
                        atomic64_set(&irq_params->previous_timestamp, vblank->time);
                }
 
-               DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
-                             acrtc->crtc_id,
-                             vrr_active);
+               drm_dbg_vbl(drm_dev,
+                           "crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+                           vrr_active);
 
                /* Core vblank handling is done here after end of front-porch in
                 * vrr mode, as vblank timestamping will give valid results
@@ -583,8 +586,9 @@ static void dm_crtc_high_irq(void *interrupt_params)
 
        vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
 
-       DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
-                     vrr_active, acrtc->dm_irq_params.active_planes);
+       drm_dbg_vbl(adev_to_drm(adev),
+                   "crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
+                   vrr_active, acrtc->dm_irq_params.active_planes);
 
        /**
         * Core vblank handling at start of front-porch is only possible
@@ -1070,6 +1074,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
        const struct firmware *dmub_fw = adev->dm.dmub_fw;
        struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
        struct abm *abm = adev->dm.dc->res_pool->abm;
+       struct dc_context *ctx = adev->dm.dc->ctx;
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
@@ -1091,6 +1096,10 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
                return -EINVAL;
        }
 
+       /* initialize register offsets for ASICs with runtime initialization available */
+       if (dmub_srv->hw_funcs.init_reg_offsets)
+               dmub_srv->hw_funcs.init_reg_offsets(dmub_srv, ctx);
+
        status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
        if (status != DMUB_STATUS_OK) {
                DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
@@ -1166,9 +1175,10 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
        for (i = 0; i < fb_info->num_fb; ++i)
                hw_params.fb[i] = &fb_info->fb[i];
 
-       switch (adev->ip_versions[DCE_HWIP][0]) {
+       switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
        case IP_VERSION(3, 1, 3):
        case IP_VERSION(3, 1, 4):
+       case IP_VERSION(3, 5, 0):
                hw_params.dpia_supported = true;
                hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
                break;
@@ -1247,7 +1257,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
        agp_top = adev->gmc.agp_end >> 24;
 
        /* AGP aperture is disabled */
-       if (agp_bot == agp_top) {
+       if (agp_bot > agp_top) {
                logical_addr_low = adev->gmc.fb_start >> 18;
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        /*
@@ -1598,7 +1608,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
        init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
 
-       switch (adev->ip_versions[DCE_HWIP][0]) {
+       switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
        case IP_VERSION(2, 1, 0):
                switch (adev->dm.dmcub_fw_version) {
                case 0: /* development */
@@ -1617,40 +1627,23 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                break;
        }
 
-       switch (adev->asic_type) {
-       case CHIP_CARRIZO:
-       case CHIP_STONEY:
-               init_data.flags.gpu_vm_support = true;
-               break;
-       default:
-               switch (adev->ip_versions[DCE_HWIP][0]) {
-               case IP_VERSION(1, 0, 0):
-               case IP_VERSION(1, 0, 1):
-                       /* enable S/G on PCO and RV2 */
-                       if ((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
-                           (adev->apu_flags & AMD_APU_IS_PICASSO))
-                               init_data.flags.gpu_vm_support = true;
-                       break;
-               case IP_VERSION(2, 1, 0):
-               case IP_VERSION(3, 0, 1):
-               case IP_VERSION(3, 1, 2):
-               case IP_VERSION(3, 1, 3):
-               case IP_VERSION(3, 1, 4):
-               case IP_VERSION(3, 1, 5):
-               case IP_VERSION(3, 1, 6):
-                       init_data.flags.gpu_vm_support = true;
-                       break;
-               default:
-                       break;
-               }
-               break;
-       }
-       if (init_data.flags.gpu_vm_support &&
-           (amdgpu_sg_display == 0))
+       /* APUs support S/G display by default, except for:
+        * - ASICs before Carrizo,
+        * - RAVEN1 (users reported stability issues)
+        */
+
+       if (adev->asic_type < CHIP_CARRIZO) {
                init_data.flags.gpu_vm_support = false;
+       } else if (adev->asic_type == CHIP_RAVEN) {
+               if (adev->apu_flags & AMD_APU_IS_RAVEN)
+                       init_data.flags.gpu_vm_support = false;
+               else
+                       init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0);
+       } else {
+               init_data.flags.gpu_vm_support = (amdgpu_sg_display != 0) && (adev->flags & AMD_IS_APU);
+       }
 
-       if (init_data.flags.gpu_vm_support)
-               adev->mode_info.gpu_vm_support = true;
+       adev->mode_info.gpu_vm_support = init_data.flags.gpu_vm_support;
 
        if (amdgpu_dc_feature_mask & DC_FBC_MASK)
                init_data.flags.fbc_support = true;
@@ -1671,7 +1664,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
        init_data.flags.seamless_boot_edp_requested = false;
 
-       if (check_seamless_boot_capability(adev)) {
+       if (amdgpu_device_seamless_boot_supported(adev)) {
                init_data.flags.seamless_boot_edp_requested = true;
                init_data.flags.allow_seamless_boot_optimization = true;
                DRM_INFO("Seamless boot condition check passed\n");
@@ -1681,6 +1674,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
        init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0];
        init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
+       init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
 
        INIT_LIST_HEAD(&adev->dm.da_list);
 
@@ -1693,8 +1687,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                DRM_INFO("Display Core v%s initialized on %s\n", DC_VER,
                         dce_version_to_string(adev->dm.dc->ctx->dce_version));
        } else {
-               DRM_INFO("Display Core v%s failed to initialize on %s\n", DC_VER,
-                        dce_version_to_string(adev->dm.dc->ctx->dce_version));
+               DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
                goto error;
        }
 
@@ -2005,7 +1998,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
                        return 0;
                break;
        default:
-               switch (adev->ip_versions[DCE_HWIP][0]) {
+               switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
                case IP_VERSION(2, 0, 2):
                case IP_VERSION(2, 0, 3):
                case IP_VERSION(2, 0, 0):
@@ -2021,6 +2014,7 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
                case IP_VERSION(3, 1, 6):
                case IP_VERSION(3, 2, 0):
                case IP_VERSION(3, 2, 1):
+               case IP_VERSION(3, 5, 0):
                        return 0;
                default:
                        break;
@@ -2094,7 +2088,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        enum dmub_status status;
        int r;
 
-       switch (adev->ip_versions[DCE_HWIP][0]) {
+       switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
        case IP_VERSION(2, 1, 0):
                dmub_asic = DMUB_ASIC_DCN21;
                break;
@@ -2129,6 +2123,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        case IP_VERSION(3, 2, 1):
                dmub_asic = DMUB_ASIC_DCN321;
                break;
+       case IP_VERSION(3, 5, 0):
+               dmub_asic = DMUB_ASIC_DCN35;
+               break;
        default:
                /* ASIC doesn't support DMUB. */
                return 0;
@@ -2463,7 +2460,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
         * therefore, this function apply to navi10/12/14 but not Renoir
         * *
         */
-       switch (adev->ip_versions[DCE_HWIP][0]) {
+       switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
        case IP_VERSION(2, 0, 2):
        case IP_VERSION(2, 0, 0):
                break;
@@ -2649,6 +2646,8 @@ static int dm_suspend(void *handle)
 
        WARN_ON(adev->dm.cached_state);
        adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
+       if (IS_ERR(adev->dm.cached_state))
+               return PTR_ERR(adev->dm.cached_state);
 
        s3_handle_mst(adev_to_drm(adev), true);
 
@@ -2686,6 +2685,7 @@ static void emulated_link_detect(struct dc_link *link)
        struct display_sink_capability sink_caps = { 0 };
        enum dc_edid_status edid_status;
        struct dc_context *dc_ctx = link->ctx;
+       struct drm_device *dev = adev_to_drm(dc_ctx->driver_context);
        struct dc_sink *sink = NULL;
        struct dc_sink *prev_sink = NULL;
 
@@ -2735,7 +2735,7 @@ static void emulated_link_detect(struct dc_link *link)
        }
 
        default:
-               DC_ERROR("Invalid connector type! signal:%d\n",
+               drm_err(dev, "Invalid connector type! signal:%d\n",
                        link->connector_signal);
                return;
        }
@@ -2745,7 +2745,7 @@ static void emulated_link_detect(struct dc_link *link)
 
        sink = dc_sink_create(&sink_init_data);
        if (!sink) {
-               DC_ERROR("Failed to create sink!\n");
+               drm_err(dev, "Failed to create sink!\n");
                return;
        }
 
@@ -2758,7 +2758,7 @@ static void emulated_link_detect(struct dc_link *link)
                        sink);
 
        if (edid_status != EDID_OK)
-               DC_ERROR("Failed to read EDID");
+               drm_err(dev, "Failed to read EDID\n");
 
 }
 
@@ -2777,7 +2777,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
        bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
 
        if (!bundle) {
-               dm_error("Failed to allocate update bundle\n");
+               drm_err(dm->ddev, "Failed to allocate update bundle\n");
                goto cleanup;
        }
 
@@ -2823,6 +2823,10 @@ static int dm_resume(void *handle)
        int i, r, j, ret;
        bool need_hotplug = false;
 
+       if (dm->dc->caps.ips_support) {
+               dc_dmub_srv_exit_low_power_state(dm->dc);
+       }
+
        if (amdgpu_in_reset(adev)) {
                dc_state = dm->cached_dc_state;
 
@@ -2848,6 +2852,7 @@ static int dm_resume(void *handle)
                        DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
 
                dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
                dc_resume(dm->dc);
 
                amdgpu_dm_irq_resume_early(adev);
@@ -3225,7 +3230,8 @@ void amdgpu_dm_update_connector_after_detect(
                        aconnector->timing_requested =
                                kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
                        if (!aconnector->timing_requested)
-                               dm_error("failed to create aconnector->requested_timing\n");
+                               drm_err(dev,
+                                       "failed to create aconnector->requested_timing\n");
                }
 
                drm_connector_update_edid_property(connector, aconnector->edid);
@@ -4411,7 +4417,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                }
 
        /* Use Outbox interrupt */
-       switch (adev->ip_versions[DCE_HWIP][0]) {
+       switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
        case IP_VERSION(3, 0, 0):
        case IP_VERSION(3, 1, 2):
        case IP_VERSION(3, 1, 3):
@@ -4421,6 +4427,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
        case IP_VERSION(3, 2, 0):
        case IP_VERSION(3, 2, 1):
        case IP_VERSION(2, 1, 0):
+       case IP_VERSION(3, 5, 0):
                if (register_outbox_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
@@ -4428,12 +4435,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                break;
        default:
                DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
-                             adev->ip_versions[DCE_HWIP][0]);
+                             amdgpu_ip_version(adev, DCE_HWIP, 0));
        }
 
        /* Determine whether to enable PSR support by default. */
        if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
-               switch (adev->ip_versions[DCE_HWIP][0]) {
+               switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
                case IP_VERSION(3, 1, 2):
                case IP_VERSION(3, 1, 3):
                case IP_VERSION(3, 1, 4):
@@ -4441,6 +4448,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                case IP_VERSION(3, 1, 6):
                case IP_VERSION(3, 2, 0):
                case IP_VERSION(3, 2, 1):
+               case IP_VERSION(3, 5, 0):
                        psr_feature_enabled = true;
                        break;
                default:
@@ -4565,7 +4573,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                }
                break;
        default:
-               switch (adev->ip_versions[DCE_HWIP][0]) {
+               switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
                case IP_VERSION(1, 0, 0):
                case IP_VERSION(1, 0, 1):
                case IP_VERSION(2, 0, 2):
@@ -4583,6 +4591,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                case IP_VERSION(3, 1, 6):
                case IP_VERSION(3, 2, 0):
                case IP_VERSION(3, 2, 1):
+               case IP_VERSION(3, 5, 0):
                        if (dcn10_register_irq_handlers(dm->adev)) {
                                DRM_ERROR("DM: Failed to initialize IRQ\n");
                                goto fail;
@@ -4590,7 +4599,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                        break;
                default:
                        DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
-                                       adev->ip_versions[DCE_HWIP][0]);
+                                       amdgpu_ip_version(adev, DCE_HWIP, 0));
                        goto fail;
                }
                break;
@@ -4673,14 +4682,14 @@ static int dm_init_microcode(struct amdgpu_device *adev)
        char *fw_name_dmub;
        int r;
 
-       switch (adev->ip_versions[DCE_HWIP][0]) {
+       switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
        case IP_VERSION(2, 1, 0):
                fw_name_dmub = FIRMWARE_RENOIR_DMUB;
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
                break;
        case IP_VERSION(3, 0, 0):
-               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
+               if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
                        fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
                else
                        fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
@@ -4713,13 +4722,14 @@ static int dm_init_microcode(struct amdgpu_device *adev)
        case IP_VERSION(3, 2, 1):
                fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
                break;
+       case IP_VERSION(3, 5, 0):
+               fw_name_dmub = FIRMWARE_DCN_35_DMUB;
+               break;
        default:
                /* ASIC doesn't support DMUB. */
                return 0;
        }
        r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
-       if (r)
-               DRM_ERROR("DMUB firmware loading failed: %d\n", r);
        return r;
 }
 
@@ -4807,7 +4817,7 @@ static int dm_early_init(void *handle)
                break;
        default:
 
-               switch (adev->ip_versions[DCE_HWIP][0]) {
+               switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
                case IP_VERSION(2, 0, 2):
                case IP_VERSION(3, 0, 0):
                        adev->mode_info.num_crtc = 6;
@@ -4837,13 +4847,14 @@ static int dm_early_init(void *handle)
                case IP_VERSION(3, 1, 6):
                case IP_VERSION(3, 2, 0):
                case IP_VERSION(3, 2, 1):
+               case IP_VERSION(3, 5, 0):
                        adev->mode_info.num_crtc = 4;
                        adev->mode_info.num_hpd = 4;
                        adev->mode_info.num_dig = 4;
                        break;
                default:
                        DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
-                                       adev->ip_versions[DCE_HWIP][0]);
+                                       amdgpu_ip_version(adev, DCE_HWIP, 0));
                        return -EINVAL;
                }
                break;
@@ -5432,6 +5443,24 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing,
        return color_space;
 }
 
+static enum display_content_type
+get_output_content_type(const struct drm_connector_state *connector_state)
+{
+       switch (connector_state->content_type) {
+       default:
+       case DRM_MODE_CONTENT_TYPE_NO_DATA:
+               return DISPLAY_CONTENT_TYPE_NO_DATA;
+       case DRM_MODE_CONTENT_TYPE_GRAPHICS:
+               return DISPLAY_CONTENT_TYPE_GRAPHICS;
+       case DRM_MODE_CONTENT_TYPE_PHOTO:
+               return DISPLAY_CONTENT_TYPE_PHOTO;
+       case DRM_MODE_CONTENT_TYPE_CINEMA:
+               return DISPLAY_CONTENT_TYPE_CINEMA;
+       case DRM_MODE_CONTENT_TYPE_GAME:
+               return DISPLAY_CONTENT_TYPE_GAME;
+       }
+}
+
 static bool adjust_colour_depth_from_display_info(
        struct dc_crtc_timing *timing_out,
        const struct drm_display_info *info)
@@ -5566,6 +5595,7 @@ static void fill_stream_properties_from_drm_display_mode(
        }
 
        stream->output_color_space = get_output_color_space(timing_out, connector_state);
+       stream->content_type = get_output_content_type(connector_state);
 }
 
 static void fill_audio_info(struct audio_info *audio_info,
@@ -6099,8 +6129,6 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
 
        if (recalculate_timing)
                drm_mode_set_crtcinfo(&saved_mode, 0);
-       else if (!old_stream)
-               drm_mode_set_crtcinfo(&mode, 0);
 
        /*
         * If scaling is enabled and refresh rate didn't change
@@ -6116,10 +6144,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                        requested_bpc);
 
        if (aconnector->timing_changed) {
-               DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n",
-                               __func__,
-                               stream->timing.display_color_depth,
-                               aconnector->timing_requested->display_color_depth);
+               drm_dbg(aconnector->base.dev,
+                       "overriding timing for automated test, bpc %d, changing to %d\n",
+                       stream->timing.display_color_depth,
+                       aconnector->timing_requested->display_color_depth);
                stream->timing = *aconnector->timing_requested;
        }
 
@@ -6415,15 +6443,23 @@ amdgpu_dm_connector_late_register(struct drm_connector *connector)
 static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
 {
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+       struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
        struct dc_link *dc_link = aconnector->dc_link;
        struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
        struct edid *edid;
 
-       if (!connector->edid_override)
+       /*
+        * Note: drm_get_edid gets edid in the following order:
+        * 1) override EDID if set via edid_override debugfs,
+        * 2) firmware EDID if set via edid_firmware module parameter
+        * 3) regular DDC read.
+        */
+       edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
+       if (!edid) {
+               DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
                return;
+       }
 
-       drm_edid_override_connector_update(&aconnector->base);
-       edid = aconnector->base.edid_blob_ptr->data;
        aconnector->edid = edid;
 
        /* Update emulated (virtual) sink's EDID */
@@ -6458,29 +6494,28 @@ static int get_modes(struct drm_connector *connector)
 
 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
 {
+       struct drm_connector *connector = &aconnector->base;
+       struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(&aconnector->base);
        struct dc_sink_init_data init_params = {
                        .link = aconnector->dc_link,
                        .sink_signal = SIGNAL_TYPE_VIRTUAL
        };
        struct edid *edid;
 
-       if (!aconnector->base.edid_blob_ptr) {
-               /* if connector->edid_override valid, pass
-                * it to edid_override to edid_blob_ptr
-                */
-
-               drm_edid_override_connector_update(&aconnector->base);
-
-               if (!aconnector->base.edid_blob_ptr) {
-                       DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
-                                       aconnector->base.name);
-
-                       aconnector->base.force = DRM_FORCE_OFF;
-                       return;
-               }
+       /*
+        * Note: drm_get_edid gets edid in the following order:
+        * 1) override EDID if set via edid_override debugfs,
+        * 2) firmware EDID if set via edid_firmware module parameter
+        * 3) regular DDC read.
+        */
+       edid = drm_get_edid(connector, &amdgpu_connector->ddc_bus->aux.ddc);
+       if (!edid) {
+               DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
+               return;
        }
 
-       edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+       if (drm_detect_hdmi_monitor(edid))
+               init_params.sink_signal = SIGNAL_TYPE_HDMI_TYPE_A;
 
        aconnector->edid = edid;
 
@@ -6662,6 +6697,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec
                goto fail;
        }
 
+       drm_mode_set_crtcinfo(mode, 0);
+
        stream = create_validate_stream_for_sink(aconnector, mode,
                                                 to_dm_connector_state(connector->state),
                                                 NULL);
@@ -6766,6 +6803,14 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
                new_crtc_state->mode_changed = true;
        }
 
+       if (new_con_state->content_type != old_con_state->content_type) {
+               new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
+               if (IS_ERR(new_crtc_state))
+                       return PTR_ERR(new_crtc_state);
+
+               new_crtc_state->mode_changed = true;
+       }
+
        if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
                struct dc_info_packet hdr_infopacket;
 
@@ -7401,6 +7446,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
                                adev->mode_info.abm_level_property, 0);
        }
 
+       if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+               /* Content Type is currently only implemented for HDMI. */
+               drm_connector_attach_content_type_property(&aconnector->base);
+       }
+
        if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
                if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces))
                        drm_connector_attach_colorspace_property(&aconnector->base);
@@ -7824,8 +7874,9 @@ static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
        /* Mark this event as consumed */
        acrtc->base.state->event = NULL;
 
-       DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
-                    acrtc->crtc_id);
+       drm_dbg_state(acrtc->base.dev,
+                     "crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
+                     acrtc->crtc_id);
 }
 
 static void update_freesync_state_on_stream(
@@ -8068,7 +8119,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
        bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
 
        if (!bundle) {
-               dm_error("Failed to allocate update bundle\n");
+               drm_err(dev, "Failed to allocate update bundle\n");
                goto cleanup;
        }
 
@@ -8656,7 +8707,9 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
                                status = dc_stream_get_status_from_state(dc_state,
                                                                         dm_new_crtc_state->stream);
                        if (!status)
-                               DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
+                               drm_err(dev,
+                                       "got no status for stream %p on acrtc%p\n",
+                                       dm_new_crtc_state->stream, acrtc);
                        else
                                acrtc->otg_inst = status->primary_otg_inst;
                }
@@ -8690,6 +8743,17 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
        trace_amdgpu_dm_atomic_commit_tail_begin(state);
 
+       if (dm->dc->caps.ips_support) {
+               for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+                       if (new_con_state->crtc &&
+                               new_con_state->crtc->state->active &&
+                               drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) {
+                               dc_dmub_srv_exit_low_power_state(dm->dc);
+                               break;
+                       }
+               }
+       }
+
        drm_atomic_helper_update_legacy_modeset_state(dev, state);
        drm_dp_mst_atomic_wait_for_dependencies(state);
 
@@ -9817,7 +9881,7 @@ static int dm_update_plane_state(struct dc *dc,
 
                /* Block top most plane from being a video plane */
                if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
-                       if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
+                       if (amdgpu_dm_plane_is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
                                return -EINVAL;
 
                        *is_top_most_overlay = false;
@@ -9893,16 +9957,27 @@ static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state,
        }
 }
 
+static void
+dm_get_plane_scale(struct drm_plane_state *plane_state,
+                  int *out_plane_scale_w, int *out_plane_scale_h)
+{
+       int plane_src_w, plane_src_h;
+
+       dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h);
+       *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w;
+       *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h;
+}
+
 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
                                struct drm_crtc *crtc,
                                struct drm_crtc_state *new_crtc_state)
 {
-       struct drm_plane *cursor = crtc->cursor, *underlying;
+       struct drm_plane *cursor = crtc->cursor, *plane, *underlying;
+       struct drm_plane_state *old_plane_state, *new_plane_state;
        struct drm_plane_state *new_cursor_state, *new_underlying_state;
        int i;
        int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
-       int cursor_src_w, cursor_src_h;
-       int underlying_src_w, underlying_src_h;
+       bool any_relevant_change = false;
 
        /* On DCE and DCN there is no dedicated hardware cursor plane. We get a
         * cursor per pipe but it's going to inherit the scaling and
@@ -9910,13 +9985,50 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
         * blending properties match the underlying planes'.
         */
 
-       new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
-       if (!new_cursor_state || !new_cursor_state->fb)
+       /* If no plane was enabled or changed scaling, no need to check again */
+       for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
+               int new_scale_w, new_scale_h, old_scale_w, old_scale_h;
+
+               if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc)
+                       continue;
+
+               if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) {
+                       any_relevant_change = true;
+                       break;
+               }
+
+               if (new_plane_state->fb == old_plane_state->fb &&
+                   new_plane_state->crtc_w == old_plane_state->crtc_w &&
+                   new_plane_state->crtc_h == old_plane_state->crtc_h)
+                       continue;
+
+               dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h);
+               dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h);
+
+               if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) {
+                       any_relevant_change = true;
+                       break;
+               }
+       }
+
+       if (!any_relevant_change)
+               return 0;
+
+       new_cursor_state = drm_atomic_get_plane_state(state, cursor);
+       if (IS_ERR(new_cursor_state))
+               return PTR_ERR(new_cursor_state);
+
+       if (!new_cursor_state->fb)
                return 0;
 
-       dm_get_oriented_plane_size(new_cursor_state, &cursor_src_w, &cursor_src_h);
-       cursor_scale_w = new_cursor_state->crtc_w * 1000 / cursor_src_w;
-       cursor_scale_h = new_cursor_state->crtc_h * 1000 / cursor_src_h;
+       dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h);
+
+       /* Need to check all enabled planes, even if this commit doesn't change
+        * their state
+        */
+       i = drm_atomic_add_affected_planes(state, crtc);
+       if (i)
+               return i;
 
        for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
                /* Narrow down to non-cursor planes on the same CRTC as the cursor */
@@ -9927,10 +10039,8 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
                if (!new_underlying_state->fb)
                        continue;
 
-               dm_get_oriented_plane_size(new_underlying_state,
-                                          &underlying_src_w, &underlying_src_h);
-               underlying_scale_w = new_underlying_state->crtc_w * 1000 / underlying_src_w;
-               underlying_scale_h = new_underlying_state->crtc_h * 1000 / underlying_src_h;
+               dm_get_plane_scale(new_underlying_state,
+                                  &underlying_scale_w, &underlying_scale_h);
 
                if (cursor_scale_w != underlying_scale_w ||
                    cursor_scale_h != underlying_scale_h) {
@@ -10833,7 +10943,8 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
 {
 #ifdef DM_CHECK_ADDR_0
        if (address == 0) {
-               DC_ERR("invalid register write. address = 0");
+               drm_err(adev_to_drm(ctx->driver_context),
+                       "invalid register write. address = 0");
                return;
        }
 #endif
@@ -10847,7 +10958,8 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
        u32 value;
 #ifdef DM_CHECK_ADDR_0
        if (address == 0) {
-               DC_ERR("invalid register read; address = 0\n");
+               drm_err(adev_to_drm(ctx->driver_context),
+                       "invalid register read; address = 0\n");
                return 0;
        }
 #endif
@@ -10957,27 +11069,6 @@ int amdgpu_dm_process_dmub_set_config_sync(
        return ret;
 }
 
-/*
- * Check whether seamless boot is supported.
- *
- * So far we only support seamless boot on CHIP_VANGOGH.
- * If everything goes well, we may consider expanding
- * seamless boot to other ASICs.
- */
-bool check_seamless_boot_capability(struct amdgpu_device *adev)
-{
-       switch (adev->ip_versions[DCE_HWIP][0]) {
-       case IP_VERSION(3, 0, 1):
-               if (!adev->mman.keep_stolen_vga_memory)
-                       return true;
-               break;
-       default:
-               break;
-       }
-
-       return false;
-}
-
 bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type)
 {
        return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type);