drm/amd/display/amdgpu_dm: Make amdgpu_dm_register_backlight_device() take an amdgpu_dm_connector
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 93dee3d..aee7950 100644
@@ -28,7 +28,6 @@
 
 #include "dm_services_types.h"
 #include "dc.h"
-#include "dc_link_dp.h"
 #include "link_enc_cfg.h"
 #include "dc/inc/core_types.h"
 #include "dal_asic_id.h"
 #include "dc/dc_edid_parser.h"
 #include "dc/dc_stat.h"
 #include "amdgpu_dm_trace.h"
+#include "dpcd_defs.h"
+#include "link/protocols/link_dpcd.h"
+#include "link_service_types.h"
+#include "link/protocols/link_dp_capability.h"
+#include "link/protocols/link_ddc.h"
 
 #include "vid.h"
 #include "amdgpu.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_dm_plane.h"
 #include "amdgpu_dm_crtc.h"
-#ifdef CONFIG_DRM_AMD_DC_HDCP
 #include "amdgpu_dm_hdcp.h"
 #include <drm/display/drm_hdcp_helper.h>
-#endif
 #include "amdgpu_pm.h"
 #include "amdgpu_atombios.h"
 
@@ -66,7 +68,7 @@
 
 #include "ivsrcid/ivsrcid_vislands30.h"
 
-#include "i2caux_interface.h"
+#include <linux/backlight.h>
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/types.h>
 
 #include "modules/inc/mod_freesync.h"
 #include "modules/power/power_helpers.h"
-#include "modules/inc/mod_info_packet.h"
 
 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
@@ -210,7 +211,7 @@ static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
 
 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *amdgpu_dm_connector,
-                                   uint32_t link_index,
+                                   u32 link_index,
                                    struct amdgpu_encoder *amdgpu_encoder);
 static int amdgpu_dm_encoder_init(struct drm_device *dev,
                                  struct amdgpu_encoder *aencoder,
@@ -262,7 +263,7 @@ static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                                  u32 *vbl, u32 *position)
 {
-       uint32_t v_blank_start, v_blank_end, h_position, v_position;
+       u32 v_blank_start, v_blank_end, h_position, v_position;
 
        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
@@ -341,12 +342,52 @@ static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
 {
        if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
                return true;
-       else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
+       else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state))
                return true;
        else
                return false;
 }
 
+static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update,
+                                       int planes_count)
+{
+       int i, j;
+
+       for (i = 0, j = planes_count - 1; i < j; i++, j--)
+               swap(array_of_surface_update[i], array_of_surface_update[j]);
+}
+
+/**
+ * update_planes_and_stream_adapter() - Send planes to be updated in DC
+ *
+ * @dc: Display Core control structure
+ * @update_type: specify whether it is FULL/MEDIUM/FAST update
+ * @planes_count: planes count to update
+ * @stream: stream state
+ * @stream_update: stream update
+ * @array_of_surface_update: dc surface update pointer
+ *
+ * DC has a generic way to update planes and stream via
+ * dc_update_planes_and_stream function; however, DM might need some
+ * adjustments and preparation before calling it. This function is a wrapper
+ * for the dc_update_planes_and_stream that does any required configuration
+ * before passing control to DC.
+ *
+ * Return: true if successful, false otherwise.
+ */
+static inline bool update_planes_and_stream_adapter(struct dc *dc,
+                                                   int update_type,
+                                                   int planes_count,
+                                                   struct dc_stream_state *stream,
+                                                   struct dc_stream_update *stream_update,
+                                                   struct dc_surface_update *array_of_surface_update)
+{
+       reverse_planes_order(array_of_surface_update, planes_count);
+
+       /*
+        * Previous frame finished and HW is ready for optimization.
+        */
+       if (update_type == UPDATE_TYPE_FAST)
+               dc_post_update_surfaces_to_stream(dc);
+
+       return dc_update_planes_and_stream(dc,
+                                          array_of_surface_update,
+                                          planes_count,
+                                          stream,
+                                          stream_update);
+}
+
 /**
  * dm_pflip_high_irq() - Handle pageflip interrupt
  * @interrupt_params: ignored
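
For context, a minimal sketch (not part of the patch) of how a DM commit path would drive the update_planes_and_stream_adapter() helper added above; the caller name and parameters here are illustrative, mirroring the dm_gpureset_commit_state() hunk later in this diff:

	/* Hypothetical caller, for illustration only. */
	static void example_commit_planes(struct dc *dc,
					  struct dc_stream_state *stream,
					  struct dc_stream_update *stream_update,
					  struct dc_surface_update *updates,
					  int plane_count)
	{
		/* The adapter reverses the plane array for DC and, on FAST
		 * updates, lets DC optimize the previous frame first.
		 */
		if (!update_planes_and_stream_adapter(dc, UPDATE_TYPE_FULL,
						      plane_count, stream,
						      stream_update, updates))
			DRM_ERROR("Failed to update planes and stream\n");
	}
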
@@ -361,7 +402,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
        struct amdgpu_device *adev = irq_params->adev;
        unsigned long flags;
        struct drm_pending_vblank_event *e;
-       uint32_t vpos, hpos, v_blank_start, v_blank_end;
+       u32 vpos, hpos, v_blank_start, v_blank_end;
        bool vrr_active;
 
        amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
@@ -391,7 +432,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
 
        WARN_ON(!e);
 
-       vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
+       vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc);
 
        /* Fixed refresh rate, or VRR scanout position outside front-porch? */
        if (!vrr_active ||
@@ -465,7 +506,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
        acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
 
        if (acrtc) {
-               vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
+               vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
                drm_dev = acrtc->base.dev;
                vblank = &drm_dev->vblank[acrtc->base.index];
                previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
@@ -489,7 +530,7 @@ static void dm_vupdate_high_irq(void *interrupt_params)
                 * if a pageflip happened inside front-porch.
                 */
                if (vrr_active) {
-                       dm_crtc_handle_vblank(acrtc);
+                       amdgpu_dm_crtc_handle_vblank(acrtc);
 
                        /* BTR processing for pre-DCE12 ASICs */
                        if (acrtc->dm_irq_params.stream &&
@@ -529,7 +570,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
        if (!acrtc)
                return;
 
-       vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
+       vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc);
 
        DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
                      vrr_active, acrtc->dm_irq_params.active_planes);
@@ -541,7 +582,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
         * to dm_vupdate_high_irq after end of front-porch.
         */
        if (!vrr_active)
-               dm_crtc_handle_vblank(acrtc);
+               amdgpu_dm_crtc_handle_vblank(acrtc);
 
        /**
         * Following stuff must happen at start of vblank, for crc
@@ -648,7 +689,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
        struct drm_connector *connector;
        struct drm_connector_list_iter iter;
        struct dc_link *link;
-       uint8_t link_index = 0;
+       u8 link_index = 0;
        struct drm_device *dev;
 
        if (adev == NULL)
@@ -672,7 +713,14 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (link && aconnector->dc_link == link) {
-                       DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+                       if (notify->type == DMUB_NOTIFICATION_HPD)
+                               DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+                       else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ)
+                               DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index);
+                       else
+                               DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n",
+                                               notify->type, link_index);
+
                        hpd_aconnector = aconnector;
                        break;
                }
@@ -749,7 +797,7 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
        struct amdgpu_device *adev = irq_params->adev;
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dmcub_trace_buf_entry entry = { 0 };
-       uint32_t count = 0;
+       u32 count = 0;
        struct dmub_hpd_work *dmub_hpd_wrk;
        struct dc_link *plink = NULL;
 
@@ -772,15 +820,14 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk");
                                        return;
                                }
-                               dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
+                               dmub_hpd_wrk->dmub_notify = kmemdup(&notify, sizeof(struct dmub_notification),
+                                                                   GFP_ATOMIC);
                                if (!dmub_hpd_wrk->dmub_notify) {
                                        kfree(dmub_hpd_wrk);
                                        DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
                                        return;
                                }
                                INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
-                               if (dmub_hpd_wrk->dmub_notify)
-                                       memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
                                dmub_hpd_wrk->adev = adev;
                                if (notify.type == DMUB_NOTIFICATION_HPD) {
                                        plink = adev->dm.dc->links[notify.link_index];
@@ -1015,7 +1062,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
        struct dmub_srv_hw_params hw_params;
        enum dmub_status status;
        const unsigned char *fw_inst_const, *fw_bss_data;
-       uint32_t i, fw_inst_const_size, fw_bss_data_size;
+       u32 i, fw_inst_const_size, fw_bss_data_size;
        bool has_hw_support;
 
        if (!dmub_srv)
@@ -1176,10 +1223,10 @@ static void dm_dmub_hw_resume(struct amdgpu_device *adev)
 
 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
 {
-       uint64_t pt_base;
-       uint32_t logical_addr_low;
-       uint32_t logical_addr_high;
-       uint32_t agp_base, agp_bot, agp_top;
+       u64 pt_base;
+       u32 logical_addr_low;
+       u32 logical_addr_high;
+       u32 agp_base, agp_bot, agp_top;
        PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
 
        memset(pa_config, 0, sizeof(*pa_config));
@@ -1190,7 +1237,7 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 
        /* AGP aperture is disabled */
        if (agp_bot == agp_top) {
-               logical_addr_low  = adev->gmc.vram_start >> 18;
+               logical_addr_low = adev->gmc.fb_start >> 18;
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        /*
                         * Raven2 has a HW issue that it is unable to use the vram which
@@ -1200,9 +1247,9 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
                         */
                        logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1;
                else
-                       logical_addr_high = adev->gmc.vram_end >> 18;
+                       logical_addr_high = adev->gmc.fb_end >> 18;
        } else {
-               logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
+               logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
                if (adev->apu_flags & AMD_APU_IS_RAVEN2)
                        /*
                         * Raven2 has a HW issue that it is unable to use the vram which
@@ -1239,8 +1286,23 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
        pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
        pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
 
-       pa_config->is_hvm_enabled = 0;
+       pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support;
+
+}
+
+static void force_connector_state(
+       struct amdgpu_dm_connector *aconnector,
+       enum drm_connector_force force_state)
+{
+       struct drm_connector *connector = &aconnector->base;
+
+       mutex_lock(&connector->dev->mode_config.mutex);
+       aconnector->base.force = force_state;
+       mutex_unlock(&connector->dev->mode_config.mutex);
 
+       mutex_lock(&aconnector->hpd_lock);
+       drm_kms_helper_connector_hotplug_event(connector);
+       mutex_unlock(&aconnector->hpd_lock);
 }
 
 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
@@ -1251,6 +1313,9 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
        struct amdgpu_device *adev;
        enum dc_connection_type new_connection_type = dc_connection_none;
        unsigned long flags;
+       union test_response test_response;
+
+       memset(&test_response, 0, sizeof(test_response));
 
        offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
        aconnector = offload_work->offload_wq->aconnector;
@@ -1264,7 +1329,7 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
        dc_link = aconnector->dc_link;
 
        mutex_lock(&aconnector->hpd_lock);
-       if (!dc_link_detect_sink(dc_link, &new_connection_type))
+       if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
                DRM_ERROR("KMS: Failed to detect connector\n");
        mutex_unlock(&aconnector->hpd_lock);
 
@@ -1275,15 +1340,49 @@ static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
                goto skip;
 
        mutex_lock(&adev->dm.dc_lock);
-       if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
+       if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
                dc_link_dp_handle_automated_test(dc_link);
+
+               if (aconnector->timing_changed) {
+                       /* force connector disconnect and reconnect */
+                       force_connector_state(aconnector, DRM_FORCE_OFF);
+                       msleep(100);
+                       force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED);
+               }
+
+               test_response.bits.ACK = 1;
+
+               core_link_write_dpcd(
+               dc_link,
+               DP_TEST_RESPONSE,
+               &test_response.raw,
+               sizeof(test_response));
+       }
        else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
-                       hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
+                       dc_link_check_link_loss_status(dc_link, &offload_work->data) &&
                        dc_link_dp_allow_hpd_rx_irq(dc_link)) {
-               dc_link_dp_handle_link_loss(dc_link);
+               /* offload_work->data comes from handle_hpd_rx_irq() ->
+                * schedule_hpd_rx_offload_work(), i.e. this is deferred
+                * handling of an HPD short pulse. By the time we get here
+                * the link status may have changed, so read the latest
+                * status from the DPCD registers; if the link is still
+                * good, skip running link training again.
+                */
+               union hpd_irq_data irq_data;
+
+               memset(&irq_data, 0, sizeof(irq_data));
+
+               /* Clear the in-progress flag before dc_link_dp_handle_link_loss()
+                * so that a new link-loss handling request can be queued if the
+                * link drops again at the end of dc_link_dp_handle_link_loss().
+                */
                spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
                offload_work->offload_wq->is_handling_link_loss = false;
                spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+
+               if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) &&
+                       dc_link_check_link_loss_status(dc_link, &irq_data))
+                       dc_link_dp_handle_link_loss(dc_link);
        }
        mutex_unlock(&adev->dm.dc_lock);
 
@@ -1433,9 +1532,7 @@ static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
        struct dc_init_data init_data;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dc_callback_init init_params;
-#endif
        int r;
 
        adev->dm.ddev = adev_to_drm(adev);
@@ -1443,9 +1540,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
        /* Zero all the fields */
        memset(&init_data, 0, sizeof(init_data));
-#ifdef CONFIG_DRM_AMD_DC_HDCP
        memset(&init_params, 0, sizeof(init_params));
-#endif
 
        mutex_init(&adev->dm.dpia_aux_lock);
        mutex_init(&adev->dm.dc_lock);
@@ -1551,6 +1646,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
                init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
 
+       /* Disable SubVP + DRR config by default */
+       init_data.flags.disable_subvp_drr = true;
+       if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR)
+               init_data.flags.disable_subvp_drr = false;
+
        init_data.flags.seamless_boot_edp_requested = false;
 
        if (check_seamless_boot_capability(adev)) {
@@ -1606,6 +1706,26 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
        adev->dm.dc->debug.ignore_cable_id = true;
 
+       /* TODO: A recent drm MST change moved ownership of the
+        * vc_next_start_slot update from the driver into drm. This forces us
+        * to keep vc_next_start_slot updated through the drm function on every
+        * call, regardless of whether mst_state is active; otherwise the next
+        * hotplug will report a wrong start_slot number. As a temporary
+        * workaround we also notify drm of MST deallocation when the link is
+        * no longer of MST type at stream-uncommit time, so we have more time
+        * to work on a proper solution. Ideally, when the
+        * dm_helpers_dp_mst_stop_top_mgr message is triggered, we should tell
+        * drm to do a complete "reset" of its MST state and stop calling
+        * further drm MST functions once the link is no longer of MST type.
+        * This can happen when MST hubs/displays are unplugged; when the
+        * stream uncommit arrives later after the unplug, we should only
+        * reset hardware state.
+        */
+       adev->dm.dc->debug.temp_mst_deallocation_sequence = true;
+
+       if (adev->dm.dc->caps.dp_hdmi21_pcon_support)
+               DRM_INFO("DP-HDMI FRL PCON supported\n");
+
        r = dm_dmub_hw_init(adev);
        if (r) {
                DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@@ -1646,7 +1766,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                        DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
        }
 
-#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
                adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
 
@@ -1657,9 +1776,11 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 
                dc_init_callbacks(adev->dm.dc, &init_params);
        }
-#endif
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-       adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
+       adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+       if (!adev->dm.secure_display_ctxs) {
+               DRM_ERROR("amdgpu: failed to initialize secure_display_ctxs.\n");
+       }
 #endif
        if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
                init_completion(&adev->dm.dmub_aux_transfer_done);
@@ -1750,13 +1871,17 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
        amdgpu_dm_destroy_drm_device(&adev->dm);
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-       if (adev->dm.crc_rd_wrk) {
-               flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
-               kfree(adev->dm.crc_rd_wrk);
-               adev->dm.crc_rd_wrk = NULL;
+       if (adev->dm.secure_display_ctxs) {
+               for (i = 0; i < adev->mode_info.num_crtc; i++) {
+                       if (adev->dm.secure_display_ctxs[i].crtc) {
+                               flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+                               flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+                       }
+               }
+               kfree(adev->dm.secure_display_ctxs);
+               adev->dm.secure_display_ctxs = NULL;
        }
 #endif
-#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
                adev->dm.hdcp_workqueue = NULL;
@@ -1764,7 +1889,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
 
        if (adev->dm.dc)
                dc_deinit_callbacks(adev->dm.dc);
-#endif
 
        dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
 
@@ -1888,25 +2012,17 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
                return 0;
        }
 
-       r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
-       if (r == -ENOENT) {
+       r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu);
+       if (r == -ENODEV) {
                /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
                DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
                adev->dm.fw_dmcu = NULL;
                return 0;
        }
-       if (r) {
-               dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
-                       fw_name_dmcu);
-               return r;
-       }
-
-       r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
        if (r) {
                dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
                        fw_name_dmcu);
-               release_firmware(adev->dm.fw_dmcu);
-               adev->dm.fw_dmcu = NULL;
+               amdgpu_ucode_release(&adev->dm.fw_dmcu);
                return r;
        }
 
@@ -1952,7 +2068,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        struct dmub_srv_fb_info *fb_info;
        struct dmub_srv *dmub_srv;
        const struct dmcub_firmware_header_v1_0 *hdr;
-       const char *fw_name_dmub;
        enum dmub_asic dmub_asic;
        enum dmub_status status;
        int r;
@@ -1960,73 +2075,43 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        switch (adev->ip_versions[DCE_HWIP][0]) {
        case IP_VERSION(2, 1, 0):
                dmub_asic = DMUB_ASIC_DCN21;
-               fw_name_dmub = FIRMWARE_RENOIR_DMUB;
-               if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
-                       fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
                break;
        case IP_VERSION(3, 0, 0):
-               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
-                       dmub_asic = DMUB_ASIC_DCN30;
-                       fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
-               } else {
-                       dmub_asic = DMUB_ASIC_DCN30;
-                       fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
-               }
+               dmub_asic = DMUB_ASIC_DCN30;
                break;
        case IP_VERSION(3, 0, 1):
                dmub_asic = DMUB_ASIC_DCN301;
-               fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
                break;
        case IP_VERSION(3, 0, 2):
                dmub_asic = DMUB_ASIC_DCN302;
-               fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
                break;
        case IP_VERSION(3, 0, 3):
                dmub_asic = DMUB_ASIC_DCN303;
-               fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
                break;
        case IP_VERSION(3, 1, 2):
        case IP_VERSION(3, 1, 3):
                dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
-               fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
                break;
        case IP_VERSION(3, 1, 4):
                dmub_asic = DMUB_ASIC_DCN314;
-               fw_name_dmub = FIRMWARE_DCN_314_DMUB;
                break;
        case IP_VERSION(3, 1, 5):
                dmub_asic = DMUB_ASIC_DCN315;
-               fw_name_dmub = FIRMWARE_DCN_315_DMUB;
                break;
        case IP_VERSION(3, 1, 6):
                dmub_asic = DMUB_ASIC_DCN316;
-               fw_name_dmub = FIRMWARE_DCN316_DMUB;
                break;
        case IP_VERSION(3, 2, 0):
                dmub_asic = DMUB_ASIC_DCN32;
-               fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
                break;
        case IP_VERSION(3, 2, 1):
                dmub_asic = DMUB_ASIC_DCN321;
-               fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
                break;
        default:
                /* ASIC doesn't support DMUB. */
                return 0;
        }
 
-       r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
-       if (r) {
-               DRM_ERROR("DMUB firmware loading failed: %d\n", r);
-               return 0;
-       }
-
-       r = amdgpu_ucode_validate(adev->dm.dmub_fw);
-       if (r) {
-               DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
-               return 0;
-       }
-
        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
        adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
@@ -2093,7 +2178,9 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
         * TODO: Move this into GART.
         */
        r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
-                                   AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
+                                   AMDGPU_GEM_DOMAIN_VRAM |
+                                   AMDGPU_GEM_DOMAIN_GTT,
+                                   &adev->dm.dmub_bo,
                                    &adev->dm.dmub_bo_gpu_addr,
                                    &adev->dm.dmub_bo_cpu_addr);
        if (r)
@@ -2148,11 +2235,8 @@ static int dm_sw_fini(void *handle)
                adev->dm.dmub_srv = NULL;
        }
 
-       release_firmware(adev->dm.dmub_fw);
-       adev->dm.dmub_fw = NULL;
-
-       release_firmware(adev->dm.fw_dmcu);
-       adev->dm.fw_dmcu = NULL;
+       amdgpu_ucode_release(&adev->dm.dmub_fw);
+       amdgpu_ucode_release(&adev->dm.fw_dmcu);
 
        return 0;
 }
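
Several hunks in this diff (load_dmcu_fw(), dm_dmub_sw_init()/dm_sw_fini(), and dm_init_microcode() further down) switch from the request_firmware_direct() + amdgpu_ucode_validate() + release_firmware() triple to the paired amdgpu_ucode_request()/amdgpu_ucode_release() helpers. A minimal sketch of the pairing, under the assumption that amdgpu_ucode_request() both fetches and validates the image while amdgpu_ucode_release() frees it and clears the pointer:

	int r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);

	if (r)
		return r;

	/* ... use adev->dm.dmub_fw->data ... */

	amdgpu_ucode_release(&adev->dm.dmub_fw);	/* frees and clears the pointer */
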
@@ -2178,6 +2262,8 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev)
                                DRM_ERROR("DM_MST: Failed to start MST\n");
                                aconnector->dc_link->type =
                                        dc_connection_single;
+                               ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+                                                                    aconnector->dc_link);
                                break;
                        }
                }
@@ -2223,7 +2309,7 @@ static int dm_late_init(void *handle)
                struct dc_link *edp_links[MAX_NUM_EDP];
                int edp_num;
 
-               get_edp_links(adev->dm.dc, edp_links, &edp_num);
+               dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
                for (i = 0; i < edp_num; i++) {
                        if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
                                return -EINVAL;
@@ -2246,7 +2332,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
                if (aconnector->dc_link->type != dc_connection_mst_branch ||
-                   aconnector->mst_port)
+                   aconnector->mst_root)
                        continue;
 
                mgr = &aconnector->mst_mgr;
@@ -2254,6 +2340,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
                if (suspend) {
                        drm_dp_mst_topology_mgr_suspend(mgr);
                } else {
+                       /* if extended timeout is supported in hardware,
+                        * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer
+                        * CTS 4.2.1.1 regression introduced by CTS specs requirement update.
+                        */
+                       try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+                       if (!dp_is_lttpr_present(aconnector->dc_link))
+                               try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+
                        ret = drm_dp_mst_topology_mgr_resume(mgr, true);
                        if (ret < 0) {
                                dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
@@ -2391,11 +2485,11 @@ static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
                                         enable ? "enable" : "disable");
 
                        if (enable) {
-                               rc = dm_enable_vblank(&acrtc->base);
+                               rc = amdgpu_dm_crtc_enable_vblank(&acrtc->base);
                                if (rc)
                                        DRM_WARN("Failed to enable vblank interrupts\n");
                        } else {
-                               dm_disable_vblank(&acrtc->base);
+                               amdgpu_dm_crtc_disable_vblank(&acrtc->base);
                        }
 
                }
@@ -2438,7 +2532,7 @@ static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
                        goto fail;
        }
 
-       res = dc_commit_state(dc, context);
+       res = dc_commit_streams(dc, context->streams, context->stream_count);
 
 fail:
        dc_release_state(context);
@@ -2499,7 +2593,7 @@ struct amdgpu_dm_connector *
 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
                                             struct drm_crtc *crtc)
 {
-       uint32_t i;
+       u32 i;
        struct drm_connector_state *new_con_state;
        struct drm_connector *connector;
        struct drm_crtc *crtc_from_state;
@@ -2624,10 +2718,13 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state,
                        bundle->surface_updates[m].surface->force_full_update =
                                true;
                }
-               dc_commit_updates_for_stream(
-                       dm->dc, bundle->surface_updates,
-                       dc_state->stream_status->plane_count,
-                       dc_state->streams[k], &bundle->stream_update, dc_state);
+
+               update_planes_and_stream_adapter(dm->dc,
+                                        UPDATE_TYPE_FULL,
+                                        dc_state->stream_status->plane_count,
+                                        dc_state->streams[k],
+                                        &bundle->stream_update,
+                                        bundle->surface_updates);
        }
 
 cleanup:
@@ -2697,7 +2794,7 @@ static int dm_resume(void *handle)
                        dc_enable_dmub_outbox(adev->dm.dc);
                }
 
-               WARN_ON(!dc_commit_state(dm->dc, dc_state));
+               WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
 
                dm_gpureset_commit_state(dm->cached_dc_state, dm);
 
@@ -2747,16 +2844,18 @@ static int dm_resume(void *handle)
        drm_for_each_connector_iter(connector, &iter) {
                aconnector = to_amdgpu_dm_connector(connector);
 
+               if (!aconnector->dc_link)
+                       continue;
+
                /*
                 * this is the case when traversing through already created
                 * MST connectors, should be skipped
                 */
-               if (aconnector->dc_link &&
-                   aconnector->dc_link->type == dc_connection_mst_branch)
+               if (aconnector->dc_link->type == dc_connection_mst_branch)
                        continue;
 
                mutex_lock(&aconnector->hpd_lock);
-               if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+               if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");
 
                if (aconnector->base.force && new_connection_type == dc_connection_none) {
@@ -2863,7 +2962,7 @@ const struct amdgpu_ip_block_version dm_ip_block =
 
 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
        .fb_create = amdgpu_display_user_framebuffer_create,
-       .get_format_info = amd_get_format_info,
+       .get_format_info = amdgpu_dm_plane_get_format_info,
        .atomic_check = amdgpu_dm_atomic_check,
        .atomic_commit = drm_atomic_helper_commit,
 };
@@ -2876,30 +2975,18 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
 {
        struct amdgpu_dm_backlight_caps *caps;
-       struct amdgpu_display_manager *dm;
        struct drm_connector *conn_base;
        struct amdgpu_device *adev;
-       struct dc_link *link = NULL;
        struct drm_luminance_range_info *luminance_range;
-       int i;
 
-       if (!aconnector || !aconnector->dc_link)
-               return;
-
-       link = aconnector->dc_link;
-       if (link->connector_signal != SIGNAL_TYPE_EDP)
+       if (aconnector->bl_idx == -1 ||
+           aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
                return;
 
        conn_base = &aconnector->base;
        adev = drm_to_adev(conn_base->dev);
-       dm = &adev->dm;
-       for (i = 0; i < dm->num_of_edps; i++) {
-               if (link == dm->backlight_link[i])
-                       break;
-       }
-       if (i >= dm->num_of_edps)
-               return;
-       caps = &dm->backlight_caps[i];
+
+       caps = &adev->dm.backlight_caps[aconnector->bl_idx];
        caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
        caps->aux_support = false;
 
@@ -2914,8 +3001,14 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
                caps->aux_support = true;
 
        luminance_range = &conn_base->display_info.luminance_range;
-       caps->aux_min_input_signal = luminance_range->min_luminance;
-       caps->aux_max_input_signal = luminance_range->max_luminance;
+
+       if (luminance_range->max_luminance) {
+               caps->aux_min_input_signal = luminance_range->min_luminance;
+               caps->aux_max_input_signal = luminance_range->max_luminance;
+       } else {
+               caps->aux_min_input_signal = 0;
+               caps->aux_max_input_signal = 512;
+       }
 }
 
 void amdgpu_dm_update_connector_after_detect(
@@ -3034,6 +3127,10 @@ void amdgpu_dm_update_connector_after_detect(
                                                    aconnector->edid);
                }
 
+               aconnector->timing_requested = kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
+               if (!aconnector->timing_requested)
+                       dm_error("%s: failed to create aconnector->timing_requested\n", __func__);
+
                drm_connector_update_edid_property(connector, aconnector->edid);
                amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
                update_connector_ext_caps(aconnector);
@@ -3045,11 +3142,11 @@ void amdgpu_dm_update_connector_after_detect(
                dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
                aconnector->edid = NULL;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
+               kfree(aconnector->timing_requested);
+               aconnector->timing_requested = NULL;
                /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
                if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
                        connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
-#endif
        }
 
        mutex_unlock(&dev->mode_config.mutex);
@@ -3066,9 +3163,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
        struct drm_device *dev = connector->dev;
        enum dc_connection_type new_connection_type = dc_connection_none;
        struct amdgpu_device *adev = drm_to_adev(dev);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
        struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
-#endif
        bool ret = false;
 
        if (adev->dm.disable_hpd_irq)
@@ -3080,16 +3175,16 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
         */
        mutex_lock(&aconnector->hpd_lock);
 
-#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->dm.hdcp_workqueue) {
                hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
                dm_con_state->update_hdcp = true;
        }
-#endif
        if (aconnector->fake_enable)
                aconnector->fake_enable = false;
 
-       if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+       aconnector->timing_changed = false;
+
+       if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
                DRM_ERROR("KMS: Failed to detect connector\n");
 
        if (aconnector->base.force && new_connection_type == dc_connection_none) {
@@ -3130,8 +3225,8 @@ static void handle_hpd_irq(void *param)
 
 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
 {
-       uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
-       uint8_t dret;
+       u8 esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+       u8 dret;
        bool new_irq_handled = false;
        int dpcd_addr;
        int dpcd_bytes_to_read;
@@ -3159,7 +3254,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
 
        while (dret == dpcd_bytes_to_read &&
                process_count < max_process_count) {
-               uint8_t retry;
+               u8 retry;
                dret = 0;
 
                process_count++;
@@ -3178,7 +3273,7 @@ static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
                                dpcd_bytes_to_read - 1;
 
                        for (retry = 0; retry < 3; retry++) {
-                               uint8_t wret;
+                               u8 wret;
 
                                wret = drm_dp_dpcd_write(
                                        &aconnector->dm_dp_aux.aux,
@@ -3238,7 +3333,7 @@ static void handle_hpd_rx_irq(void *param)
        union hpd_irq_data hpd_irq_data;
        bool link_loss = false;
        bool has_left_work = false;
-       int idx = aconnector->base.index;
+       int idx = dc_link->link_index;
        struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
 
        memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
@@ -3292,7 +3387,7 @@ static void handle_hpd_rx_irq(void *param)
 out:
        if (result && !is_mst_root_connector) {
                /* Downstream Port status changed. */
-               if (!dc_link_detect_sink(dc_link, &new_connection_type))
+               if (!dc_link_detect_connection_type(dc_link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");
 
                if (aconnector->base.force && new_connection_type == dc_connection_none) {
@@ -3330,12 +3425,10 @@ out:
                        }
                }
        }
-#ifdef CONFIG_DRM_AMD_DC_HDCP
        if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
                if (adev->dm.hdcp_workqueue)
                        hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
        }
-#endif
 
        if (dc_link->type != dc_connection_mst_branch)
                drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
@@ -3380,7 +3473,7 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
                                        (void *) aconnector);
 
                        if (adev->dm.hpd_rx_offload_wq)
-                               adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+                               adev->dm.hpd_rx_offload_wq[dc_link->link_index].aconnector =
                                        aconnector;
                }
        }
@@ -4084,16 +4177,15 @@ static const struct backlight_ops amdgpu_dm_backlight_ops = {
 };
 
 static void
-amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
+amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector)
 {
-       char bl_name[16];
+       struct drm_device *drm = aconnector->base.dev;
+       struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm;
        struct backlight_properties props = { 0 };
-
-       amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
-       dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
+       char bl_name[16];
 
        if (!acpi_video_backlight_use_native()) {
-               drm_info(adev_to_drm(dm->adev), "Skipping amdgpu DM backlight registration\n");
+               drm_info(drm, "Skipping amdgpu DM backlight registration\n");
                /* Try registering an ACPI video backlight device instead. */
                acpi_video_register_backlight();
                return;
@@ -4104,17 +4196,16 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
        props.type = BACKLIGHT_RAW;
 
        snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
-                adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
+                drm->primary->index + aconnector->bl_idx);
 
-       dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
-                                                                      adev_to_drm(dm->adev)->dev,
-                                                                      dm,
-                                                                      &amdgpu_dm_backlight_ops,
-                                                                      &props);
+       dm->backlight_dev[aconnector->bl_idx] =
+               backlight_device_register(bl_name, drm->dev, dm,
+                                         &amdgpu_dm_backlight_ops, &props);
 
-       if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
+       if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) {
                DRM_ERROR("DM: Backlight registration failed!\n");
-       else
+               dm->backlight_dev[aconnector->bl_idx] = NULL;
+       } else
                DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
 }
 
@@ -4159,24 +4250,36 @@ static int initialize_plane(struct amdgpu_display_manager *dm,
 }
 
 
-static void register_backlight_device(struct amdgpu_display_manager *dm,
-                                     struct dc_link *link)
+static void setup_backlight_device(struct amdgpu_display_manager *dm,
+                                  struct amdgpu_dm_connector *aconnector)
 {
-       if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
-           link->type != dc_connection_none) {
-               /*
-                * Event if registration failed, we should continue with
-                * DM initialization because not having a backlight control
-                * is better then a black screen.
-                */
-               if (!dm->backlight_dev[dm->num_of_edps])
-                       amdgpu_dm_register_backlight_device(dm);
+       struct dc_link *link = aconnector->dc_link;
+       int bl_idx = dm->num_of_edps;
 
-               if (dm->backlight_dev[dm->num_of_edps]) {
-                       dm->backlight_link[dm->num_of_edps] = link;
-                       dm->num_of_edps++;
-               }
+       if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
+           link->type == dc_connection_none)
+               return;
+
+       if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
+               drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
+               return;
        }
+
+       aconnector->bl_idx = bl_idx;
+
+       amdgpu_dm_update_backlight_caps(dm, bl_idx);
+       dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
+
+       amdgpu_dm_register_backlight_device(aconnector);
+       if (!dm->backlight_dev[bl_idx]) {
+               aconnector->bl_idx = -1;
+               return;
+       }
+
+       dm->backlight_link[bl_idx] = link;
+       dm->num_of_edps++;
+
+       update_connector_ext_caps(aconnector);
 }
 
 static void amdgpu_set_panel_orientation(struct drm_connector *connector);
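
Taken together, the two backlight hunks above replace the old per-display-manager registration with a per-connector flow. An outline of the new sequence (all names from this patch, no additional code implied):

	/*
	 * setup_backlight_device(dm, aconnector):
	 *   - bail out unless the link is eDP/LVDS and actually connected
	 *   - cap the number of backlights at AMDGPU_DM_MAX_NUM_EDP
	 *   - aconnector->bl_idx = dm->num_of_edps
	 *   - amdgpu_dm_update_backlight_caps() + default brightness level
	 *   - amdgpu_dm_register_backlight_device(aconnector), which registers
	 *     "amdgpu_blN" into dm->backlight_dev[bl_idx]
	 *   - on success: record dm->backlight_link[bl_idx], bump num_of_edps,
	 *     refresh the connector's extended caps
	 *   - on failure: reset aconnector->bl_idx to -1
	 */
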
@@ -4192,20 +4295,23 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector);
 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 {
        struct amdgpu_display_manager *dm = &adev->dm;
-       int32_t i;
+       s32 i;
        struct amdgpu_dm_connector *aconnector = NULL;
        struct amdgpu_encoder *aencoder = NULL;
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
-       uint32_t link_cnt;
-       int32_t primary_planes;
+       u32 link_cnt;
+       s32 primary_planes;
        enum dc_connection_type new_connection_type = dc_connection_none;
        const struct dc_plane_cap *plane;
        bool psr_feature_enabled = false;
+       int max_overlay = dm->dc->caps.max_slave_planes;
 
        dm->display_indexes_num = dm->dc->caps.max_streams;
        /* Update the actual used number of crtc */
        adev->mode_info.num_crtc = adev->dm.display_indexes_num;
 
+       amdgpu_dm_set_irq_funcs(adev);
+
        link_cnt = dm->dc->caps.max_links;
        if (amdgpu_dm_mode_config_init(dm->adev)) {
                DRM_ERROR("DM: Failed to initialize mode config\n");
@@ -4249,20 +4355,17 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
                        continue;
 
-               if (!plane->blends_with_above || !plane->blends_with_below)
-                       continue;
-
                if (!plane->pixel_format_support.argb8888)
                        continue;
 
+               if (max_overlay-- == 0)
+                       break;
+
                if (initialize_plane(dm, NULL, primary_planes + i,
                                     DRM_PLANE_TYPE_OVERLAY, plane)) {
                        DRM_ERROR("KMS: Failed to initialize overlay plane\n");
                        goto fail;
                }
-
-               /* Only create one overlay plane. */
-               break;
        }
 
        for (i = 0; i < dm->dc->caps.max_streams; i++)
@@ -4341,7 +4444,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 
                link = dc_get_link_at_index(dm->dc, i);
 
-               if (!dc_link_detect_sink(link, &new_connection_type))
+               if (!dc_link_detect_connection_type(link, &new_connection_type))
                        DRM_ERROR("KMS: Failed to detect connector\n");
 
                if (aconnector->base.force && new_connection_type == dc_connection_none) {
@@ -4356,10 +4459,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 
                        if (ret) {
                                amdgpu_dm_update_connector_after_detect(aconnector);
-                               register_backlight_device(dm, link);
-
-                               if (dm->num_of_edps)
-                                       update_connector_ext_caps(aconnector);
+                               setup_backlight_device(dm, aconnector);
 
                                if (psr_feature_enabled)
                                        amdgpu_dm_set_psr_caps(link);
@@ -4517,6 +4617,61 @@ DEVICE_ATTR_WO(s3_debug);
 
 #endif
 
+static int dm_init_microcode(struct amdgpu_device *adev)
+{
+       char *fw_name_dmub;
+       int r;
+
+       switch (adev->ip_versions[DCE_HWIP][0]) {
+       case IP_VERSION(2, 1, 0):
+               fw_name_dmub = FIRMWARE_RENOIR_DMUB;
+               if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
+                       fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
+               break;
+       case IP_VERSION(3, 0, 0):
+               if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
+                       fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+               else
+                       fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+               break;
+       case IP_VERSION(3, 0, 1):
+               fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
+               break;
+       case IP_VERSION(3, 0, 2):
+               fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
+               break;
+       case IP_VERSION(3, 0, 3):
+               fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
+               break;
+       case IP_VERSION(3, 1, 2):
+       case IP_VERSION(3, 1, 3):
+               fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
+               break;
+       case IP_VERSION(3, 1, 4):
+               fw_name_dmub = FIRMWARE_DCN_314_DMUB;
+               break;
+       case IP_VERSION(3, 1, 5):
+               fw_name_dmub = FIRMWARE_DCN_315_DMUB;
+               break;
+       case IP_VERSION(3, 1, 6):
+               fw_name_dmub = FIRMWARE_DCN316_DMUB;
+               break;
+       case IP_VERSION(3, 2, 0):
+               fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB;
+               break;
+       case IP_VERSION(3, 2, 1):
+               fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
+               break;
+       default:
+               /* ASIC doesn't support DMUB. */
+               return 0;
+       }
+       r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub);
+       if (r)
+               DRM_ERROR("DMUB firmware loading failed: %d\n", r);
+       return r;
+}
+
 static int dm_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -4643,8 +4798,6 @@ static int dm_early_init(void *handle)
                break;
        }
 
-       amdgpu_dm_set_irq_funcs(adev);
-
        if (adev->mode_info.funcs == NULL)
                adev->mode_info.funcs = &dm_display_funcs;
 
@@ -4660,7 +4813,7 @@ static int dm_early_init(void *handle)
 #endif
        adev->dc_enabled = true;
 
-       return 0;
+       return dm_init_microcode(adev);
 }
 
 static bool modereset_required(struct drm_crtc_state *crtc_state)
@@ -4725,7 +4878,7 @@ fill_plane_color_attributes(const struct drm_plane_state *plane_state,
 static int
 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
                            const struct drm_plane_state *plane_state,
-                           const uint64_t tiling_flags,
+                           const u64 tiling_flags,
                            struct dc_plane_info *plane_info,
                            struct dc_plane_address *address,
                            bool tmz_surface,
@@ -4823,7 +4976,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
        if (ret)
                return ret;
 
-       ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
+       ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
                                           plane_info->rotation, tiling_flags,
                                           &plane_info->tiling_info,
                                           &plane_info->plane_size,
@@ -4832,7 +4985,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
        if (ret)
                return ret;
 
-       fill_blending_from_plane_state(
+       amdgpu_dm_plane_fill_blending_from_plane_state(
                plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
                &plane_info->global_alpha, &plane_info->global_alpha_value);
 
@@ -4851,7 +5004,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
        int ret;
        bool force_disable_dcc = false;
 
-       ret = fill_dc_scaling_info(adev, plane_state, &scaling_info);
+       ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
        if (ret)
                return ret;
 
@@ -4900,7 +5053,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev,
 
 static inline void fill_dc_dirty_rect(struct drm_plane *plane,
                                      struct rect *dirty_rect, int32_t x,
-                                     int32_t y, int32_t width, int32_t height,
+                                     s32 y, s32 width, s32 height,
                                      int *i, bool ffu)
 {
        if (*i > DC_MAX_DIRTY_RECTS)
@@ -4936,6 +5089,7 @@ out:
  * @new_plane_state: New state of @plane
  * @crtc_state: New state of CRTC connected to the @plane
  * @flip_addrs: DC flip tracking struct, which also tracts dirty rects
+ * @dirty_regions_changed: out parameter, set when the dirty regions have changed
  *
  * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
  * (referred to as "damage clips" in DRM nomenclature) that require updating on
@@ -4952,15 +5106,17 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
                                struct drm_plane_state *old_plane_state,
                                struct drm_plane_state *new_plane_state,
                                struct drm_crtc_state *crtc_state,
-                               struct dc_flip_addrs *flip_addrs)
+                               struct dc_flip_addrs *flip_addrs,
+                               bool *dirty_regions_changed)
 {
        struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
        struct rect *dirty_rects = flip_addrs->dirty_rects;
-       uint32_t num_clips;
+       u32 num_clips;
        struct drm_mode_rect *clips;
        bool bb_changed;
        bool fb_changed;
-       uint32_t i = 0;
+       u32 i = 0;
+       *dirty_regions_changed = false;
 
        /*
         * Cursor plane has it's own dirty rect update interface. See
@@ -4978,9 +5134,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
 
                for (; flip_addrs->dirty_rect_count < num_clips; clips++)
                        fill_dc_dirty_rect(new_plane_state->plane,
-                                          &dirty_rects[i], clips->x1,
-                                          clips->y1, clips->x2 - clips->x1,
-                                          clips->y2 - clips->y1,
+                                          &dirty_rects[flip_addrs->dirty_rect_count],
+                                          clips->x1, clips->y1,
+                                          clips->x2 - clips->x1, clips->y2 - clips->y1,
                                           &flip_addrs->dirty_rect_count,
                                           false);
                return;
@@ -5005,6 +5161,8 @@ static void fill_dc_dirty_rects(struct drm_plane *plane,
                new_plane_state->plane->base.id,
                bb_changed, fb_changed, num_clips);
 
+       *dirty_regions_changed = bb_changed;
+
        if (bb_changed) {
                fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i],
                                   new_plane_state->crtc_x,
@@ -5106,7 +5264,7 @@ static enum dc_color_depth
 convert_color_depth_from_display_info(const struct drm_connector *connector,
                                      bool is_y420, int requested_bpc)
 {
-       uint8_t bpc;
+       u8 bpc;
 
        if (is_y420) {
                bpc = 8;
@@ -5624,7 +5782,6 @@ static bool is_freesync_video_mode(const struct drm_display_mode *mode,
                return true;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
                            struct dc_sink *sink, struct dc_stream_state *stream,
                            struct dsc_dec_dpcd_caps *dsc_caps)
@@ -5650,11 +5807,15 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
                                    uint32_t max_dsc_target_bpp_limit_override)
 {
        const struct dc_link_settings *verified_link_cap = NULL;
-       uint32_t link_bw_in_kbps;
-       uint32_t edp_min_bpp_x16, edp_max_bpp_x16;
+       u32 link_bw_in_kbps;
+       u32 edp_min_bpp_x16, edp_max_bpp_x16;
        struct dc *dc = sink->ctx->dc;
        struct dc_dsc_bw_range bw_range = {0};
        struct dc_dsc_config dsc_cfg = {0};
+       struct dc_dsc_config_options dsc_options = {0};
+
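+       /* DC takes the target bpp limit in 1/16th-bpp units, hence the multiply by 16. */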
+       dc_dsc_get_default_config_option(dc, &dsc_options);
+       dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
 
        verified_link_cap = dc_link_get_link_cap(stream->link);
        link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap);
@@ -5677,8 +5838,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
                if (bw_range.max_kbps < link_bw_in_kbps) {
                        if (dc_dsc_compute_config(dc->res_pool->dscs[0],
                                        dsc_caps,
-                                       dc->debug.dsc_min_slice_height_override,
-                                       max_dsc_target_bpp_limit_override,
+                                       &dsc_options,
                                        0,
                                        &stream->timing,
                                        &dsc_cfg)) {
@@ -5692,8 +5852,7 @@ static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector,
 
        if (dc_dsc_compute_config(dc->res_pool->dscs[0],
                                dsc_caps,
-                               dc->debug.dsc_min_slice_height_override,
-                               max_dsc_target_bpp_limit_override,
+                               &dsc_options,
                                link_bw_in_kbps,
                                &stream->timing,
                                &dsc_cfg)) {
@@ -5708,12 +5867,16 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
                                        struct dsc_dec_dpcd_caps *dsc_caps)
 {
        struct drm_connector *drm_connector = &aconnector->base;
-       uint32_t link_bandwidth_kbps;
+       u32 link_bandwidth_kbps;
        struct dc *dc = sink->ctx->dc;
-       uint32_t max_supported_bw_in_kbps, timing_bw_in_kbps;
-       uint32_t dsc_max_supported_bw_in_kbps;
-       uint32_t max_dsc_target_bpp_limit_override =
+       u32 max_supported_bw_in_kbps, timing_bw_in_kbps;
+       u32 dsc_max_supported_bw_in_kbps;
+       u32 max_dsc_target_bpp_limit_override =
                drm_connector->display_info.max_dsc_bpp;
+       struct dc_dsc_config_options dsc_options = {0};
+
+       dc_dsc_get_default_config_option(dc, &dsc_options);
+       dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16;
 
        link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
                                                        dc_link_get_link_cap(aconnector->dc_link));
@@ -5732,8 +5895,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
                if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
                        if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
                                                dsc_caps,
-                                               aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
-                                               max_dsc_target_bpp_limit_override,
+                                               &dsc_options,
                                                link_bandwidth_kbps,
                                                &stream->timing,
                                                &stream->timing.dsc_cfg)) {
@@ -5750,8 +5912,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
                                        dsc_max_supported_bw_in_kbps > 0)
                                if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
                                                dsc_caps,
-                                               aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
-                                               max_dsc_target_bpp_limit_override,
+                                               &dsc_options,
                                                dsc_max_supported_bw_in_kbps,
                                                &stream->timing,
                                                &stream->timing.dsc_cfg)) {
@@ -5775,7 +5936,6 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
        if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
                stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
 }
-#endif /* CONFIG_DRM_AMD_DC_DCN */
 
 static struct dc_stream_state *
 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
@@ -5798,9 +5958,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        int mode_refresh;
        int preferred_refresh = 0;
        enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct dsc_dec_dpcd_caps dsc_caps;
-#endif
 
        struct dc_sink *sink = NULL;
 
@@ -5891,12 +6049,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                        stream, &mode, &aconnector->base, con_state, old_stream,
                        requested_bpc);
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
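+       /*
+        * DP automated (CTS) tests can request a specific timing; when such a
+        * request is pending, override the stream timing with it here.
+        */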
+       if (aconnector->timing_changed) {
+               DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n",
+                               __func__,
+                               stream->timing.display_color_depth,
+                               aconnector->timing_requested->display_color_depth);
+               stream->timing = *aconnector->timing_requested;
+       }
+
        /* SST DSC determination policy */
        update_dsc_caps(aconnector, sink, stream, &dsc_caps);
        if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
                apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
-#endif
 
        update_stream_scaling_settings(&mode, dm_state, stream);
 
@@ -6071,10 +6235,8 @@ static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
 {
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
-       const struct dc_link *link = aconnector->dc_link;
        struct amdgpu_device *adev = drm_to_adev(connector->dev);
        struct amdgpu_display_manager *dm = &adev->dm;
-       int i;
 
        /*
         * Call only if mst_mgr was initialized before since it's not done
@@ -6083,15 +6245,10 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
        if (aconnector->mst_mgr.dev)
                drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
 
-#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
-       defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
-       for (i = 0; i < dm->num_of_edps; i++) {
-               if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
-                       backlight_device_unregister(dm->backlight_dev[i]);
-                       dm->backlight_dev[i] = NULL;
-               }
+       if (aconnector->bl_idx != -1) {
+               backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]);
+               dm->backlight_dev[aconnector->bl_idx] = NULL;
        }
-#endif
 
        if (aconnector->dc_em_sink)
                dc_sink_release(aconnector->dc_em_sink);
@@ -6285,7 +6442,6 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc,
        dc_plane_state->plane_size.surface_size.width  = stream->src.width;
        dc_plane_state->plane_size.chroma_size.height  = stream->src.height;
        dc_plane_state->plane_size.chroma_size.width   = stream->src.width;
-       dc_plane_state->tiling_info.gfx9.swizzle =  DC_SW_UNKNOWN;
        dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
        dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN;
        dc_plane_state->rotation = ROTATION_ANGLE_0;
@@ -6583,11 +6739,11 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
        int clock, bpp = 0;
        bool is_y420 = false;
 
-       if (!aconnector->port || !aconnector->dc_sink)
+       if (!aconnector->mst_output_port || !aconnector->dc_sink)
                return 0;
 
-       mst_port = aconnector->port;
-       mst_mgr = &aconnector->mst_port->mst_mgr;
+       mst_port = aconnector->mst_output_port;
+       mst_mgr = &aconnector->mst_root->mst_mgr;
 
        if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
                return 0;
@@ -6597,7 +6753,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
                return PTR_ERR(mst_state);
 
        if (!mst_state->pbn_div)
-               mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link);
+               mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link);
 
        if (!state->duplicated) {
                int max_bpc = conn_state->max_requested_bpc;
@@ -6626,7 +6782,6 @@ const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
        .atomic_check = dm_encoder_helper_atomic_check
 };
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
                                            struct dc_state *dc_state,
                                            struct dsc_mst_fairness_vars *vars)
@@ -6643,7 +6798,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
 
                aconnector = to_amdgpu_dm_connector(connector);
 
-               if (!aconnector->port)
+               if (!aconnector->mst_output_port)
                        continue;
 
                if (!new_con_state || !new_con_state->crtc)
@@ -6683,7 +6838,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
                        dm_conn_state->pbn = pbn;
                        dm_conn_state->vcpi_slots = slot_num;
 
-                       ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->port,
+                       ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port,
                                                           dm_conn_state->pbn, false);
                        if (ret < 0)
                                return ret;
@@ -6691,7 +6846,7 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
                        continue;
                }
 
-               vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true);
+               vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true);
                if (vcpi < 0)
                        return vcpi;
 
@@ -6700,7 +6855,6 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
        }
        return 0;
 }
-#endif
 
 static int to_drm_connector_type(enum signal_type st)
 {
@@ -6934,7 +7088,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
        const struct drm_display_mode *m;
        struct drm_display_mode *new_mode;
        uint i;
-       uint32_t new_modes_count = 0;
+       u32 new_modes_count = 0;
 
        /* Standard FPS values
         *
@@ -6948,7 +7102,7 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
         * 60           - Commonly used
         * 48,72,96,120 - Multiples of 24
         */
-       static const uint32_t common_rates[] = {
+       static const u32 common_rates[] = {
                23976, 24000, 25000, 29970, 30000,
                48000, 50000, 60000, 72000, 96000, 120000
        };
@@ -6964,8 +7118,8 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
                return 0;
 
        for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
-               uint64_t target_vtotal, target_vtotal_diff;
-               uint64_t num, den;
+               u64 target_vtotal, target_vtotal_diff;
+               u64 num, den;
 
                if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
                        continue;
@@ -7025,12 +7179,18 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
                        to_amdgpu_dm_connector(connector);
        struct drm_encoder *encoder;
        struct edid *edid = amdgpu_dm_connector->edid;
+       struct dc_link_settings *verified_link_cap =
+                       &amdgpu_dm_connector->dc_link->verified_link_cap;
+       const struct dc *dc = amdgpu_dm_connector->dc_link->dc;
 
        encoder = amdgpu_dm_connector_to_encoder(connector);
 
        if (!drm_edid_is_valid(edid)) {
                amdgpu_dm_connector->num_modes =
                                drm_add_modes_noedid(connector, 640, 480);
+               if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING)
+                       amdgpu_dm_connector->num_modes +=
+                               drm_add_modes_noedid(connector, 1920, 1080);
        } else {
                amdgpu_dm_connector_ddc_get_modes(connector, edid);
                amdgpu_dm_connector_add_common_modes(encoder, connector);
@@ -7057,6 +7217,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
                aconnector->base.funcs->reset(&aconnector->base);
 
        aconnector->connector_id = link_index;
+       aconnector->bl_idx = -1;
        aconnector->dc_link = link;
        aconnector->base.interlace_allowed = false;
        aconnector->base.doublescan_allowed = false;
@@ -7064,6 +7225,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
        aconnector->base.dpms = DRM_MODE_DPMS_OFF;
        aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
        aconnector->audio_inst = -1;
+       aconnector->pack_sdp_v1_3 = false;
+       aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
+       memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
        mutex_init(&aconnector->hpd_lock);
 
        /*
@@ -7105,11 +7269,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
                                adev->mode_info.underscan_vborder_property,
                                0);
 
-       if (!aconnector->mst_port)
+       if (!aconnector->mst_root)
                drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
 
-       /* This defaults to the max in the range, but we want 8bpc for non-edp. */
-       aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
+       aconnector->base.state->max_bpc = 16;
        aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
 
        if (connector_type == DRM_MODE_CONNECTOR_eDP &&
@@ -7123,13 +7286,11 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
            connector_type == DRM_MODE_CONNECTOR_eDP) {
                drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
 
-               if (!aconnector->mst_port)
+               if (!aconnector->mst_root)
                        drm_connector_attach_vrr_capable_property(&aconnector->base);
 
-#ifdef CONFIG_DRM_AMD_DC_HDCP
                if (adev->dm.hdcp_workqueue)
                        drm_connector_attach_content_protection_property(&aconnector->base, true);
-#endif
        }
 }
 
@@ -7207,7 +7368,7 @@ create_i2c(struct ddc_service *ddc_service,
  */
 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
                                    struct amdgpu_dm_connector *aconnector,
-                                   uint32_t link_index,
+                                   u32 link_index,
                                    struct amdgpu_encoder *aencoder)
 {
        int res = 0;
@@ -7391,28 +7552,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
        return false;
 }
 
-#ifdef CONFIG_DRM_AMD_DC_HDCP
-static bool is_content_protection_different(struct drm_connector_state *state,
-                                           const struct drm_connector_state *old_state,
-                                           const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
+static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
+                                           struct drm_crtc_state *old_crtc_state,
+                                           struct drm_connector_state *new_conn_state,
+                                           struct drm_connector_state *old_conn_state,
+                                           const struct drm_connector *connector,
+                                           struct hdcp_workqueue *hdcp_w)
 {
        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
        struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
 
-       /* Handle: Type0/1 change */
-       if (old_state->hdcp_content_type != state->hdcp_content_type &&
-           state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-               state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+       pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+               connector->index, connector->status, connector->dpms);
+       pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+               old_conn_state->content_protection, new_conn_state->content_protection);
+
+       if (old_crtc_state)
+               pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+               old_crtc_state->enable,
+               old_crtc_state->active,
+               old_crtc_state->mode_changed,
+               old_crtc_state->active_changed,
+               old_crtc_state->connectors_changed);
+
+       if (new_crtc_state)
+               pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+               new_crtc_state->enable,
+               new_crtc_state->active,
+               new_crtc_state->mode_changed,
+               new_crtc_state->active_changed,
+               new_crtc_state->connectors_changed);
+
+       /* hdcp content type change */
+       if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
+           new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+               new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
                return true;
        }
 
-       /* CP is being re enabled, ignore this
-        *
-        * Handles:     ENABLED -> DESIRED
-        */
-       if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
-           state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
-               state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+       /* CP is being re-enabled, ignore this */
+       if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+           new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+               if (new_crtc_state && new_crtc_state->mode_changed) {
+                       new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+                       pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
+                       return true;
+               }
+               new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+               pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
                return false;
        }
 
@@ -7420,9 +7608,9 @@ static bool is_content_protection_different(struct drm_connector_state *state,
         *
         * Handles:     UNDESIRED -> ENABLED
         */
-       if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
-           state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
-               state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+       if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+           new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+               new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
 
        /* Stream removed and re-enabled
         *
@@ -7432,10 +7620,12 @@ static bool is_content_protection_different(struct drm_connector_state *state,
         *
         * Handles:     DESIRED -> DESIRED (Special case)
         */
-       if (!(old_state->crtc && old_state->crtc->enabled) &&
-               state->crtc && state->crtc->enabled &&
+       if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
+               new_conn_state->crtc && new_conn_state->crtc->enabled &&
                connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
                dm_con_state->update_hdcp = false;
+               pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
+                       __func__);
                return true;
        }
 
@@ -7447,35 +7637,41 @@ static bool is_content_protection_different(struct drm_connector_state *state,
         *
         * Handles:     DESIRED -> DESIRED (Special case)
         */
-       if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
-           connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
+       if (dm_con_state->update_hdcp &&
+           new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+           connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
                dm_con_state->update_hdcp = false;
+               pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
+                       __func__);
                return true;
        }
 
-       /*
-        * Handles:     UNDESIRED -> UNDESIRED
-        *              DESIRED -> DESIRED
-        *              ENABLED -> ENABLED
-        */
-       if (old_state->content_protection == state->content_protection)
+       if (old_conn_state->content_protection == new_conn_state->content_protection) {
+               if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+                       if (new_crtc_state && new_crtc_state->mode_changed) {
+                               pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
+                                       __func__);
+                               return true;
+                       }
+                       pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
+                               __func__);
+                       return false;
+               }
+
+               pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
                return false;
+       }
 
-       /*
-        * Handles:     UNDESIRED -> DESIRED
-        *              DESIRED -> UNDESIRED
-        *              ENABLED -> UNDESIRED
-        */
-       if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
+       if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
+               pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
+                       __func__);
                return true;
+       }
 
-       /*
-        * Handles:     DESIRED -> ENABLED
-        */
+       pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
        return false;
 }
 
-#endif
 static void remove_stream(struct amdgpu_device *adev,
                          struct amdgpu_crtc *acrtc,
                          struct dc_stream_state *stream)
@@ -7517,6 +7713,8 @@ static void update_freesync_state_on_stream(
        struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
        unsigned long flags;
        bool pack_sdp_v1_3 = false;
+       struct amdgpu_dm_connector *aconn;
+       enum vrr_packet_type packet_type = PACKET_TYPE_VRR;
 
        if (!new_stream)
                return;
@@ -7541,7 +7739,7 @@ static void update_freesync_state_on_stream(
                        &vrr_params);
 
                if (adev->family < AMDGPU_FAMILY_AI &&
-                   amdgpu_dm_vrr_active(new_crtc_state)) {
+                   amdgpu_dm_crtc_vrr_active(new_crtc_state)) {
                        mod_freesync_handle_v_update(dm->freesync_module,
                                                     new_stream, &vrr_params);
 
@@ -7552,11 +7750,27 @@ static void update_freesync_state_on_stream(
                }
        }
 
+       aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context;
+
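+       /*
+        * For PCON sinks on the FreeSync whitelist, pick the FreeSync SDP
+        * version that matches the AMD VSDB version reported in the sink's EDID.
+        */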
+       if (aconn && aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
+               pack_sdp_v1_3 = aconn->pack_sdp_v1_3;
+
+               if (aconn->vsdb_info.amd_vsdb_version == 1)
+                       packet_type = PACKET_TYPE_FS_V1;
+               else if (aconn->vsdb_info.amd_vsdb_version == 2)
+                       packet_type = PACKET_TYPE_FS_V2;
+               else if (aconn->vsdb_info.amd_vsdb_version == 3)
+                       packet_type = PACKET_TYPE_FS_V3;
+
+               mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL,
+                                       &new_stream->adaptive_sync_infopacket);
+       }
+
        mod_freesync_build_vrr_infopacket(
                dm->freesync_module,
                new_stream,
                &vrr_params,
-               PACKET_TYPE_VRR,
+               packet_type,
                TRANSFER_FUNC_UNKNOWN,
                &vrr_infopacket,
                pack_sdp_v1_3);
@@ -7570,6 +7784,7 @@ static void update_freesync_state_on_stream(
        new_crtc_state->vrr_infopacket = vrr_infopacket;
 
        new_stream->vrr_infopacket = vrr_infopacket;
+       new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params);
 
        if (new_crtc_state->freesync_vrr_info_changed)
                DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
@@ -7642,8 +7857,8 @@ static void update_stream_irq_parameters(
 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
                                            struct dm_crtc_state *new_state)
 {
-       bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
-       bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
+       bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state);
+       bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state);
 
        if (!old_vrr_active && new_vrr_active) {
                /* Transition VRR inactive -> active:
@@ -7654,7 +7869,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
                 * We also need vupdate irq for the actual core vblank handling
                 * at end of vblank.
                 */
-               WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0);
+               WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0);
                WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0);
                DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
                                 __func__, new_state->base.crtc->base.id);
@@ -7662,7 +7877,7 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
                /* Transition VRR active -> inactive:
                 * Allow vblank irq disable again for fixed refresh rate.
                 */
-               WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0);
+               WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0);
                drm_crtc_vblank_put(new_state->base.crtc);
                DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
                                 __func__, new_state->base.crtc->base.id);
@@ -7681,7 +7896,7 @@ static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
         */
        for_each_old_plane_in_state(state, plane, old_plane_state, i)
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
-                       handle_cursor_update(plane, old_plane_state);
+                       amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state);
 }
 
 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
@@ -7691,8 +7906,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                    struct drm_crtc *pcrtc,
                                    bool wait_for_vblank)
 {
-       uint32_t i;
-       uint64_t timestamp_ns;
+       u32 i;
+       u64 timestamp_ns = ktime_get_ns();
        struct drm_plane *plane;
        struct drm_plane_state *old_plane_state, *new_plane_state;
        struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
@@ -7703,10 +7918,11 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                        to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
        int planes_count = 0, vpos, hpos;
        unsigned long flags;
-       uint32_t target_vblank, last_flip_vblank;
-       bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+       u32 target_vblank, last_flip_vblank;
+       bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state);
        bool cursor_update = false;
        bool pflip_present = false;
+       bool dirty_rects_changed = false;
        struct {
                struct dc_surface_update surface_updates[MAX_SURFACES];
                struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -7765,7 +7981,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                        bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
                }
 
-               fill_dc_scaling_info(dm->adev, new_plane_state,
+               amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state,
                                     &bundle->scaling_infos[planes_count]);
 
                bundle->surface_updates[planes_count].scaling_info =
@@ -7794,10 +8010,32 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                bundle->surface_updates[planes_count].plane_info =
                        &bundle->plane_infos[planes_count];
 
-               if (acrtc_state->stream->link->psr_settings.psr_feature_enabled)
+               if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
                        fill_dc_dirty_rects(plane, old_plane_state,
                                            new_plane_state, new_crtc_state,
-                                           &bundle->flip_addrs[planes_count]);
+                                           &bundle->flip_addrs[planes_count],
+                                           &dirty_rects_changed);
+
+                       /*
+                        * If the dirty regions changed, PSR-SU needs to be disabled temporarily
+                        * and re-enabled once the dirty regions become stable, to avoid a video
+                        * glitch. PSR-SU will be re-enabled in vblank_control_worker() if the
+                        * user pauses the video while PSR-SU is disabled.
+                        */
+                       if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+                           acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+                           !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+                           dirty_rects_changed) {
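+                               /*
+                                * Note when the dirty regions last changed: PSR-SU is only
+                                * re-enabled after they have been stable for 500 ms.
+                                */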
+                               mutex_lock(&dm->dc_lock);
+                               acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
+                               timestamp_ns;
+                               if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+                                       amdgpu_dm_psr_disable(acrtc_state->stream);
+                               mutex_unlock(&dm->dc_lock);
+                       }
+               }
 
                /*
                 * Only allow immediate flips for fast updates that don't
@@ -7963,12 +8201,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                                acrtc_state->stream->link->psr_settings.psr_allow_active)
                        amdgpu_dm_psr_disable(acrtc_state->stream);
 
-               dc_commit_updates_for_stream(dm->dc,
-                                                    bundle->surface_updates,
-                                                    planes_count,
-                                                    acrtc_state->stream,
-                                                    &bundle->stream_update,
-                                                    dc_state);
+               update_planes_and_stream_adapter(dm->dc,
+                                        acrtc_state->update_type,
+                                        planes_count,
+                                        acrtc_state->stream,
+                                        &bundle->stream_update,
+                                        bundle->surface_updates);
 
                /**
                 * Enable or disable the interrupts on the backend.
@@ -8016,7 +8254,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
                            !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
 #endif
-                           !acrtc_state->stream->link->psr_settings.psr_allow_active)
+                           !acrtc_state->stream->link->psr_settings.psr_allow_active &&
+                           (timestamp_ns -
+                           acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) >
+                           500000000)
                                amdgpu_dm_psr_enable(acrtc_state->stream);
                } else {
                        acrtc_attach->dm_irq_params.allow_psr_entry = false;
@@ -8141,7 +8382,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        struct amdgpu_display_manager *dm = &adev->dm;
        struct dm_atomic_state *dm_state;
        struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
-       uint32_t i, j;
+       u32 i, j;
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        unsigned long flags;
@@ -8228,7 +8469,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                 * aconnector as needed
                 */
 
-               if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+               if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
 
                        DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
 
@@ -8283,7 +8524,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
                dm_enable_per_frame_crtc_master_sync(dc_state);
                mutex_lock(&dm->dc_lock);
-               WARN_ON(!dc_commit_state(dm->dc, dc_state));
+               WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
 
                /* Allow idle optimization when vblank count is 0 for display off */
                if (dm->active_vblank_irq_count == 0)
@@ -8309,16 +8550,66 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                                acrtc->otg_inst = status->primary_otg_inst;
                }
        }
-#ifdef CONFIG_DRM_AMD_DC_HDCP
+       for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+               struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+               struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+               struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+               pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
+
+               if (!connector)
+                       continue;
+
+               pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
+                       connector->index, connector->status, connector->dpms);
+               pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
+                       old_con_state->content_protection, new_con_state->content_protection);
+
+               if (aconnector->dc_sink) {
+                       if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
+                               aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
+                               pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
+                               aconnector->dc_sink->edid_caps.display_name);
+                       }
+               }
+
+               new_crtc_state = NULL;
+               old_crtc_state = NULL;
+
+               if (acrtc) {
+                       new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+                       old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+               }
+
+               if (old_crtc_state)
+                       pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+                       old_crtc_state->enable,
+                       old_crtc_state->active,
+                       old_crtc_state->mode_changed,
+                       old_crtc_state->active_changed,
+                       old_crtc_state->connectors_changed);
+
+               if (new_crtc_state)
+                       pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
+                       new_crtc_state->enable,
+                       new_crtc_state->active,
+                       new_crtc_state->mode_changed,
+                       new_crtc_state->active_changed,
+                       new_crtc_state->connectors_changed);
+       }
+
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
                struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
                struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
                new_crtc_state = NULL;
+               old_crtc_state = NULL;
 
-               if (acrtc)
+               if (acrtc) {
                        new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+                       old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+               }
 
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 
@@ -8330,13 +8621,45 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                        continue;
                }
 
-               if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
+               if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
+                                                                                       old_con_state, connector, adev->dm.hdcp_workqueue)) {
+                       /* When a display is unplugged from the MST hub, the connector will
+                        * be destroyed within dm_dp_mst_connector_destroy. Its HDCP
+                        * properties, like type, undesired, desired, enabled,
+                        * will be lost. So, save the HDCP properties into hdcp_work within
+                        * amdgpu_dm_atomic_commit_tail. If the same display is
+                        * plugged back with the same display index, its HDCP properties
+                        * will be retrieved from hdcp_work within dm_dp_mst_get_modes.
+                        */
+
+                       bool enable_encryption = false;
+
+                       if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+                               enable_encryption = true;
+
+                       if (aconnector->dc_link && aconnector->dc_sink &&
+                               aconnector->dc_link->type == dc_connection_mst_branch) {
+                               struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+                               struct hdcp_workqueue *hdcp_w =
+                                       &hdcp_work[aconnector->dc_link->link_index];
+
+                               hdcp_w->hdcp_content_type[connector->index] =
+                                       new_con_state->hdcp_content_type;
+                               hdcp_w->content_protection[connector->index] =
+                                       new_con_state->content_protection;
+                       }
+
+                       if (new_crtc_state && new_crtc_state->mode_changed &&
+                               new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
+                               enable_encryption = true;
+
+                       DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+
                        hdcp_update_display(
                                adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
-                               new_con_state->hdcp_content_type,
-                               new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
+                               new_con_state->hdcp_content_type, enable_encryption);
+               }
        }
-#endif
 
        /* Handle connector state changes */
        for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
@@ -8413,12 +8736,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
 
                mutex_lock(&dm->dc_lock);
-               dc_commit_updates_for_stream(dm->dc,
-                                                    dummy_updates,
-                                                    status->plane_count,
-                                                    dm_new_crtc_state->stream,
-                                                    &stream_update,
-                                                    dc_state);
+               dc_update_planes_and_stream(dm->dc,
+                                           dummy_updates,
+                                           status->plane_count,
+                                           dm_new_crtc_state->stream,
+                                           &stream_update);
                mutex_unlock(&dm->dc_lock);
        }
 
@@ -8432,9 +8754,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
 #ifdef CONFIG_DEBUG_FS
                enum amdgpu_dm_pipe_crc_source cur_crc_src;
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-               struct crc_rd_work *crc_rd_wrk;
-#endif
 #endif
                /* Count number of newly disabled CRTCs for dropping PM refs later. */
                if (old_crtc_state->active && !new_crtc_state->active)
@@ -8447,9 +8766,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                update_stream_irq_parameters(dm, dm_new_crtc_state);
 
 #ifdef CONFIG_DEBUG_FS
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-               crc_rd_wrk = dm->crc_rd_wrk;
-#endif
                spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                cur_crc_src = acrtc->dm_irq_params.crc_src;
                spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
@@ -8478,10 +8794,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                                if (amdgpu_dm_crc_window_is_activated(crtc)) {
                                        spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
                                        acrtc->dm_irq_params.window_param.update_win = true;
+
+                                       /*
+                                        * It takes 2 frames for the HW to generate a stable CRC
+                                        * when resuming from suspend, so set skip_frame_cnt to 2.
+                                        */
                                        acrtc->dm_irq_params.window_param.skip_frame_cnt = 2;
-                                       spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
-                                       crc_rd_wrk->crtc = crtc;
-                                       spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
                                        spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
                                }
 #endif
@@ -8772,7 +9090,7 @@ is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
 }
 
 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
-       uint64_t num, den, res;
+       u64 num, den, res;
        struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
 
        dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
@@ -8976,7 +9294,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                if (modereset_required(new_crtc_state))
                        goto skip_modeset;
 
-               if (modeset_required(new_crtc_state, new_stream,
+               if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
                                     dm_old_crtc_state->stream)) {
 
                        WARN_ON(dm_new_crtc_state->stream);
@@ -9228,7 +9546,8 @@ static int dm_update_plane_state(struct dc *dc,
                                 struct drm_plane_state *old_plane_state,
                                 struct drm_plane_state *new_plane_state,
                                 bool enable,
-                                bool *lock_and_validation_needed)
+                                bool *lock_and_validation_needed,
+                                bool *is_top_most_overlay)
 {
 
        struct dm_atomic_state *dm_state = NULL;
@@ -9326,7 +9645,7 @@ static int dm_update_plane_state(struct dc *dc,
                if (!needs_reset)
                        return 0;
 
-               ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
+               ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state);
                if (ret)
                        return ret;
 
@@ -9336,6 +9655,14 @@ static int dm_update_plane_state(struct dc *dc,
                if (!dc_new_plane_state)
                        return -ENOMEM;
 
+               /* Block the top-most plane from being a video plane */
+               if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
+                       if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay)
+                               return -EINVAL;
+                       else
+                               *is_top_most_overlay = false;
+               }
+
                DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
                                 plane->base.id, new_plane_crtc->base.id);
 
@@ -9464,7 +9791,6 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
        return 0;
 }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
 {
        struct drm_connector *connector;
@@ -9479,7 +9805,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
                        continue;
 
                aconnector = to_amdgpu_dm_connector(connector);
-               if (!aconnector->port || !aconnector->mst_port)
+               if (!aconnector->mst_output_port || !aconnector->mst_root)
                        aconnector = NULL;
                else
                        break;
@@ -9488,9 +9814,8 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
        if (!aconnector)
                return 0;
 
-       return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
+       return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr);
 }
-#endif
 
 /**
  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
@@ -9532,12 +9857,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
        enum dc_status status;
        int ret, i;
        bool lock_and_validation_needed = false;
+       bool is_top_most_overlay = true;
        struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        struct drm_dp_mst_topology_mgr *mgr;
        struct drm_dp_mst_topology_state *mst_state;
        struct dsc_mst_fairness_vars vars[MAX_PIPES];
-#endif
 
        trace_amdgpu_dm_atomic_check_begin(state);
 
@@ -9568,7 +9892,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        new_crtc_state->connectors_changed = true;
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (dc_resource_is_dsc_encoding_supported(dc)) {
                for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                        if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
@@ -9580,7 +9903,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        }
                }
        }
-#endif
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
                dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
 
@@ -9658,7 +9980,11 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
         * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in
         * atomic state, so call drm helper to normalize zpos.
         */
-       drm_atomic_normalize_zpos(dev, state);
+       ret = drm_atomic_normalize_zpos(dev, state);
+       if (ret) {
+               drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n");
+               goto fail;
+       }
 
        /* Remove exiting planes if they are modified */
        for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
@@ -9666,7 +9992,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                            old_plane_state,
                                            new_plane_state,
                                            false,
-                                           &lock_and_validation_needed);
+                                           &lock_and_validation_needed,
+                                           &is_top_most_overlay);
                if (ret) {
                        DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
                        goto fail;
@@ -9705,20 +10032,19 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                                            old_plane_state,
                                            new_plane_state,
                                            true,
-                                           &lock_and_validation_needed);
+                                           &lock_and_validation_needed,
+                                           &is_top_most_overlay);
                if (ret) {
                        DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n");
                        goto fail;
                }
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        if (dc_resource_is_dsc_encoding_supported(dc)) {
                ret = pre_validate_dsc(state, &dm_state, vars);
                if (ret != 0)
                        goto fail;
        }
-#endif
 
        /* Run this here since we want to validate the streams we created */
        ret = drm_atomic_helper_check_planes(dev, state);
@@ -9784,7 +10110,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                lock_and_validation_needed = true;
        }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        /* set the slot info for each mst_state based on the link encoding format */
        for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
                struct amdgpu_dm_connector *aconnector;
@@ -9804,7 +10129,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                }
                drm_connector_list_iter_end(&iter);
        }
-#endif
 
        /**
         * Streams and planes are reset when there are changes that affect
@@ -9832,7 +10156,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        goto fail;
                }
 
-#if defined(CONFIG_DRM_AMD_DC_DCN)
                ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
                if (ret) {
                        DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
@@ -9844,7 +10167,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n");
                        goto fail;
                }
-#endif
 
                /*
                 * Perform validation of MST topology in the state:
@@ -9940,7 +10262,7 @@ fail:
 static bool is_dp_capable_without_timing_msa(struct dc *dc,
                                             struct amdgpu_dm_connector *amdgpu_dm_connector)
 {
-       uint8_t dpcd_data;
+       u8 dpcd_data;
        bool capable = false;
 
        if (amdgpu_dm_connector->dc_link &&
@@ -9959,7 +10281,7 @@ static bool is_dp_capable_without_timing_msa(struct dc *dc,
 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
                unsigned int offset,
                unsigned int total_length,
-               uint8_t *data,
+               u8 *data,
                unsigned int length,
                struct amdgpu_hdmi_vsdb_info *vsdb)
 {
@@ -10014,7 +10336,7 @@ static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
 }
 
 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
-               uint8_t *edid_ext, int len,
+               u8 *edid_ext, int len,
                struct amdgpu_hdmi_vsdb_info *vsdb_info)
 {
        int i;
@@ -10055,7 +10377,7 @@ static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
 }
 
 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
-               uint8_t *edid_ext, int len,
+               u8 *edid_ext, int len,
                struct amdgpu_hdmi_vsdb_info *vsdb_info)
 {
        int i;
@@ -10071,21 +10393,25 @@ static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
 }
 
 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
-               uint8_t *edid_ext, int len,
+               u8 *edid_ext, int len,
                struct amdgpu_hdmi_vsdb_info *vsdb_info)
 {
        struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
+       bool ret;
 
+       mutex_lock(&adev->dm.dc_lock);
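+       /* The CEA block is parsed via DMUB/DMCU; hold dc_lock to serialize with other DC users. */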
        if (adev->dm.dmub_srv)
-               return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
+               ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
        else
-               return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
+               ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
+       mutex_unlock(&adev->dm.dc_lock);
+       return ret;
 }
 
 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
                struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
 {
-       uint8_t *edid_ext = NULL;
+       u8 *edid_ext = NULL;
        int i;
        bool valid_vsdb_found = false;
 
@@ -10140,6 +10466,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
        struct amdgpu_device *adev = drm_to_adev(dev);
        struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
        bool freesync_capable = false;
+       enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE;
 
        if (!connector->state) {
                DRM_ERROR("%s - Connector has no state", __func__);
@@ -10232,6 +10559,26 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
                }
        }
 
+       as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link);
+
+       if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) {
+               i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
+               if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) {
+
+                       amdgpu_dm_connector->pack_sdp_v1_3 = true;
+                       amdgpu_dm_connector->as_type = as_type;
+                       amdgpu_dm_connector->vsdb_info = vsdb_info;
+
+                       amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
+                       amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
+                       if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
+                               freesync_capable = true;
+
+                       connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
+                       connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
+               }
+       }
+
 update:
        if (dm_con_state)
                dm_con_state->freesync_capable = freesync_capable;
@@ -10261,7 +10608,7 @@ void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
 }
 
 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
-                      uint32_t value, const char *func_name)
+                      u32 value, const char *func_name)
 {
 #ifdef DM_CHECK_ADDR_0
        if (address == 0) {
@@ -10276,7 +10623,7 @@ void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
                          const char *func_name)
 {
-       uint32_t value;
+       u32 value;
 #ifdef DM_CHECK_ADDR_0
        if (address == 0) {
                DC_ERR("invalid register read; address = 0\n");
@@ -10355,6 +10702,7 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(
        ret = p_notify->aux_reply.length;
        *operation_result = p_notify->result;
 out:
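+       /* Reset the completion state before the next AUX transfer. */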
+       reinit_completion(&adev->dm.dmub_aux_transfer_done);
        mutex_unlock(&adev->dm.dpia_aux_lock);
        return ret;
 }
@@ -10382,6 +10730,8 @@ int amdgpu_dm_process_dmub_set_config_sync(
                *operation_result = SET_CONFIG_UNKNOWN_ERROR;
        }
 
+       if (!is_cmd_complete)
+               reinit_completion(&adev->dm.dmub_aux_transfer_done);
        mutex_unlock(&adev->dm.dpia_aux_lock);
        return ret;
 }