Merge tag 'mips-fixes_5.14_1' of git://git.kernel.org/pub/scm/linux/kernel/git/mips...
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / display / amdgpu_dm / amdgpu_dm.c
index 652cc1a..c0ae73b 100644 (file)
@@ -28,6 +28,7 @@
 
 #include "dm_services_types.h"
 #include "dc.h"
+#include "dc_link_dp.h"
 #include "dc/inc/core_types.h"
 #include "dal_asic_id.h"
 #include "dmub/dmub_srv.h"
@@ -35,6 +36,7 @@
 #include "dc/inc/hw/abm.h"
 #include "dc/dc_dmub_srv.h"
 #include "dc/dc_edid_parser.h"
+#include "dc/dc_stat.h"
 #include "amdgpu_dm_trace.h"
 
 #include "vid.h"
 #if defined(CONFIG_DEBUG_FS)
 #include "amdgpu_dm_debugfs.h"
 #endif
+#include "amdgpu_dm_psr.h"
 
 #include "ivsrcid/ivsrcid_vislands30.h"
 
+#include "i2caux_interface.h"
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/types.h>
@@ -104,6 +108,10 @@ MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
+#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
+#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
 
 #define FIRMWARE_RAVEN_DMCU            "amdgpu/raven_dmcu.bin"
 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
@@ -204,12 +212,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 static void handle_cursor_update(struct drm_plane *plane,
                                 struct drm_plane_state *old_plane_state);
 
-static void amdgpu_dm_set_psr_caps(struct dc_link *link);
-static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
-static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
-static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
-static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
-
 static const struct drm_format_info *
 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
 
@@ -310,10 +312,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
 
-       if (otg_inst == -1) {
-               WARN_ON(1);
+       if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];
-       }
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);
@@ -392,8 +392,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;
 
-       if (!e)
-               WARN_ON(1);
+       WARN_ON(!e);
 
        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
 
@@ -596,14 +595,14 @@ static void dm_crtc_high_irq(void *interrupt_params)
 }
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 /**
  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
  * DCN generation ASICs
- * @interrupt params - interrupt parameters
+ * @interrupt_params: interrupt parameters
  *
  * Used to set crc window/read out crc value at vertical line 0 position
  */
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
 {
        struct common_irq_params *irq_params = interrupt_params;
@@ -618,6 +617,58 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
        amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
 }
 #endif
+
+/**
+ * dm_dmub_outbox1_low_irq() - Handles DMUB Outbox low-priority interrupt
+ * @interrupt_params: common_irq_params used to recover the amdgpu_device
+ *
+ * Drains pending DMUB notifications (AUX replies today; HPD handling is
+ * still a TODO below) and then reads DMCUB trace-buffer entries,
+ * forwarding each one to the amdgpu_dmub_trace tracepoint and the
+ * driver debug log.
+ */
+#define DMUB_TRACE_MAX_READ 64
+static void dm_dmub_outbox1_low_irq(void *interrupt_params)
+{
+       struct dmub_notification notify;
+       struct common_irq_params *irq_params = interrupt_params;
+       struct amdgpu_device *adev = irq_params->adev;
+       struct amdgpu_display_manager *dm = &adev->dm;
+       struct dmcub_trace_buf_entry entry = { 0 };
+       uint32_t count = 0;
+
+       if (dc_enable_dmub_notifications(adev->dm.dc)) {
+               if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
+                       /*
+                        * NOTE(review): only the last notification read by
+                        * this loop survives in 'notify'; earlier pending
+                        * notifications are overwritten. Confirm that this
+                        * coalescing is intentional.
+                        */
+                       do {
+                               dc_stat_get_dmub_notification(adev->dm.dc, &notify);
+                       } while (notify.pending_notification);
+
+                       if (adev->dm.dmub_notify)
+                               memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
+                       if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
+                               complete(&adev->dm.dmub_aux_transfer_done);
+                       // TODO : HPD Implementation
+
+               } else {
+                       DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
+               }
+       }
+
+
+       /* Drain trace-buffer entries until empty or the read cap is hit. */
+       do {
+               if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
+                       trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
+                                                       entry.param0, entry.param1);
+
+                       DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
+                                entry.trace_code, entry.tick_count, entry.param0, entry.param1);
+               } else
+                       break;
+
+               count++;
+
+       } while (count <= DMUB_TRACE_MAX_READ);
+
+       /*
+        * NOTE(review): the loop condition above admits
+        * DMUB_TRACE_MAX_READ + 1 successful reads (count can reach 65),
+        * so this ASSERT can fire under a full trace buffer. Consider
+        * 'count < DMUB_TRACE_MAX_READ' as the loop bound.
+        */
+       ASSERT(count <= DMUB_TRACE_MAX_READ);
+}
 #endif
 
 static int dm_set_clockgating_state(void *handle,
@@ -939,32 +990,6 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
 }
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-#define DMUB_TRACE_MAX_READ 64
-static void dm_dmub_trace_high_irq(void *interrupt_params)
-{
-       struct common_irq_params *irq_params = interrupt_params;
-       struct amdgpu_device *adev = irq_params->adev;
-       struct amdgpu_display_manager *dm = &adev->dm;
-       struct dmcub_trace_buf_entry entry = { 0 };
-       uint32_t count = 0;
-
-       do {
-               if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
-                       trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
-                                                       entry.param0, entry.param1);
-
-                       DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
-                                entry.trace_code, entry.tick_count, entry.param0, entry.param1);
-               } else
-                       break;
-
-               count++;
-
-       } while (count <= DMUB_TRACE_MAX_READ);
-
-       ASSERT(count <= DMUB_TRACE_MAX_READ);
-}
-
 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
 {
        uint64_t pt_base;
@@ -1118,11 +1143,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
                        init_data.flags.disable_dmcu = true;
                break;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
        case CHIP_VANGOGH:
+       case CHIP_YELLOW_CARP:
                init_data.flags.gpu_vm_support = true;
                break;
-#endif
        default:
                break;
        }
@@ -1136,6 +1160,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
                init_data.flags.disable_fractional_pwm = true;
 
+       if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
+               init_data.flags.edp_no_power_sequencing = true;
+
        init_data.flags.power_down_display_on_boot = true;
 
        INIT_LIST_HEAD(&adev->dm.da_list);
@@ -1221,6 +1248,16 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
        adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
 #endif
+       if (dc_enable_dmub_notifications(adev->dm.dc)) {
+               init_completion(&adev->dm.dmub_aux_transfer_done);
+               adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
+               if (!adev->dm.dmub_notify) {
+                       DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
+                       goto error;
+               }
+               amdgpu_dm_outbox_init(adev);
+       }
+
        if (amdgpu_dm_initialize_drm_device(adev)) {
                DRM_ERROR(
                "amdgpu: failed to initialize sw for display support.\n");
@@ -1252,6 +1289,15 @@ error:
        return -EINVAL;
 }
 
+/*
+ * amdgpu_dm_early_fini - early teardown hook for the DM IP block.
+ *
+ * Tears down the audio component binding before the rest of the display
+ * device is destroyed (moved out of amdgpu_dm_fini so it runs earlier in
+ * the teardown sequence). Always returns 0.
+ */
+static int amdgpu_dm_early_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       amdgpu_dm_audio_fini(adev);
+
+       return 0;
+}
+
 static void amdgpu_dm_fini(struct amdgpu_device *adev)
 {
        int i;
@@ -1260,8 +1306,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
                drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
        }
 
-       amdgpu_dm_audio_fini(adev);
-
        amdgpu_dm_destroy_drm_device(&adev->dm);
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
@@ -1289,9 +1333,11 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
        }
 #endif
 
-       if (adev->dm.dc->ctx->dmub_srv) {
-               dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
-               adev->dm.dc->ctx->dmub_srv = NULL;
+       dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+
+       if (dc_enable_dmub_notifications(adev->dm.dc)) {
+               kfree(adev->dm.dmub_notify);
+               adev->dm.dmub_notify = NULL;
        }
 
        if (adev->dm.dmub_bo)
@@ -1358,7 +1404,9 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
+       case CHIP_BEIGE_GOBY:
        case CHIP_VANGOGH:
+       case CHIP_YELLOW_CARP:
                return 0;
        case CHIP_NAVI12:
                fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
@@ -1473,6 +1521,14 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
                dmub_asic = DMUB_ASIC_DCN302;
                fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
                break;
+       case CHIP_BEIGE_GOBY:
+               dmub_asic = DMUB_ASIC_DCN303;
+               fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
+               break;
+       case CHIP_YELLOW_CARP:
+               dmub_asic = DMUB_ASIC_DCN31;
+               fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
+               break;
 
        default:
                /* ASIC doesn't support DMUB. */
@@ -1492,6 +1548,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        }
 
        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+       adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
@@ -1505,7 +1562,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
                         adev->dm.dmcub_fw_version);
        }
 
-       adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;
@@ -1659,7 +1715,6 @@ static int dm_late_init(void *handle)
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = NULL;
-       bool ret = true;
 
        dmcu = adev->dm.dc->res_pool->dmcu;
 
@@ -1676,18 +1731,23 @@ static int dm_late_init(void *handle)
         * 0xFFFF x 0.01 = 0x28F
         */
        params.min_abm_backlight = 0x28F;
-
        /* In the case where abm is implemented on dmcub,
-        * dmcu object will be null.
-        * ABM 2.4 and up are implemented on dmcub.
-        */
-       if (dmcu)
-               ret = dmcu_load_iram(dmcu, params);
-       else if (adev->dm.dc->ctx->dmub_srv)
-               ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
+       * dmcu object will be null.
+       * ABM 2.4 and up are implemented on dmcub.
+       */
+       if (dmcu) {
+               if (!dmcu_load_iram(dmcu, params))
+                       return -EINVAL;
+       } else if (adev->dm.dc->ctx->dmub_srv) {
+               struct dc_link *edp_links[MAX_NUM_EDP];
+               int edp_num;
 
-       if (!ret)
-               return -EINVAL;
+               get_edp_links(adev->dm.dc, edp_links, &edp_num);
+               for (i = 0; i < edp_num; i++) {
+                       if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
+                               return -EINVAL;
+               }
+       }
 
        return detect_mst_link_for_all_connectors(adev_to_drm(adev));
 }
@@ -1945,9 +2005,6 @@ static int dm_suspend(void *handle)
                return ret;
        }
 
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
-       amdgpu_dm_crtc_secure_display_suspend(adev);
-#endif
        WARN_ON(adev->dm.cached_state);
        adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
 
@@ -2166,6 +2223,15 @@ static int dm_resume(void *handle)
                                        = 0xffffffff;
                        }
                }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+               /*
+                * Resource allocation happens for link encoders for newer ASIC in
+                * dc_validate_global_state, so we need to revalidate it.
+                *
+                * This shouldn't fail (it passed once before), so warn if it does.
+                */
+               WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
+#endif
 
                WARN_ON(!dc_commit_state(dm->dc, dc_state));
 
@@ -2271,10 +2337,6 @@ static int dm_resume(void *handle)
 
        dm->cached_state = NULL;
 
-#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
-       amdgpu_dm_crtc_secure_display_resume(adev);
-#endif
-
        amdgpu_dm_irq_resume_late(adev);
 
        amdgpu_dm_smu_write_watermarks_table(adev);
@@ -2298,6 +2360,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = {
        .late_init = dm_late_init,
        .sw_init = dm_sw_init,
        .sw_fini = dm_sw_fini,
+       .early_fini = amdgpu_dm_early_fini,
        .hw_init = dm_hw_init,
        .hw_fini = dm_hw_fini,
        .suspend = dm_suspend,
@@ -2366,9 +2429,9 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
        max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
        min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
 
-       if (caps->ext_caps->bits.oled == 1 ||
+       if (caps->ext_caps->bits.oled == 1 /*||
            caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
-           caps->ext_caps->bits.hdr_aux_backlight_control == 1)
+           caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
                caps->aux_support = true;
 
        if (amdgpu_backlight == 0)
@@ -2696,6 +2759,7 @@ static void handle_hpd_rx_irq(void *param)
        enum dc_connection_type new_connection_type = dc_connection_none;
        struct amdgpu_device *adev = drm_to_adev(dev);
        union hpd_irq_data hpd_irq_data;
+       /* True while this handler holds adev->dm.dc_lock. */
+       bool lock_flag = false;
 
        memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
 
@@ -2708,8 +2772,7 @@ static void handle_hpd_rx_irq(void *param)
         * conflict, after implement i2c helper, this mutex should be
         * retired.
         */
-       if (dc_link->type != dc_connection_mst_branch)
-               mutex_lock(&aconnector->hpd_lock);
+       mutex_lock(&aconnector->hpd_lock);
 
        read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
 
@@ -2726,13 +2789,28 @@ static void handle_hpd_rx_irq(void *param)
                }
        }
 
-       mutex_lock(&adev->dm.dc_lock);
+       /*
+        * TODO: We need the lock to avoid touching DC state while it's being
+        * modified during automated compliance testing, or when link loss
+        * happens. While this should be split into subhandlers and proper
+        * interfaces to avoid having to conditionally lock like this in the
+        * outer layer, we need this workaround temporarily to allow MST
+        * lightup in some scenarios to avoid timeout.
+        */
+       if (!amdgpu_in_reset(adev) &&
+           (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
+            hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
+               mutex_lock(&adev->dm.dc_lock);
+               lock_flag = true;
+       }
+
 #ifdef CONFIG_DRM_AMD_DC_HDCP
        result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
 #else
        result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
 #endif
-       mutex_unlock(&adev->dm.dc_lock);
+       /*
+        * Unlock based only on lock_flag: re-checking amdgpu_in_reset()
+        * here could skip the unlock (leaking dc_lock and deadlocking
+        * later) if a GPU reset started after the lock above was taken.
+        */
+       if (lock_flag)
+               mutex_unlock(&adev->dm.dc_lock);
 
 out:
        if (result && !is_mst_root_connector) {
@@ -2776,10 +2854,10 @@ out:
        }
 #endif
 
-       if (dc_link->type != dc_connection_mst_branch) {
+       if (dc_link->type != dc_connection_mst_branch)
                drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
-               mutex_unlock(&aconnector->hpd_lock);
-       }
+
+       mutex_unlock(&aconnector->hpd_lock);
 }
 
 static void register_hpd_handlers(struct amdgpu_device *adev)
@@ -3151,38 +3229,51 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
 
        }
 
-       if (dc->ctx->dmub_srv) {
-               i = DCN_1_0__SRCID__DMCUB_OUTBOX_HIGH_PRIORITY_READY_INT;
-               r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->dmub_trace_irq);
+       /* HPD */
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
+                       &adev->hpd_irq);
+       if (r) {
+               DRM_ERROR("Failed to add hpd irq id!\n");
+               return r;
+       }
 
-               if (r) {
-                       DRM_ERROR("Failed to add dmub trace irq id!\n");
-                       return r;
-               }
+       register_hpd_handlers(adev);
 
-               int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+       return 0;
+}
+/*
+ * register_outbox_irq_handlers - register the DMUB Outbox IRQ source and
+ * attach dm_dmub_outbox1_low_irq as its low-context callback.
+ *
+ * Returns 0 on success, or the error from amdgpu_irq_add_id().
+ */
+static int register_outbox_irq_handlers(struct amdgpu_device *adev)
+{
+       struct dc *dc = adev->dm.dc;
+       struct common_irq_params *c_irq_params;
+       struct dc_interrupt_params int_params = {0};
+       int r, i;
+
+       int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
+       int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
+
+       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
+                       &adev->dmub_outbox_irq);
+       if (r) {
+               DRM_ERROR("Failed to add outbox irq id!\n");
+               return r;
+       }
+
+       if (dc->ctx->dmub_srv) {
+               i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
+               /* Low context: the handler drains notifications/trace data. */
+               int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+               int_params.irq_source =
+               dc_interrupt_to_irq_source(dc, i, 0);
 
-               c_irq_params = &adev->dm.dmub_trace_params[0];
+               c_irq_params = &adev->dm.dmub_outbox_params[0];
 
                c_irq_params->adev = adev;
                c_irq_params->irq_src = int_params.irq_source;
 
                amdgpu_dm_irq_register_interrupt(adev, &int_params,
-                               dm_dmub_trace_high_irq, c_irq_params);
+                               dm_dmub_outbox1_low_irq, c_irq_params);
        }
 
-       /* HPD */
-       r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
-                       &adev->hpd_irq);
-       if (r) {
-               DRM_ERROR("Failed to add hpd irq id!\n");
-               return r;
-       }
-
-       register_hpd_handlers(adev);
-
        return 0;
 }
 #endif
@@ -3342,7 +3433,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
        if (dm->backlight_caps.caps_valid)
                return;
 
-       amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
+       amdgpu_acpi_get_backlight_caps(&caps);
        if (caps.caps_valid) {
                dm->backlight_caps.caps_valid = true;
                if (caps.aux_support)
@@ -3410,56 +3501,88 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
                                 max - min);
 }
 
-static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
+/*
+ * amdgpu_dm_backlight_set_level - apply one brightness level to every eDP.
+ * @dm: display manager whose backlight_link[] array is walked
+ * @user_brightness: raw value from the backlight sysfs interface
+ *
+ * Converts the user value per link and programs it either over AUX (in
+ * nits) or through the regular backlight path. Returns 0 on success,
+ * 1 if any link failed to update (or no link was updated).
+ */
+static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
+                                        u32 user_brightness)
 {
-       struct amdgpu_display_manager *dm = bl_get_data(bd);
        struct amdgpu_dm_backlight_caps caps;
-       struct dc_link *link = NULL;
-       u32 brightness;
-       bool rc;
+       struct dc_link *link[AMDGPU_DM_MAX_NUM_EDP];
+       u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
+       /* Initialized so the return value is defined even with zero eDPs. */
+       bool rc = false;
+       int i;
 
        amdgpu_dm_update_backlight_caps(dm);
        caps = dm->backlight_caps;
 
-       link = (struct dc_link *)dm->backlight_link;
+       for (i = 0; i < dm->num_of_edps; i++) {
+               dm->brightness[i] = user_brightness;
+               brightness[i] = convert_brightness_from_user(&caps, dm->brightness[i]);
+               link[i] = (struct dc_link *)dm->backlight_link[i];
+       }
 
-       brightness = convert_brightness_from_user(&caps, bd->props.brightness);
-       // Change brightness based on AUX property
-       if (caps.aux_support)
-               rc = dc_link_set_backlight_level_nits(link, true, brightness,
-                                                     AUX_BL_DEFAULT_TRANSITION_TIME_MS);
-       else
-               rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
+       /* Change brightness based on AUX property */
+       if (caps.aux_support) {
+               for (i = 0; i < dm->num_of_edps; i++) {
+                       rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
+                               AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+                       if (!rc) {
+                               DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
+                               break;
+                       }
+               }
+       } else {
+               for (i = 0; i < dm->num_of_edps; i++) {
+                       rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
+                       if (!rc) {
+                               DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
+                               break;
+                       }
+               }
+       }
 
        return rc ? 0 : 1;
 }
 
-static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 {
        struct amdgpu_display_manager *dm = bl_get_data(bd);
+
+       amdgpu_dm_backlight_set_level(dm, bd->props.brightness);
+
+       return 0;
+}
+
+static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm)
+{
        struct amdgpu_dm_backlight_caps caps;
 
        amdgpu_dm_update_backlight_caps(dm);
        caps = dm->backlight_caps;
 
        if (caps.aux_support) {
-               struct dc_link *link = (struct dc_link *)dm->backlight_link;
+               struct dc_link *link = (struct dc_link *)dm->backlight_link[0];
                u32 avg, peak;
                bool rc;
 
                rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
                if (!rc)
-                       return bd->props.brightness;
+                       return dm->brightness[0];
                return convert_brightness_to_user(&caps, avg);
        } else {
-               int ret = dc_link_get_backlight_level(dm->backlight_link);
+               int ret = dc_link_get_backlight_level(dm->backlight_link[0]);
 
                if (ret == DC_ERROR_UNEXPECTED)
-                       return bd->props.brightness;
+                       return dm->brightness[0];
                return convert_brightness_to_user(&caps, ret);
        }
 }
 
+/*
+ * backlight_ops.get_brightness hook: thin wrapper that reports the
+ * current level via amdgpu_dm_backlight_get_level() (which reads eDP 0).
+ */
+static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
+{
+       struct amdgpu_display_manager *dm = bl_get_data(bd);
+
+       return amdgpu_dm_backlight_get_level(dm);
+}
+
 static const struct backlight_ops amdgpu_dm_backlight_ops = {
        .options = BL_CORE_SUSPENDRESUME,
        .get_brightness = amdgpu_dm_backlight_get_brightness,
@@ -3471,8 +3594,11 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
 {
        char bl_name[16];
        struct backlight_properties props = { 0 };
+       int i;
 
        amdgpu_dm_update_backlight_caps(dm);
+       for (i = 0; i < dm->num_of_edps; i++)
+               dm->brightness[i] = AMDGPU_MAX_BL_LEVEL;
 
        props.max_brightness = AMDGPU_MAX_BL_LEVEL;
        props.brightness = AMDGPU_MAX_BL_LEVEL;
@@ -3549,10 +3675,13 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,
                 * DM initialization because not having a backlight control
                 * is better then a black screen.
                 */
-               amdgpu_dm_register_backlight_device(dm);
+               if (!dm->backlight_dev)
+                       amdgpu_dm_register_backlight_device(dm);
 
-               if (dm->backlight_dev)
-                       dm->backlight_link = link;
+               if (dm->backlight_dev) {
+                       dm->backlight_link[dm->num_of_edps] = link;
+                       dm->num_of_edps++;
+               }
        }
 #endif
 }
@@ -3643,6 +3772,23 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                        goto fail;
                }
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+       /* Use Outbox interrupt */
+       switch (adev->asic_type) {
+       case CHIP_SIENNA_CICHLID:
+       case CHIP_NAVY_FLOUNDER:
+       case CHIP_YELLOW_CARP:
+       case CHIP_RENOIR:
+               if (register_outbox_irq_handlers(dm->adev)) {
+                       DRM_ERROR("DM: Failed to initialize IRQ\n");
+                       goto fail;
+               }
+               break;
+       default:
+               DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
+       }
+#endif
+
        /* loops over all connectors on the board */
        for (i = 0; i < link_cnt; i++) {
                struct dc_link *link = NULL;
@@ -3734,7 +3880,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
+       case CHIP_BEIGE_GOBY:
        case CHIP_VANGOGH:
+       case CHIP_YELLOW_CARP:
                if (dcn10_register_irq_handlers(dm->adev)) {
                        DRM_ERROR("DM: Failed to initialize IRQ\n");
                        goto fail;
@@ -3756,7 +3904,6 @@ fail:
 
 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
 {
-       drm_mode_config_cleanup(dm->ddev);
        drm_atomic_private_obj_fini(&dm->atomic_obj);
        return;
 }
@@ -3907,12 +4054,22 @@ static int dm_early_init(void *handle)
                adev->mode_info.num_hpd = 6;
                adev->mode_info.num_dig = 6;
                break;
+       case CHIP_YELLOW_CARP:
+               adev->mode_info.num_crtc = 4;
+               adev->mode_info.num_hpd = 4;
+               adev->mode_info.num_dig = 4;
+               break;
        case CHIP_NAVI14:
        case CHIP_DIMGREY_CAVEFISH:
                adev->mode_info.num_crtc = 5;
                adev->mode_info.num_hpd = 5;
                adev->mode_info.num_dig = 5;
                break;
+       case CHIP_BEIGE_GOBY:
+               adev->mode_info.num_crtc = 2;
+               adev->mode_info.num_hpd = 2;
+               adev->mode_info.num_dig = 2;
+               break;
 #endif
        default:
                DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
@@ -4138,6 +4295,8 @@ fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
        if (adev->asic_type == CHIP_SIENNA_CICHLID ||
            adev->asic_type == CHIP_NAVY_FLOUNDER ||
            adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
+           adev->asic_type == CHIP_BEIGE_GOBY ||
+           adev->asic_type == CHIP_YELLOW_CARP ||
            adev->asic_type == CHIP_VANGOGH)
                tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
 }
@@ -4557,6 +4716,7 @@ get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, u
                break;
        case AMDGPU_FAMILY_NV:
        case AMDGPU_FAMILY_VGH:
+       case AMDGPU_FAMILY_YC:
                if (adev->asic_type >= CHIP_SIENNA_CICHLID)
                        add_gfx10_3_modifiers(adev, mods, &size, &capacity);
                else
@@ -4828,6 +4988,14 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
        case DRM_FORMAT_ABGR16161616F:
                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
                break;
+       case DRM_FORMAT_XRGB16161616:
+       case DRM_FORMAT_ARGB16161616:
+               plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
+               break;
+       case DRM_FORMAT_XBGR16161616:
+       case DRM_FORMAT_ABGR16161616:
+               plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
+               break;
        default:
                DRM_ERROR(
                        "Unsupported screen format %p4cc\n",
@@ -5404,6 +5572,93 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
        }
 }
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+/*
+ * update_dsc_caps() - reset the stream's DSC flag and fetch sink DSC caps.
+ *
+ * Clears stream->timing.flags.DSC and, for DisplayPort sinks, parses the
+ * sink's DSC DPCD capability blocks (basic caps + branch decoder caps) into
+ * @dsc_caps for later use by apply_dsc_policy_for_stream().
+ */
+static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
+                                                       struct dc_sink *sink, struct dc_stream_state *stream,
+                                                       struct dsc_dec_dpcd_caps *dsc_caps)
+{
+       /* Default to DSC disabled; the policy pass may turn it back on. */
+       stream->timing.flags.DSC = 0;
+
+       if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+               dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+                                     aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+                                     aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+                                     dsc_caps);
+       }
+}
+
+/*
+ * apply_dsc_policy_for_stream() - decide whether DSC is enabled for @stream.
+ *
+ * Computes stream->timing.dsc_cfg (and sets stream->timing.flags.DSC) based
+ * on the available link bandwidth and the sink decoder capabilities in
+ * @dsc_caps.  Debugfs overrides from aconnector->dsc_settings (force enable,
+ * slice counts, bits-per-pixel) are applied last and win over the computed
+ * configuration.
+ */
+static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
+                                                                               struct dc_sink *sink, struct dc_stream_state *stream,
+                                                                               struct dsc_dec_dpcd_caps *dsc_caps)
+{
+       struct drm_connector *drm_connector = &aconnector->base;
+       uint32_t link_bandwidth_kbps;
+
+       link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+                                                       dc_link_get_link_cap(aconnector->dc_link));
+       /* Set DSC policy according to dsc_clock_en */
+       dc_dsc_policy_set_enable_dsc_when_not_needed(
+               aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
+
+       /* SST DisplayPort only: let DC compute a DSC config that fits the
+        * current timing into the available link bandwidth.
+        */
+       if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+
+               if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+                                               dsc_caps,
+                                               aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
+                                               0,
+                                               link_bandwidth_kbps,
+                                               &stream->timing,
+                                               &stream->timing.dsc_cfg)) {
+                       stream->timing.flags.DSC = 1;
+                       DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
+               }
+       }
+
+       /* Overwrite the stream flag if DSC is enabled through debugfs */
+       if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
+               stream->timing.flags.DSC = 1;
+
+       /* Debugfs-requested slice counts / bpp override what
+        * dc_dsc_compute_config() derived (only when DSC is on).
+        */
+       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
+               stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
+
+       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
+               stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
+
+       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
+               stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
+}
+#endif
+
+/**
+ * DOC: FreeSync Video
+ *
+ * When a userspace application wants to play a video, the content follows a
+ * standard format definition that usually specifies the FPS for that format.
+ * The below list illustrates some video format and the expected FPS,
+ * respectively:
+ *
+ * - TV/NTSC (23.976 FPS)
+ * - Cinema (24 FPS)
+ * - TV/PAL (25 FPS)
+ * - TV/NTSC (29.97 FPS)
+ * - TV/NTSC (30 FPS)
+ * - Cinema HFR (48 FPS)
+ * - TV/PAL (50 FPS)
+ * - Commonly used (60 FPS)
+ * - Multiples of 24 (48,72,96 FPS)
+ *
+ * The list of standard video formats is not huge and can be added to the
+ * connector modeset list beforehand. With that, userspace can leverage
+ * FreeSync to extend the front porch in order to attain the target refresh
+ * rate. Such a switch will happen seamlessly, without screen blanking or
+ * reprogramming of the output in any other way. If the userspace requests a
+ * modesetting change compatible with FreeSync modes that only differ in the
+ * refresh rate, DC will skip the full update and avoid blink during the
+ * transition. For example, the video player can change the modesetting from
+ * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
+ * causing any display blink. This same concept can be applied to a mode
+ * setting change.
+ */
 static struct drm_display_mode *
 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
                          bool use_probed_modes)
@@ -5506,7 +5761,6 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        int preferred_refresh = 0;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        struct dsc_dec_dpcd_caps dsc_caps;
-       uint32_t link_bandwidth_kbps;
 #endif
        struct dc_sink *sink = NULL;
 
@@ -5596,45 +5850,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                        stream, &mode, &aconnector->base, con_state, old_stream,
                        requested_bpc);
 
-       stream->timing.flags.DSC = 0;
-
-       if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-               dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
-                                     aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
-                                     aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
-                                     &dsc_caps);
-               link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
-                                                            dc_link_get_link_cap(aconnector->dc_link));
-
-               if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
-                       /* Set DSC policy according to dsc_clock_en */
-                       dc_dsc_policy_set_enable_dsc_when_not_needed(
-                               aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
-
-                       if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
-                                                 &dsc_caps,
-                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
-                                                 0,
-                                                 link_bandwidth_kbps,
-                                                 &stream->timing,
-                                                 &stream->timing.dsc_cfg))
-                               stream->timing.flags.DSC = 1;
-                       /* Overwrite the stream flag if DSC is enabled through debugfs */
-                       if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
-                               stream->timing.flags.DSC = 1;
-
-                       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
-                               stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
-
-                       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
-                               stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
-
-                       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
-                               stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
-               }
+       /* SST DSC determination policy */
+       update_dsc_caps(aconnector, sink, stream, &dsc_caps);
+       if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
+               apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
 #endif
-       }
 
        update_stream_scaling_settings(&mode, dm_state, stream);
 
@@ -5662,6 +5883,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                                stream->use_vsc_sdp_for_colorimetry = true;
                }
                mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
+               aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+
        }
 finish:
        dc_sink_release(sink);
@@ -6308,25 +6531,6 @@ static int fill_hdr_info_packet(const struct drm_connector_state *state,
        return 0;
 }
 
-static bool
-is_hdr_metadata_different(const struct drm_connector_state *old_state,
-                         const struct drm_connector_state *new_state)
-{
-       struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
-       struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
-
-       if (old_blob != new_blob) {
-               if (old_blob && new_blob &&
-                   old_blob->length == new_blob->length)
-                       return memcmp(old_blob->data, new_blob->data,
-                                     old_blob->length);
-
-               return true;
-       }
-
-       return false;
-}
-
 static int
 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
                                 struct drm_atomic_state *state)
@@ -6344,7 +6548,7 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
        if (!crtc)
                return 0;
 
-       if (is_hdr_metadata_different(old_con_state, new_con_state)) {
+       if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
                struct dc_info_packet hdr_infopacket;
 
                ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
@@ -6452,9 +6656,8 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
 
        dm_update_crtc_active_planes(crtc, crtc_state);
 
-       if (unlikely(!dm_crtc_state->stream &&
-                    modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
-               WARN_ON(1);
+       if (WARN_ON(unlikely(!dm_crtc_state->stream &&
+                    modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
                return ret;
        }
 
@@ -6579,13 +6782,13 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
 {
        struct dc_stream_state *stream = NULL;
        struct drm_connector *connector;
-       struct drm_connector_state *new_con_state, *old_con_state;
+       struct drm_connector_state *new_con_state;
        struct amdgpu_dm_connector *aconnector;
        struct dm_connector_state *dm_conn_state;
        int i, j, clock, bpp;
        int vcpi, pbn_div, pbn = 0;
 
-       for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+       for_each_new_connector_in_state(state, connector, new_con_state, i) {
 
                aconnector = to_amdgpu_dm_connector(connector);
 
@@ -6953,6 +7156,10 @@ static const uint32_t rgb_formats[] = {
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_ARGB2101010,
        DRM_FORMAT_ABGR2101010,
+       DRM_FORMAT_XRGB16161616,
+       DRM_FORMAT_XBGR16161616,
+       DRM_FORMAT_ARGB16161616,
+       DRM_FORMAT_ABGR16161616,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_RGB565,
@@ -7531,9 +7738,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
        if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
            connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
            connector_type == DRM_MODE_CONNECTOR_eDP) {
-               drm_object_attach_property(
-                       &aconnector->base.base,
-                       dm->ddev->mode_config.hdr_output_metadata_property, 0);
+               drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
 
                if (!aconnector->mst_port)
                        drm_connector_attach_vrr_capable_property(&aconnector->base);
@@ -8185,15 +8390,14 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
 {
        struct drm_plane *plane;
-       struct drm_plane_state *old_plane_state, *new_plane_state;
+       struct drm_plane_state *old_plane_state;
        int i;
 
        /*
         * TODO: Make this per-stream so we don't issue redundant updates for
         * commits with multiple streams.
         */
-       for_each_oldnew_plane_in_state(state, plane, old_plane_state,
-                                      new_plane_state, i)
+       for_each_old_plane_in_state(state, plane, old_plane_state, i)
                if (plane->type == DRM_PLANE_TYPE_CURSOR)
                        handle_cursor_update(plane, old_plane_state);
 }
@@ -8297,9 +8501,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                 * deadlock during GPU reset when this fence will not signal
                 * but we hold reservation lock for the BO.
                 */
-               r = dma_resv_wait_timeout_rcu(abo->tbo.base.resv, true,
-                                                       false,
-                                                       msecs_to_jiffies(5000));
+               r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
+                                         msecs_to_jiffies(5000));
                if (unlikely(r <= 0))
                        DRM_ERROR("Waiting for fences timed out!");
 
@@ -8495,7 +8698,13 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
                else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
                                acrtc_state->stream->link->psr_settings.psr_feature_enabled &&
                                !acrtc_state->stream->link->psr_settings.psr_allow_active) {
-                       amdgpu_dm_psr_enable(acrtc_state->stream);
+                       struct amdgpu_dm_connector *aconn = (struct amdgpu_dm_connector *)
+                                       acrtc_state->stream->dm_stream_context;
+
+                       if (aconn->psr_skip_count > 0)
+                               aconn->psr_skip_count--;
+                       else
+                               amdgpu_dm_psr_enable(acrtc_state->stream);
                }
 
                mutex_unlock(&dm->dc_lock);
@@ -8838,7 +9047,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                              dm_old_crtc_state->abm_level;
 
                hdr_changed =
-                       is_hdr_metadata_different(old_con_state, new_con_state);
+                       !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
 
                if (!scaling_changed && !abm_changed && !hdr_changed)
                        continue;
@@ -8864,7 +9073,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                }
 
                status = dc_stream_get_status(dm_new_crtc_state->stream);
-               WARN_ON(!status);
+
+               if (WARN_ON(!status))
+                       continue;
+
                WARN_ON(!status->plane_count);
 
                /*
@@ -8914,6 +9126,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 #ifdef CONFIG_DEBUG_FS
                bool configure_crc = false;
                enum amdgpu_dm_pipe_crc_source cur_crc_src;
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+               struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
+#endif
+               spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+               cur_crc_src = acrtc->dm_irq_params.crc_src;
+               spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 #endif
                dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
 
@@ -8930,21 +9148,26 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                         * settings for the stream.
                         */
                        dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-                       spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
-                       cur_crc_src = acrtc->dm_irq_params.crc_src;
-                       spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
 
                        if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
                                configure_crc = true;
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
-                               if (amdgpu_dm_crc_window_is_activated(crtc))
-                                       configure_crc = false;
+                               if (amdgpu_dm_crc_window_is_activated(crtc)) {
+                                       spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
+                                       acrtc->dm_irq_params.crc_window.update_win = true;
+                                       acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
+                                       spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
+                                       crc_rd_wrk->crtc = crtc;
+                                       spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
+                                       spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
+                               }
 #endif
                        }
 
                        if (configure_crc)
-                               amdgpu_dm_crtc_configure_crc_source(
-                                       crtc, dm_new_crtc_state, cur_crc_src);
+                               if (amdgpu_dm_crtc_configure_crc_source(
+                                       crtc, dm_new_crtc_state, cur_crc_src))
+                                       DRM_DEBUG_DRIVER("Failed to configure crc source");
 #endif
                }
        }
@@ -8965,6 +9188,12 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
        /* Update audio instances for each connector. */
        amdgpu_dm_commit_audio(dev, state);
 
+#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||          \
+       defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+       /* restore the backlight level */
+       if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0]))
+               amdgpu_dm_backlight_set_level(dm, dm->brightness[0]);
+#endif
        /*
         * send vblank event on all events not handled in flip and
         * mark consumed event for drm_atomic_helper_commit_hw_done
@@ -9464,7 +9693,8 @@ skip_modeset:
        BUG_ON(dm_new_crtc_state->stream == NULL);
 
        /* Scaling or underscan settings */
-       if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state))
+       if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
+                               drm_atomic_crtc_needs_modeset(new_crtc_state))
                update_stream_scaling_settings(
                        &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
 
@@ -9890,11 +10120,11 @@ static int validate_overlay(struct drm_atomic_state *state)
 {
        int i;
        struct drm_plane *plane;
-       struct drm_plane_state *old_plane_state, *new_plane_state;
-       struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
+       struct drm_plane_state *new_plane_state;
+       struct drm_plane_state *primary_state, *overlay_state = NULL;
 
        /* Check if primary plane is contained inside overlay */
-       for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+       for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
                if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
                        if (drm_atomic_plane_disabling(plane->state, new_plane_state))
                                return 0;
@@ -9921,14 +10151,6 @@ static int validate_overlay(struct drm_atomic_state *state)
        if (!primary_state->crtc)
                return 0;
 
-       /* check if cursor plane is enabled */
-       cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
-       if (IS_ERR(cursor_state))
-               return PTR_ERR(cursor_state);
-
-       if (drm_atomic_plane_disabling(plane->state, cursor_state))
-               return 0;
-
        /* Perform the bounds check to ensure the overlay plane covers the primary */
        if (primary_state->crtc_x < overlay_state->crtc_x ||
            primary_state->crtc_y < overlay_state->crtc_y ||
@@ -10031,6 +10253,10 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        dm_old_crtc_state->dsc_force_changed == false)
                        continue;
 
+               ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
+               if (ret)
+                       goto fail;
+
                if (!new_crtc_state->enable)
                        continue;
 
@@ -10511,136 +10737,6 @@ update:
                                                       freesync_capable);
 }
 
-static void amdgpu_dm_set_psr_caps(struct dc_link *link)
-{
-       uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
-
-       if (!(link->connector_signal & SIGNAL_TYPE_EDP))
-               return;
-       if (link->type == dc_connection_none)
-               return;
-       if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
-                                       dpcd_data, sizeof(dpcd_data))) {
-               link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
-
-               if (dpcd_data[0] == 0) {
-                       link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
-                       link->psr_settings.psr_feature_enabled = false;
-               } else {
-                       link->psr_settings.psr_version = DC_PSR_VERSION_1;
-                       link->psr_settings.psr_feature_enabled = true;
-               }
-
-               DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
-       }
-}
-
-/*
- * amdgpu_dm_link_setup_psr() - configure psr link
- * @stream: stream state
- *
- * Return: true if success
- */
-static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
-{
-       struct dc_link *link = NULL;
-       struct psr_config psr_config = {0};
-       struct psr_context psr_context = {0};
-       bool ret = false;
-
-       if (stream == NULL)
-               return false;
-
-       link = stream->link;
-
-       psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version;
-
-       if (psr_config.psr_version > 0) {
-               psr_config.psr_exit_link_training_required = 0x1;
-               psr_config.psr_frame_capture_indication_req = 0;
-               psr_config.psr_rfb_setup_time = 0x37;
-               psr_config.psr_sdp_transmit_line_num_deadline = 0x20;
-               psr_config.allow_smu_optimizations = 0x0;
-
-               ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context);
-
-       }
-       DRM_DEBUG_DRIVER("PSR link: %d\n",      link->psr_settings.psr_feature_enabled);
-
-       return ret;
-}
-
-/*
- * amdgpu_dm_psr_enable() - enable psr f/w
- * @stream: stream state
- *
- * Return: true if success
- */
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
-{
-       struct dc_link *link = stream->link;
-       unsigned int vsync_rate_hz = 0;
-       struct dc_static_screen_params params = {0};
-       /* Calculate number of static frames before generating interrupt to
-        * enter PSR.
-        */
-       // Init fail safe of 2 frames static
-       unsigned int num_frames_static = 2;
-
-       DRM_DEBUG_DRIVER("Enabling psr...\n");
-
-       vsync_rate_hz = div64_u64(div64_u64((
-                       stream->timing.pix_clk_100hz * 100),
-                       stream->timing.v_total),
-                       stream->timing.h_total);
-
-       /* Round up
-        * Calculate number of frames such that at least 30 ms of time has
-        * passed.
-        */
-       if (vsync_rate_hz != 0) {
-               unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
-               num_frames_static = (30000 / frame_time_microsec) + 1;
-       }
-
-       params.triggers.cursor_update = true;
-       params.triggers.overlay_update = true;
-       params.triggers.surface_update = true;
-       params.num_frames = num_frames_static;
-
-       dc_stream_set_static_screen_params(link->ctx->dc,
-                                          &stream, 1,
-                                          &params);
-
-       return dc_link_set_psr_allow_active(link, true, false, false);
-}
-
-/*
- * amdgpu_dm_psr_disable() - disable psr f/w
- * @stream:  stream state
- *
- * Return: true if success
- */
-static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
-{
-
-       DRM_DEBUG_DRIVER("Disabling psr...\n");
-
-       return dc_link_set_psr_allow_active(stream->link, false, true, false);
-}
-
-/*
- * amdgpu_dm_psr_disable() - disable psr f/w
- * if psr is enabled on any stream
- *
- * Return: true if success
- */
-static bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm)
-{
-       DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n");
-       return dc_set_psr_allow_active(dm->dc, false);
-}
-
 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
 {
        struct amdgpu_device *adev = drm_to_adev(dev);
@@ -10697,3 +10793,30 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
 
        return value;
 }
+
+/*
+ * amdgpu_dm_process_dmub_aux_transfer_sync() - synchronous DMUB AUX transfer.
+ *
+ * Kicks off an asynchronous AUX transaction through the DMUB firmware and
+ * waits (up to 10 seconds) for the dmub_aux_transfer_done completion that is
+ * signalled when the DMUB notification arrives.
+ *
+ * Return: the AUX reply length on (possible) success, or -1 on timeout with
+ * *operation_result set to AUX_RET_ERROR_TIMEOUT.  Otherwise
+ * *operation_result carries the DMUB-reported status; for a successful ACKed
+ * read the reply data is copied into payload->data.
+ *
+ * NOTE(review): wait_for_completion_interruptible_timeout() can also return
+ * a negative value (-ERESTARTSYS) if the wait is interrupted by a signal.
+ * Only ret == 0 (timeout) is handled here, so an interrupted wait falls
+ * through and reads a possibly-stale dmub_notify — confirm this is intended.
+ */
+int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
+                               struct aux_payload *payload, enum aux_return_code_type *operation_result)
+{
+       struct amdgpu_device *adev = ctx->driver_context;
+       int ret = 0;
+
+       dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
+       /* Completed by the DMUB notification handler; 10 s guards against a
+        * hung firmware transaction.
+        */
+       ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
+       if (ret == 0) {
+               *operation_result = AUX_RET_ERROR_TIMEOUT;
+               return -1;
+       }
+       *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
+
+       if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
+               (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
+
+               // For read case, Copy data to payload
+               if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
+               (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
+                       memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
+                       adev->dm.dmub_notify->aux_reply.length);
+       }
+
+       return adev->dm.dmub_notify->aux_reply.length;
+}