Merge tag 'amd-drm-next-5.14-2021-06-02' of https://gitlab.freedesktop.org/agd5f...
author Dave Airlie <airlied@redhat.com>
Thu, 3 Jun 2021 20:13:56 +0000 (06:13 +1000)
committer Dave Airlie <airlied@redhat.com>
Thu, 3 Jun 2021 20:13:57 +0000 (06:13 +1000)
amd-drm-next-5.14-2021-06-02:

amdgpu:
- GC/MM register access macro clean up for SR-IOV
- Beige Goby updates
- W=1 Fixes
- Aldebaran fixes
- Misc display fixes
- ACPI ATCS/ATIF handling rework
- SR-IOV fixes
- RAS fixes
- 16bpc fixed point format support
- Initial smartshift support
- RV/PCO power tuning fixes for suspend/resume
- More buffer object subclassing work
- Add new INFO query for additional vbios information
- Add new placement for preemptable SG buffers

amdkfd:
- Misc fixes

radeon:
- W=1 Fixes
- Misc cleanups

UAPI:
- Add new INFO query for additional vbios information
  Useful for debugging vbios-related issues (see the first sketch below).  Proposed umr patch:
  https://patchwork.freedesktop.org/patch/433297/
- 16bpc fixed point format support (see the second sketch below)
  IGT test:
  https://lists.freedesktop.org/archives/igt-dev/2021-May/031507.html
  Proposed Vulkan patch:
  https://github.com/kleinerm/pal/commit/a25d4802074b13a8d5f7edc96ae45469ecbac3c4
- Add a new GEM flag (AMDGPU_GEM_CREATE_PREEMPTIBLE) which is only used
  internally in the kernel driver.  Userspace is not allowed to set it.
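
  First sketch: a minimal userspace query of the new vbios info through a
  raw DRM_IOCTL_AMDGPU_INFO call.  The struct layout follows the
  include/uapi/drm/amdgpu_drm.h additions in this series; the render node
  path and header location are assumptions that vary per system:

  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <drm/amdgpu_drm.h>     /* may be <libdrm/amdgpu_drm.h> */

  int main(void)
  {
          struct drm_amdgpu_info_vbios vbios = {0};
          struct drm_amdgpu_info req = {0};
          int fd = open("/dev/dri/renderD128", O_RDWR); /* assumed node */

          if (fd < 0)
                  return 1;

          req.return_pointer = (uintptr_t)&vbios; /* kernel fills this buffer */
          req.return_size = sizeof(vbios);
          req.query = AMDGPU_INFO_VBIOS;
          req.vbios_info.type = AMDGPU_INFO_VBIOS_INFO;

          if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req) == 0)
                  printf("vbios: %s, pn %s, ver %u (%s)\n", vbios.name,
                         vbios.vbios_pn, vbios.version, vbios.vbios_ver_str);

          close(fd);
          return 0;
  }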
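
  Second sketch: creating a KMS framebuffer with one of the new 16bpc
  fourccs through libdrm's drmModeAddFB2.  The helper name is illustrative,
  and the buffer handle/pitch are assumed to come from an existing 64bpp
  (8 bytes per pixel) allocation:

  #include <stdint.h>
  #include <xf86drmMode.h>
  #include <drm_fourcc.h>

  /* Illustrative helper: wrap one pre-allocated BO in a 16bpc framebuffer. */
  int add_16bpc_fb(int fd, uint32_t width, uint32_t height,
                   uint32_t handle, uint32_t pitch, uint32_t *fb_id)
  {
          uint32_t handles[4] = { handle };
          uint32_t pitches[4] = { pitch };        /* bytes per row */
          uint32_t offsets[4] = { 0 };

          return drmModeAddFB2(fd, width, height, DRM_FORMAT_XRGB16161616,
                               handles, pitches, offsets, fb_id, 0);
  }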

drm:
- 16bpc fixed point format fourcc

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210602214009.4553-1-alexander.deucher@amd.com
164 files changed:
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/aldebaran.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
drivers/gpu/drm/amd/amdgpu/atom.c
drivers/gpu/drm/amd/amdgpu/atom.h
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/si_dma.c
drivers/gpu/drm/amd/amdgpu/smuio_v13_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/soc15_common.h
drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
drivers/gpu/drm/amd/display/dc/bios/command_table_helper.c
drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
drivers/gpu/drm/amd/display/dc/dc_hw_types.h
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dc_stream.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.h
drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
drivers/gpu/drm/amd/display/dc/dce/dmub_outbox.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_mem_input_v.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_transform_v.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c
drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dmub/dmub_srv.h
drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
drivers/gpu/drm/amd/display/include/gpio_service_interface.h
drivers/gpu/drm/amd/display/include/link_service_types.h
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
drivers/gpu/drm/amd/display/modules/power/power_helpers.h
drivers/gpu/drm/amd/include/aldebaran_ip_offset.h
drivers/gpu/drm/amd/include/amd_acpi.h
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h
drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/hardwaremanager.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_thermal.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/drm_fourcc.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/evergreen.h
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_vm.c
include/drm/drm_atomic.h
include/uapi/drm/amdgpu_drm.h
include/uapi/drm/drm_fourcc.h

index 6331a11..6cf0fe8 100644 (file)
@@ -51,9 +51,10 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
        atombios_encoders.o amdgpu_sa.o atombios_i2c.o \
        amdgpu_dma_buf.o amdgpu_vm.o amdgpu_ib.o amdgpu_pll.o \
        amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
-       amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
-       amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
-       amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
+       amdgpu_gtt_mgr.o amdgpu_preempt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o \
+       amdgpu_atomfirmware.o amdgpu_vf_error.o amdgpu_sched.o \
+       amdgpu_debugfs.o amdgpu_ids.o amdgpu_gmc.o amdgpu_mmhub.o \
+       amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
        amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
        amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
        amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o
index 65b1dca..148f6c3 100644 (file)
@@ -227,7 +227,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
                        break;
                default:
                        break;
-               };
+               }
        }
 
        /* Reinit NBIF block */
index 916d061..267e9e1 100644 (file)
@@ -130,6 +130,13 @@ struct amdgpu_mgpu_info
        bool                            pending_reset;
 };
 
+enum amdgpu_ss {
+       AMDGPU_SS_DRV_LOAD,
+       AMDGPU_SS_DEV_D0,
+       AMDGPU_SS_DEV_D3,
+       AMDGPU_SS_DRV_UNLOAD
+};
+
 struct amdgpu_watchdog_timer
 {
        bool timeout_fatal_disable;
@@ -268,7 +275,6 @@ struct amdgpu_job;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
 struct amdgpu_bo_va_mapping;
-struct amdgpu_atif;
 struct kfd_vm_fault_info;
 struct amdgpu_hive_info;
 struct amdgpu_reset_context;
@@ -682,20 +688,6 @@ struct amdgpu_vram_scratch {
        u64                             gpu_addr;
 };
 
-/*
- * ACPI
- */
-struct amdgpu_atcs_functions {
-       bool get_ext_state;
-       bool pcie_perf_req;
-       bool pcie_dev_rdy;
-       bool pcie_bus_width;
-};
-
-struct amdgpu_atcs {
-       struct amdgpu_atcs_functions functions;
-};
-
 /*
  * CGS
  */
@@ -825,8 +817,6 @@ struct amdgpu_device {
        struct notifier_block           acpi_nb;
        struct amdgpu_i2c_chan          *i2c_bus[AMDGPU_MAX_I2C_BUS];
        struct debugfs_blob_wrapper     debugfs_vbios_blob;
-       struct amdgpu_atif              *atif;
-       struct amdgpu_atcs              atcs;
        struct mutex                    srbm_mutex;
        /* GRBM index mutex. Protects concurrent access to GRBM index */
        struct mutex                    grbm_idx_mutex;
@@ -1146,6 +1136,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
  * Registers read & write functions.
  */
 #define AMDGPU_REGS_NO_KIQ    (1<<1)
+#define AMDGPU_REGS_RLC        (1<<2)
 
 #define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
 #define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
@@ -1282,6 +1273,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
 bool amdgpu_device_supports_atpx(struct drm_device *dev);
 bool amdgpu_device_supports_px(struct drm_device *dev);
 bool amdgpu_device_supports_boco(struct drm_device *dev);
+bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
 bool amdgpu_device_supports_baco(struct drm_device *dev);
 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
                                      struct amdgpu_device *peer_adev);
@@ -1356,21 +1348,38 @@ struct amdgpu_afmt_acr {
 struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);
 
 /* amdgpu_acpi.c */
+
+/* ATCS Device/Driver State */
+#define AMDGPU_ATCS_PSC_DEV_STATE_D0           0
+#define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT       3
+#define AMDGPU_ATCS_PSC_DRV_STATE_OPR          0
+#define AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR      1
+
 #if defined(CONFIG_ACPI)
 int amdgpu_acpi_init(struct amdgpu_device *adev);
 void amdgpu_acpi_fini(struct amdgpu_device *adev);
 bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
+bool amdgpu_acpi_is_power_shift_control_supported(void);
 int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
                                                u8 perf_req, bool advertise);
+int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
+                                   u8 dev_state, bool drv_state);
+int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
 
-void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
-               struct amdgpu_dm_backlight_caps *caps);
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
 bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev);
+void amdgpu_acpi_detect(void);
 #else
 static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
 static inline bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { return false; }
+static inline void amdgpu_acpi_detect(void) { }
+static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
+static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
+                                                 u8 dev_state, bool drv_state) { return 0; }
+static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
+                                                enum amdgpu_ss ss_state) { return 0; }
 #endif
 
 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
index bf2939b..84a1b4b 100644 (file)
@@ -71,12 +71,31 @@ struct amdgpu_atif {
        struct amdgpu_dm_backlight_caps backlight_caps;
 };
 
+struct amdgpu_atcs_functions {
+       bool get_ext_state;
+       bool pcie_perf_req;
+       bool pcie_dev_rdy;
+       bool pcie_bus_width;
+       bool power_shift_control;
+};
+
+struct amdgpu_atcs {
+       acpi_handle handle;
+
+       struct amdgpu_atcs_functions functions;
+};
+
+static struct amdgpu_acpi_priv {
+       struct amdgpu_atif atif;
+       struct amdgpu_atcs atcs;
+} amdgpu_acpi_priv;
+
 /* Call the ATIF method
  */
 /**
  * amdgpu_atif_call - call an ATIF method
  *
- * @atif: acpi handle
+ * @atif: atif structure
  * @function: the ATIF function to execute
  * @params: ATIF function params
  *
@@ -207,35 +226,6 @@ out:
        return err;
 }
 
-static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle)
-{
-       acpi_handle handle = NULL;
-       char acpi_method_name[255] = { 0 };
-       struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
-       acpi_status status;
-
-       /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace, on dGPU only
-        * systems, ATIF is in the dGPU's namespace.
-        */
-       status = acpi_get_handle(dhandle, "ATIF", &handle);
-       if (ACPI_SUCCESS(status))
-               goto out;
-
-       if (amdgpu_has_atpx()) {
-               status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
-                                        &handle);
-               if (ACPI_SUCCESS(status))
-                       goto out;
-       }
-
-       DRM_DEBUG_DRIVER("No ATIF handle found\n");
-       return NULL;
-out:
-       acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
-       DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
-       return handle;
-}
-
 /**
  * amdgpu_atif_get_notification_params - determine notify configuration
  *
@@ -414,7 +404,7 @@ out:
 static int amdgpu_atif_handler(struct amdgpu_device *adev,
                               struct acpi_bus_event *event)
 {
-       struct amdgpu_atif *atif = adev->atif;
+       struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
        int count;
 
        DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -424,8 +414,7 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
                return NOTIFY_DONE;
 
        /* Is this actually our event? */
-       if (!atif ||
-           !atif->notification_cfg.enabled ||
+       if (!atif->notification_cfg.enabled ||
            event->type != atif->notification_cfg.command_code) {
                /* These events will generate keypresses otherwise */
                if (event->type == ACPI_VIDEO_NOTIFY_PROBE)
@@ -485,14 +474,15 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
 /**
  * amdgpu_atcs_call - call an ATCS method
  *
- * @handle: acpi handle
+ * @atcs: atcs structure
  * @function: the ATCS function to execute
  * @params: ATCS function params
  *
  * Executes the requested ATCS function (all asics).
  * Returns a pointer to the acpi output buffer.
  */
-static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function,
+static union acpi_object *amdgpu_atcs_call(struct amdgpu_atcs *atcs,
+                                          int function,
                                           struct acpi_buffer *params)
 {
        acpi_status status;
@@ -516,7 +506,7 @@ static union acpi_object *amdgpu_atcs_call(acpi_handle handle, int function,
                atcs_arg_elements[1].integer.value = 0;
        }
 
-       status = acpi_evaluate_object(handle, "ATCS", &atcs_arg, &buffer);
+       status = acpi_evaluate_object(atcs->handle, NULL, &atcs_arg, &buffer);
 
        /* Fail only if calling the method fails and ATIF is supported */
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -545,12 +535,12 @@ static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mas
        f->pcie_perf_req = mask & ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED;
        f->pcie_dev_rdy = mask & ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED;
        f->pcie_bus_width = mask & ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED;
+       f->power_shift_control = mask & ATCS_SET_POWER_SHIFT_CONTROL_SUPPORTED;
 }
 
 /**
  * amdgpu_atcs_verify_interface - verify ATCS
  *
- * @handle: acpi handle
  * @atcs: amdgpu atcs struct
  *
  * Execute the ATCS_FUNCTION_VERIFY_INTERFACE ATCS function
@@ -558,15 +548,14 @@ static void amdgpu_atcs_parse_functions(struct amdgpu_atcs_functions *f, u32 mas
  * (all asics).
  * returns 0 on success, error on failure.
  */
-static int amdgpu_atcs_verify_interface(acpi_handle handle,
-                                       struct amdgpu_atcs *atcs)
+static int amdgpu_atcs_verify_interface(struct amdgpu_atcs *atcs)
 {
        union acpi_object *info;
        struct atcs_verify_interface output;
        size_t size;
        int err = 0;
 
-       info = amdgpu_atcs_call(handle, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
+       info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_VERIFY_INTERFACE, NULL);
        if (!info)
                return -EIO;
 
@@ -603,7 +592,7 @@ out:
  */
 bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev)
 {
-       struct amdgpu_atcs *atcs = &adev->atcs;
+       struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
 
        if (atcs->functions.pcie_perf_req && atcs->functions.pcie_dev_rdy)
                return true;
@@ -611,6 +600,18 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
        return false;
 }
 
+/**
+ * amdgpu_acpi_is_power_shift_control_supported
+ *
+ * Check if the ATCS power shift control method
+ * is supported.
+ * returns true if supported, false if not.
+ */
+bool amdgpu_acpi_is_power_shift_control_supported(void)
+{
+       return amdgpu_acpi_priv.atcs.functions.power_shift_control;
+}
+
 /**
  * amdgpu_acpi_pcie_notify_device_ready
  *
@@ -622,19 +623,13 @@ bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *ade
  */
 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
 {
-       acpi_handle handle;
        union acpi_object *info;
-       struct amdgpu_atcs *atcs = &adev->atcs;
-
-       /* Get the device handle */
-       handle = ACPI_HANDLE(&adev->pdev->dev);
-       if (!handle)
-               return -EINVAL;
+       struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
 
        if (!atcs->functions.pcie_dev_rdy)
                return -EINVAL;
 
-       info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
+       info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, NULL);
        if (!info)
                return -EIO;
 
@@ -657,9 +652,8 @@ int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev)
 int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
                                         u8 perf_req, bool advertise)
 {
-       acpi_handle handle;
        union acpi_object *info;
-       struct amdgpu_atcs *atcs = &adev->atcs;
+       struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
        struct atcs_pref_req_input atcs_input;
        struct atcs_pref_req_output atcs_output;
        struct acpi_buffer params;
@@ -669,11 +663,6 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
        if (amdgpu_acpi_pcie_notify_device_ready(adev))
                return -EINVAL;
 
-       /* Get the device handle */
-       handle = ACPI_HANDLE(&adev->pdev->dev);
-       if (!handle)
-               return -EINVAL;
-
        if (!atcs->functions.pcie_perf_req)
                return -EINVAL;
 
@@ -691,7 +680,7 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
        params.pointer = &atcs_input;
 
        while (retry--) {
-               info = amdgpu_atcs_call(handle, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
+               info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &params);
                if (!info)
                        return -EIO;
 
@@ -724,6 +713,96 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
        return 0;
 }
 
+/**
+ * amdgpu_acpi_power_shift_control
+ *
+ * @adev: amdgpu_device pointer
+ * @dev_state: device acpi state
+ * @drv_state: driver state
+ *
+ * Executes the POWER_SHIFT_CONTROL method to
+ * communicate current dGPU device state and
+ * driver state to APU/SBIOS.
+ * returns 0 on success, error on failure.
+ */
+int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
+                                   u8 dev_state, bool drv_state)
+{
+       union acpi_object *info;
+       struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
+       struct atcs_pwr_shift_input atcs_input;
+       struct acpi_buffer params;
+
+       if (!amdgpu_acpi_is_power_shift_control_supported())
+               return -EINVAL;
+
+       atcs_input.size = sizeof(struct atcs_pwr_shift_input);
+       /* dGPU id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
+       atcs_input.dgpu_id = adev->pdev->devfn | (adev->pdev->bus->number << 8);
+       atcs_input.dev_acpi_state = dev_state;
+       atcs_input.drv_state = drv_state;
+
+       params.length = sizeof(struct atcs_pwr_shift_input);
+       params.pointer = &atcs_input;
+
+       info = amdgpu_atcs_call(atcs, ATCS_FUNCTION_POWER_SHIFT_CONTROL, &params);
+       if (!info) {
+               DRM_ERROR("ATCS PSC update failed\n");
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/**
+ * amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS
+ *
+ * @dev: drm_device pointer
+ * @ss_state: current smart shift event
+ *
+ * returns 0 on success,
+ * otherwise return error number.
+ */
+int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state)
+{
+       struct amdgpu_device *adev = drm_to_adev(dev);
+       int r;
+
+       if (!amdgpu_device_supports_smart_shift(dev))
+               return 0;
+
+       switch (ss_state) {
+       /* SBIOS trigger “stop”, “enable” and “start” at D0, Driver Operational.
+        * SBIOS trigger “stop” at D3, Driver Not Operational.
+        * SBIOS trigger “stop” and “disable” at D0, Driver NOT operational.
+        */
+       case AMDGPU_SS_DRV_LOAD:
+               r = amdgpu_acpi_power_shift_control(adev,
+                                                   AMDGPU_ATCS_PSC_DEV_STATE_D0,
+                                                   AMDGPU_ATCS_PSC_DRV_STATE_OPR);
+               break;
+       case AMDGPU_SS_DEV_D0:
+               r = amdgpu_acpi_power_shift_control(adev,
+                                                   AMDGPU_ATCS_PSC_DEV_STATE_D0,
+                                                   AMDGPU_ATCS_PSC_DRV_STATE_OPR);
+               break;
+       case AMDGPU_SS_DEV_D3:
+               r = amdgpu_acpi_power_shift_control(adev,
+                                                   AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT,
+                                                   AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR);
+               break;
+       case AMDGPU_SS_DRV_UNLOAD:
+               r = amdgpu_acpi_power_shift_control(adev,
+                                                   AMDGPU_ATCS_PSC_DEV_STATE_D0,
+                                                   AMDGPU_ATCS_PSC_DRV_STATE_NOT_OPR);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return r;
+}
+
 /**
  * amdgpu_acpi_event - handle notify events
  *
@@ -767,50 +846,15 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
  */
 int amdgpu_acpi_init(struct amdgpu_device *adev)
 {
-       acpi_handle handle, atif_handle;
-       struct amdgpu_atif *atif;
-       struct amdgpu_atcs *atcs = &adev->atcs;
-       int ret;
-
-       /* Get the device handle */
-       handle = ACPI_HANDLE(&adev->pdev->dev);
-
-       if (!adev->bios || !handle)
-               return 0;
-
-       /* Call the ATCS method */
-       ret = amdgpu_atcs_verify_interface(handle, atcs);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
-       }
-
-       /* Probe for ATIF, and initialize it if found */
-       atif_handle = amdgpu_atif_probe_handle(handle);
-       if (!atif_handle)
-               goto out;
-
-       atif = kzalloc(sizeof(*atif), GFP_KERNEL);
-       if (!atif) {
-               DRM_WARN("Not enough memory to initialize ATIF\n");
-               goto out;
-       }
-       atif->handle = atif_handle;
-
-       /* Call the ATIF method */
-       ret = amdgpu_atif_verify_interface(atif);
-       if (ret) {
-               DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
-               kfree(atif);
-               goto out;
-       }
-       adev->atif = atif;
+       struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
 
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
        if (atif->notifications.brightness_change) {
                if (amdgpu_device_has_dc_support(adev)) {
 #if defined(CONFIG_DRM_AMD_DC)
                        struct amdgpu_display_manager *dm = &adev->dm;
-                       atif->bd = dm->backlight_dev;
+                       if (dm->backlight_dev)
+                               atif->bd = dm->backlight_dev;
 #endif
                } else {
                        struct drm_encoder *tmp;
@@ -832,6 +876,129 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
                }
        }
 #endif
+       adev->acpi_nb.notifier_call = amdgpu_acpi_event;
+       register_acpi_notifier(&adev->acpi_nb);
+
+       return 0;
+}
+
+void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps)
+{
+       struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
+
+       caps->caps_valid = atif->backlight_caps.caps_valid;
+       caps->min_input_signal = atif->backlight_caps.min_input_signal;
+       caps->max_input_signal = atif->backlight_caps.max_input_signal;
+}
+
+/**
+ * amdgpu_acpi_fini - tear down driver acpi support
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Unregisters with the acpi notifier chain (all asics).
+ */
+void amdgpu_acpi_fini(struct amdgpu_device *adev)
+{
+       unregister_acpi_notifier(&adev->acpi_nb);
+}
+
+/**
+ * amdgpu_atif_pci_probe_handle - look up the ATIF handle
+ *
+ * @pdev: pci device
+ *
+ * Look up the ATIF handles (all asics).
+ * Returns true if the handle is found, false if not.
+ */
+static bool amdgpu_atif_pci_probe_handle(struct pci_dev *pdev)
+{
+       char acpi_method_name[255] = { 0 };
+       struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+       acpi_handle dhandle, atif_handle;
+       acpi_status status;
+       int ret;
+
+       dhandle = ACPI_HANDLE(&pdev->dev);
+       if (!dhandle)
+               return false;
+
+       status = acpi_get_handle(dhandle, "ATIF", &atif_handle);
+       if (ACPI_FAILURE(status)) {
+               return false;
+       }
+       amdgpu_acpi_priv.atif.handle = atif_handle;
+       acpi_get_name(amdgpu_acpi_priv.atif.handle, ACPI_FULL_PATHNAME, &buffer);
+       DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
+       ret = amdgpu_atif_verify_interface(&amdgpu_acpi_priv.atif);
+       if (ret) {
+               amdgpu_acpi_priv.atif.handle = 0;
+               return false;
+       }
+       return true;
+}
+
+/**
+ * amdgpu_atcs_pci_probe_handle - look up the ATCS handle
+ *
+ * @pdev: pci device
+ *
+ * Look up the ATCS handles (all asics).
+ * Returns true if the handle is found, false if not.
+ */
+static bool amdgpu_atcs_pci_probe_handle(struct pci_dev *pdev)
+{
+       char acpi_method_name[255] = { 0 };
+       struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
+       acpi_handle dhandle, atcs_handle;
+       acpi_status status;
+       int ret;
+
+       dhandle = ACPI_HANDLE(&pdev->dev);
+       if (!dhandle)
+               return false;
+
+       status = acpi_get_handle(dhandle, "ATCS", &atcs_handle);
+       if (ACPI_FAILURE(status)) {
+               return false;
+       }
+       amdgpu_acpi_priv.atcs.handle = atcs_handle;
+       acpi_get_name(amdgpu_acpi_priv.atcs.handle, ACPI_FULL_PATHNAME, &buffer);
+       DRM_DEBUG_DRIVER("Found ATCS handle %s\n", acpi_method_name);
+       ret = amdgpu_atcs_verify_interface(&amdgpu_acpi_priv.atcs);
+       if (ret) {
+               amdgpu_acpi_priv.atcs.handle = 0;
+               return false;
+       }
+       return true;
+}
+
+/*
+ * amdgpu_acpi_detect - detect ACPI ATIF/ATCS methods
+ *
+ * Check if we have the ATIF/ATCS methods and populate
+ * the structures in the driver.
+ */
+void amdgpu_acpi_detect(void)
+{
+       struct amdgpu_atif *atif = &amdgpu_acpi_priv.atif;
+       struct amdgpu_atcs *atcs = &amdgpu_acpi_priv.atcs;
+       struct pci_dev *pdev = NULL;
+       int ret;
+
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+               if (!atif->handle)
+                       amdgpu_atif_pci_probe_handle(pdev);
+               if (!atcs->handle)
+                       amdgpu_atcs_pci_probe_handle(pdev);
+       }
+
+       while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) {
+               if (!atif->handle)
+                       amdgpu_atif_pci_probe_handle(pdev);
+               if (!atcs->handle)
+                       amdgpu_atcs_pci_probe_handle(pdev);
+       }
 
        if (atif->functions.sbios_requests && !atif->functions.system_params) {
                /* XXX check this workaround, if sbios request function is
@@ -861,37 +1028,6 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
        } else {
                atif->backlight_caps.caps_valid = false;
        }
-
-out:
-       adev->acpi_nb.notifier_call = amdgpu_acpi_event;
-       register_acpi_notifier(&adev->acpi_nb);
-
-       return ret;
-}
-
-void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
-               struct amdgpu_dm_backlight_caps *caps)
-{
-       if (!adev->atif) {
-               caps->caps_valid = false;
-               return;
-       }
-       caps->caps_valid = adev->atif->backlight_caps.caps_valid;
-       caps->min_input_signal = adev->atif->backlight_caps.min_input_signal;
-       caps->max_input_signal = adev->atif->backlight_caps.max_input_signal;
-}
-
-/**
- * amdgpu_acpi_fini - tear down driver acpi support
- *
- * @adev: amdgpu_device pointer
- *
- * Unregisters with the acpi notifier chain (all asics).
- */
-void amdgpu_acpi_fini(struct amdgpu_device *adev)
-{
-       unregister_acpi_notifier(&adev->acpi_nb);
-       kfree(adev->atif);
 }
 
 /**
index 62aa1a6..491acdf 100644 (file)
@@ -96,8 +96,8 @@ static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
 
        lock_srbm(kgd, 0, 0, 0, vmid);
 
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
+       WREG32_SOC15(GC, 0, mmSH_MEM_CONFIG, sh_mem_config);
+       WREG32_SOC15(GC, 0, mmSH_MEM_BASES, sh_mem_bases);
        /* APE1 no longer exists on GFX9 */
 
        unlock_srbm(kgd);
@@ -161,7 +161,7 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
 
        lock_srbm(kgd, mec, pipe, 0, 0);
 
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
+       WREG32_SOC15(GC, 0, mmCPC_INT_CNTL,
                CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
                CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);
 
@@ -239,13 +239,13 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
 
        for (reg = hqd_base;
             reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
-               WREG32(reg, mqd_hqd[reg - hqd_base]);
+               WREG32_SOC15_IP(GC, reg, mqd_hqd[reg - hqd_base]);
 
 
        /* Activate doorbell logic before triggering WPTR poll. */
        data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
                             CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);
+       WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL, data);
 
        if (wptr) {
                /* Don't read wptr with get_user because the user
@@ -274,27 +274,27 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
                guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
                guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
 
-               WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
+               WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_LO,
                       lower_32_bits(guessed_wptr));
-               WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
+               WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_HI,
                       upper_32_bits(guessed_wptr));
-               WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
+               WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR,
                       lower_32_bits((uint64_t)wptr));
-               WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
+               WREG32_SOC15(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI,
                       upper_32_bits((uint64_t)wptr));
                pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
                         (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
-               WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
+               WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1,
                       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
        }
 
        /* Start the EOP fetcher */
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
+       WREG32_SOC15(GC, 0, mmCP_HQD_EOP_RPTR,
               REG_SET_FIELD(m->cp_hqd_eop_rptr,
                             CP_HQD_EOP_RPTR, INIT_FETCHER, 1));
 
        data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);
+       WREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE, data);
 
        release_queue(kgd);
 
@@ -365,7 +365,7 @@ static int kgd_hqd_dump(struct kgd_dev *kgd,
                if (WARN_ON_ONCE(i >= HQD_N_REGS))      \
                        break;                          \
                (*dump)[i][0] = (addr) << 2;            \
-               (*dump)[i++][1] = RREG32(addr);         \
+               (*dump)[i++][1] = RREG32_SOC15_IP(GC, addr);            \
        } while (0)
 
        *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
@@ -497,13 +497,13 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
        uint32_t low, high;
 
        acquire_queue(kgd, pipe_id, queue_id);
-       act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
+       act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
        if (act) {
                low = lower_32_bits(queue_address >> 8);
                high = upper_32_bits(queue_address >> 8);
 
-               if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
-                  high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
+               if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) &&
+                  high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI))
                        retval = true;
        }
        release_queue(kgd);
@@ -621,11 +621,11 @@ loop:
        preempt_enable();
 #endif
 
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);
+       WREG32_SOC15(GC, 0, mmCP_HQD_DEQUEUE_REQUEST, type);
 
        end_jiffies = (utimeout * HZ / 1000) + jiffies;
        while (true) {
-               temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
+               temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE);
                if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
                        break;
                if (time_after(jiffies, end_jiffies)) {
@@ -716,8 +716,8 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
 
        mutex_lock(&adev->grbm_idx_mutex);
 
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);
+       WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
+       WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd);
 
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                INSTANCE_BROADCAST_WRITES, 1);
@@ -726,7 +726,7 @@ static int kgd_wave_control_execute(struct kgd_dev *kgd,
        data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
                SE_BROADCAST_WRITES, 1);
 
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
+       WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data);
        mutex_unlock(&adev->grbm_idx_mutex);
 
        return 0;
index fad3b91..d39cff4 100644 (file)
@@ -156,16 +156,16 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
                                mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
                break;
        case 1:
-               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
+               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
                                mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
                break;
        case 2:
-               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
-                               mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+                               mmSDMA2_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
                break;
        case 3:
-               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
-                               mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
+               sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
+                               mmSDMA3_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
                break;
        }
 
@@ -450,7 +450,7 @@ static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
                        engine_id, queue_id);
        uint32_t i = 0, reg;
 #undef HQD_N_REGS
-#define HQD_N_REGS (19+6+7+10)
+#define HQD_N_REGS (19+6+7+12)
 
        *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
        if (*dump == NULL)
index 928e8d5..141cd29 100644 (file)
@@ -621,14 +621,13 @@ kfd_mem_attach_userptr(struct amdgpu_device *adev, struct kgd_mem *mem,
 
        ret = amdgpu_gem_object_create(adev, bo_size, 1,
                                       AMDGPU_GEM_DOMAIN_CPU,
-                                      0, ttm_bo_type_sg,
-                                      mem->bo->tbo.base.resv,
+                                      AMDGPU_GEM_CREATE_PREEMPTIBLE,
+                                      ttm_bo_type_sg, mem->bo->tbo.base.resv,
                                       &gobj);
+       amdgpu_bo_unreserve(mem->bo);
        if (ret)
                return ret;
 
-       amdgpu_bo_unreserve(mem->bo);
-
        *bo = gem_to_amdgpu_bo(gobj);
        (*bo)->parent = amdgpu_bo_ref(mem->bo);
 
@@ -640,14 +639,16 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
                      struct amdgpu_bo **bo)
 {
        struct drm_gem_object *gobj;
+       int ret;
 
        if (!mem->dmabuf) {
                mem->dmabuf = amdgpu_gem_prime_export(&mem->bo->tbo.base,
                        mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE ?
                                DRM_RDWR : 0);
                if (IS_ERR(mem->dmabuf)) {
+                       ret = PTR_ERR(mem->dmabuf);
                        mem->dmabuf = NULL;
-                       return PTR_ERR(mem->dmabuf);
+                       return ret;
                }
        }
 
@@ -662,6 +663,7 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
        dma_buf_put(mem->dmabuf);
 
        *bo = gem_to_amdgpu_bo(gobj);
+       (*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
        (*bo)->parent = amdgpu_bo_ref(mem->bo);
 
        return 0;
@@ -1410,7 +1412,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
        } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
                domain = AMDGPU_GEM_DOMAIN_GTT;
                alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
-               alloc_flags = 0;
+               alloc_flags = AMDGPU_GEM_CREATE_PREEMPTIBLE;
                if (!offset || !*offset)
                        return -EINVAL;
                user_addr = untagged_addr(*offset);
index 90136f9..f6a8f0c 100644 (file)
@@ -396,10 +396,10 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
        spin_unlock(&adev->mm_stats.lock);
 }
 
-static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
-                                struct amdgpu_bo *bo)
+static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct amdgpu_cs_parser *p = param;
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
@@ -451,21 +451,6 @@ retry:
        return r;
 }
 
-static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
-{
-       struct amdgpu_cs_parser *p = param;
-       int r;
-
-       r = amdgpu_cs_bo_validate(p, bo);
-       if (r)
-               return r;
-
-       if (bo->shadow)
-               r = amdgpu_cs_bo_validate(p, bo->shadow);
-
-       return r;
-}
-
 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                            struct list_head *validated)
 {
@@ -493,7 +478,7 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                                                     lobj->user_pages);
                }
 
-               r = amdgpu_cs_validate(p, bo);
+               r = amdgpu_cs_bo_validate(p, bo);
                if (r)
                        return r;
 
@@ -593,7 +578,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
        p->bytes_moved_vis = 0;
 
        r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
-                                     amdgpu_cs_validate, p);
+                                     amdgpu_cs_bo_validate, p);
        if (r) {
                DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
                goto error_validate;
index fc83445..e7a010b 100644 (file)
@@ -331,13 +331,15 @@ static int amdgpu_ctx_query(struct amdgpu_device *adev,
        return 0;
 }
 
+#define AMDGPU_RAS_COUNTE_DELAY_MS 3000
+
 static int amdgpu_ctx_query2(struct amdgpu_device *adev,
-       struct amdgpu_fpriv *fpriv, uint32_t id,
-       union drm_amdgpu_ctx_out *out)
+                            struct amdgpu_fpriv *fpriv, uint32_t id,
+                            union drm_amdgpu_ctx_out *out)
 {
+       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct amdgpu_ctx *ctx;
        struct amdgpu_ctx_mgr *mgr;
-       unsigned long ras_counter;
 
        if (!fpriv)
                return -EINVAL;
@@ -362,19 +364,28 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
        if (atomic_read(&ctx->guilty))
                out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
 
-       /*query ue count*/
-       ras_counter = amdgpu_ras_query_error_count(adev, false);
-       /*ras counter is monotonic increasing*/
-       if (ras_counter != ctx->ras_counter_ue) {
-               out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
-               ctx->ras_counter_ue = ras_counter;
-       }
+       if (adev->ras_enabled && con) {
+               /* Return the cached values in O(1),
+                * and schedule delayed work to cache
+                * new vaues.
+                * new values.
+               int ce_count, ue_count;
+
+               ce_count = atomic_read(&con->ras_ce_count);
+               ue_count = atomic_read(&con->ras_ue_count);
+
+               if (ce_count != ctx->ras_counter_ce) {
+                       ctx->ras_counter_ce = ce_count;
+                       out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
+               }
+
+               if (ue_count != ctx->ras_counter_ue) {
+                       ctx->ras_counter_ue = ue_count;
+                       out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
+               }
 
-       /*query ce count*/
-       ras_counter = amdgpu_ras_query_error_count(adev, true);
-       if (ras_counter != ctx->ras_counter_ce) {
-               out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
-               ctx->ras_counter_ce = ras_counter;
+               schedule_delayed_work(&con->ras_counte_delay_work,
+                                     msecs_to_jiffies(AMDGPU_RAS_COUNTE_DELAY_MS));
        }
 
        mutex_unlock(&mgr->lock);
index bcaf271..a9bbb00 100644 (file)
@@ -990,7 +990,7 @@ err:
 }
 
 /**
- * amdgpu_debugfs_regs_gfxoff_write - Enable/disable GFXOFF
+ * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
  *
  * @f: open file handle
  * @buf: User buffer to write data from
@@ -1041,7 +1041,7 @@ static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *bu
 
 
 /**
- * amdgpu_debugfs_regs_gfxoff_status - read gfxoff status
+ * amdgpu_debugfs_gfxoff_read - read gfxoff status
  *
  * @f: open file handle
  * @buf: User buffer to store read data in
index a10b4a7..0585442 100644 (file)
@@ -265,6 +265,21 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
        return amdgpu_asic_supports_baco(adev);
 }
 
+/**
+ * amdgpu_device_supports_smart_shift - Is the device dGPU with
+ * smart shift support
+ *
+ * @dev: drm_device pointer
+ *
+ * Returns true if the device is a dGPU with Smart Shift support,
+ * otherwise returns false.
+ */
+bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
+{
+       return (amdgpu_device_supports_boco(dev) &&
+               amdgpu_acpi_is_power_shift_control_supported());
+}
+
 /*
  * VRAM access helper functions
  */
@@ -501,7 +516,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
            adev->gfx.rlc.funcs &&
            adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
-                       return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0);
+                       return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0, 0);
        } else {
                writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        }
@@ -3151,7 +3166,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
  */
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
 {
-       if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
+       if (amdgpu_sriov_vf(adev) || 
+           adev->enable_virtual_display ||
+           (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
                return false;
 
        return amdgpu_device_asic_has_dc_support(adev->asic_type);
@@ -3809,6 +3826,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
                return 0;
 
        adev->in_suspend = true;
+
+       if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
+               DRM_WARN("smart shift update failed\n");
+
        drm_kms_helper_poll_disable(dev);
 
        if (fbcon)
@@ -3918,6 +3939,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 #endif
        adev->in_suspend = false;
 
+       if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
+               DRM_WARN("smart shift update failed\n");
+
        return 0;
 }
 
@@ -4694,7 +4718,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
        return 0;
 }
 
-void amdgpu_device_recheck_guilty_jobs(
+static void amdgpu_device_recheck_guilty_jobs(
        struct amdgpu_device *adev, struct list_head *device_list_handle,
        struct amdgpu_reset_context *reset_context)
 {
@@ -4997,6 +5021,8 @@ skip_hw_reset:
                        amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
                } else {
                        dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
+                       if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
+                               DRM_WARN("smart shift update failed\n");
                }
        }
 
index 0d7017a..809aa76 100644 (file)
  * - 3.39.0 - DMABUF implicit sync does a full pipeline sync
  * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
  * - 3.41.0 - Add video codec query
+ * - 3.42.0 - Add 16bpc fixed point display support
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       41
+#define KMS_DRIVER_MINOR       42
 #define KMS_DRIVER_PATCHLEVEL  0
 
 int amdgpu_vram_limit;
@@ -1821,6 +1822,7 @@ static int __init amdgpu_init(void)
 
        DRM_INFO("amdgpu kernel modesetting enabled.\n");
        amdgpu_register_atpx_handler();
+       amdgpu_acpi_detect();
 
        /* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
        amdgpu_amdkfd_init();
index 8f4a8f8..39b6c6b 100644 (file)
@@ -101,7 +101,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
 int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 {
        unsigned char buff[34];
-       int addrptr = 0, size = 0;
+       int addrptr, size;
+       int len;
 
        if (!is_fru_eeprom_supported(adev))
                return 0;
@@ -109,7 +110,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        /* If algo exists, it means that the i2c_adapter's initialized */
        if (!adev->pm.smu_i2c.algo) {
                DRM_WARN("Cannot access FRU, EEPROM accessor not initialized");
-               return 0;
+               return -ENODEV;
        }
 
        /* There's a lot of repetition here. This is due to the FRU having
@@ -128,7 +129,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
        /* Increment the addrptr by the size of the field, and 1 due to the
@@ -138,43 +139,45 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product name, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Product name should only be 32 characters. Any more,
         * and something could be wrong. Cap it at 32 to be safe
         */
-       if (size > 32) {
+       if (len >= sizeof(adev->product_name)) {
                DRM_WARN("FRU Product Number is larger than 32 characters. This is likely a mistake");
-               size = 32;
+               len = sizeof(adev->product_name) - 1;
        }
        /* Start at 2 due to buff using fields 0 and 1 for the address */
-       memcpy(adev->product_name, &buff[2], size);
-       adev->product_name[size] = '\0';
+       memcpy(adev->product_name, &buff[2], len);
+       adev->product_name[len] = '\0';
 
        addrptr += size + 1;
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product number, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Product number should only be 16 characters. Any more,
         * and something could be wrong. Cap it at 16 to be safe
         */
-       if (size > 16) {
+       if (len >= sizeof(adev->product_number)) {
                DRM_WARN("FRU Product Number is larger than 16 characters. This is likely a mistake");
-               size = 16;
+               len = sizeof(adev->product_number) - 1;
        }
-       memcpy(adev->product_number, &buff[2], size);
-       adev->product_number[size] = '\0';
+       memcpy(adev->product_number, &buff[2], len);
+       adev->product_number[len] = '\0';
 
        addrptr += size + 1;
        size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
 
        if (size < 1) {
                DRM_ERROR("Failed to read FRU product version, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
        addrptr += size + 1;
@@ -182,18 +185,19 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
 
        if (size < 1) {
                DRM_ERROR("Failed to read FRU serial number, ret:%d", size);
-               return size;
+               return -EINVAL;
        }
 
+       len = size;
        /* Serial number should only be 16 characters. Any more,
         * and something could be wrong. Cap it at 16 to be safe
         */
-       if (size > 16) {
+       if (len >= sizeof(adev->serial)) {
                DRM_WARN("FRU Serial Number is larger than 16 characters. This is likely a mistake");
-               size = 16;
+               len = sizeof(adev->serial) - 1;
        }
-       memcpy(adev->serial, &buff[2], size);
-       adev->serial[size] = '\0';
+       memcpy(adev->serial, &buff[2], len);
+       adev->serial[len] = '\0';
 
        return 0;
 }
index 649ecdf..34243e1 100644 (file)
@@ -312,8 +312,6 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, struct page **pagelist, dma_addr_t *dma_addr,
                     uint64_t flags)
 {
-       int r, i;
-
        if (!adev->gart.ready) {
                WARN(1, "trying to bind memory to uninitialized GART !\n");
                return -EINVAL;
@@ -322,16 +320,26 @@ int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
        if (!adev->gart.ptr)
                return 0;
 
-       r = amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
-                   adev->gart.ptr);
-       if (r)
-               return r;
+       return amdgpu_gart_map(adev, offset, pages, dma_addr, flags,
+                              adev->gart.ptr);
+}
+
+/**
+ * amdgpu_gart_invalidate_tlb - invalidate gart TLB
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Invalidate the GART TLB, which can be used as a way to flush GART changes
+ *
+ */
+void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev)
+{
+       int i;
 
        mb();
        amdgpu_asic_flush_hdp(adev, NULL);
        for (i = 0; i < adev->num_vmhubs; i++)
                amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
-       return 0;
 }
 
 /**
index 030b9d4..f53f6a7 100644 (file)
@@ -66,5 +66,5 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
 int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
                     int pages, struct page **pagelist,
                     dma_addr_t *dma_addr, uint64_t flags);
-
+void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev);
 #endif
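
Splitting the TLB flush out of amdgpu_gart_bind() lets a caller batch several
bindings and flush once at the end. A hedged sketch of the intended call
pattern (struct gart_binding and gart_bind_many() are made up for
illustration; kernel context assumed):

	struct gart_binding {
		uint64_t offset;
		int pages;
		struct page **pagelist;
		dma_addr_t *dma_addr;
		uint64_t flags;
	};

	static int gart_bind_many(struct amdgpu_device *adev,
				  const struct gart_binding *b, int n)
	{
		int i, r;

		for (i = 0; i < n; i++) {
			r = amdgpu_gart_bind(adev, b[i].offset, b[i].pages,
					     b[i].pagelist, b[i].dma_addr,
					     b[i].flags);
			if (r)
				return r;
		}
		/* one flush covers all of the updates above */
		amdgpu_gart_invalidate_tlb(adev);
		return 0;
	}
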
index aac8ef3..7061c4a 100644 (file)
@@ -537,7 +537,7 @@ int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_tmz_set -- check and set if a device supports TMZ
+ * amdgpu_gmc_tmz_set -- check and set if a device supports TMZ
  * @adev: amdgpu_device pointer
  *
  * Check and set if the device @adev supports Trusted Memory
@@ -583,7 +583,7 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
 }
 
 /**
- * amdgpu_noretry_set -- set per asic noretry defaults
+ * amdgpu_gmc_noretry_set -- set per asic noretry defaults
  * @adev: amdgpu_device pointer
  *
  * Set a per asic default for the no-retry parameter.
@@ -638,13 +638,18 @@ void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
        for (i = 0; i < 16; i++) {
                reg = hub->vm_context0_cntl + hub->ctx_distance * i;
 
-               tmp = RREG32(reg);
+               tmp = (hub_type == AMDGPU_GFXHUB_0) ?
+                       RREG32_SOC15_IP(GC, reg) :
+                       RREG32_SOC15_IP(MMHUB, reg);
+
                if (enable)
                        tmp |= hub->vm_cntx_cntl_vm_fault;
                else
                        tmp &= ~hub->vm_cntx_cntl_vm_fault;
 
-               WREG32(reg, tmp);
+               (hub_type == AMDGPU_GFXHUB_0) ?
+                       WREG32_SOC15_IP(GC, reg, tmp) :
+                       WREG32_SOC15_IP(MMHUB, reg, tmp);
        }
 }
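
The hub_type ternaries above pick between the GC and MMHUB flavors of the
SR-IOV-aware SOC15 accessors. The same dispatch could be factored into small
helpers; a hypothetical sketch (these helpers do not exist in the driver, and
the SOC15 macros expand using the local adev):

	static u32 vmhub_rreg(struct amdgpu_device *adev, int hub_type, u32 reg)
	{
		return (hub_type == AMDGPU_GFXHUB_0) ?
			RREG32_SOC15_IP(GC, reg) :
			RREG32_SOC15_IP(MMHUB, reg);
	}

	static void vmhub_wreg(struct amdgpu_device *adev, int hub_type,
			       u32 reg, u32 v)
	{
		if (hub_type == AMDGPU_GFXHUB_0)
			WREG32_SOC15_IP(GC, reg, v);
		else
			WREG32_SOC15_IP(MMHUB, reg, v);
	}
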
 
index 6a84c97..9ab3304 100644 (file)
@@ -216,10 +216,12 @@ uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
 int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
 {
        struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
+       struct amdgpu_device *adev;
        struct amdgpu_gtt_node *node;
        struct drm_mm_node *mm_node;
        int r = 0;
 
+       adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
        spin_lock(&mgr->lock);
        drm_mm_for_each_node(mm_node, &mgr->mm) {
                node = container_of(mm_node, struct amdgpu_gtt_node, node);
@@ -229,6 +231,8 @@ int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
        }
        spin_unlock(&mgr->lock);
 
+       amdgpu_gart_invalidate_tlb(adev);
+
        return r;
 }
 
index 2e6789a..77baf9b 100644 (file)
@@ -130,7 +130,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_ib *ib = &ibs[0];
        struct dma_fence *tmp = NULL;
-       bool skip_preamble, need_ctx_switch;
+       bool need_ctx_switch;
        unsigned patch_offset = ~0;
        struct amdgpu_vm *vm;
        uint64_t fence_ctx;
@@ -227,7 +227,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (need_ctx_switch)
                status |= AMDGPU_HAVE_CTX_SWITCH;
 
-       skip_preamble = ring->current_ctx == fence_ctx;
        if (job && ring->funcs->emit_cntxcntl) {
                status |= job->preamble_status;
                status |= job->preemption_status;
@@ -245,14 +244,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];
 
-               /* drop preamble IBs if we don't have a context switch */
-               if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
-                   skip_preamble &&
-                   !(status & AMDGPU_PREAMBLE_IB_PRESENT_FIRST) &&
-                   !amdgpu_mcbp &&
-                   !amdgpu_sriov_vf(adev)) /* for SRIOV preemption, Preamble CE ib must be inserted anyway */
-                       continue;
-
                if (job && ring->funcs->emit_frame_cntl) {
                        if (secure != !!(ib->flags & AMDGPU_IB_FLAGS_SECURE)) {
                                amdgpu_ring_emit_frame_cntl(ring, false, secure);
index b4971e9..c7f3aae 100644 (file)
@@ -183,7 +183,7 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
 }
 
 /**
- * amdgpu_vm_grab_idle - grab idle VMID
+ * amdgpu_vmid_grab_idle - grab idle VMID
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
@@ -256,7 +256,7 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 }
 
 /**
- * amdgpu_vm_grab_reserved - try to assign reserved VMID
+ * amdgpu_vmid_grab_reserved - try to assign reserved VMID
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
@@ -325,7 +325,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 }
 
 /**
- * amdgpu_vm_grab_used - try to reuse a VMID
+ * amdgpu_vmid_grab_used - try to reuse a VMID
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
@@ -397,7 +397,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 }
 
 /**
- * amdgpu_vm_grab_id - allocate the next free VMID
+ * amdgpu_vmid_grab - allocate the next free VMID
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
index d1bb69a..425596c 100644 (file)
@@ -92,6 +92,9 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
                pm_runtime_forbid(dev->dev);
        }
 
+       if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD))
+               DRM_WARN("smart shift update failed\n");
+
        amdgpu_acpi_fini(adev);
        amdgpu_device_fini_hw(adev);
 }
@@ -215,6 +218,9 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
                pm_runtime_put_autosuspend(dev->dev);
        }
 
+       if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD))
+               DRM_WARN("smart shift update failed\n");
+
 out:
        if (r) {
                /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
@@ -862,6 +868,21 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
                                            min((size_t)size, (size_t)(bios_size - bios_offset)))
                                        ? -EFAULT : 0;
                }
+               case AMDGPU_INFO_VBIOS_INFO: {
+                       struct drm_amdgpu_info_vbios vbios_info = {};
+                       struct atom_context *atom_context;
+
+                       atom_context = adev->mode_info.atom_context;
+                       memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name));
+                       memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn));
+                       vbios_info.version = atom_context->version;
+                       memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
+                                               sizeof(atom_context->vbios_ver_str));
+                       memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date));
+
+                       return copy_to_user(out, &vbios_info,
+                                               min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;
+               }
                default:
                        DRM_DEBUG_KMS("Invalid request %d\n",
                                        info->vbios_info.type);
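
For context, userspace reaches the new query through the existing AMDGPU INFO
ioctl. A hedged userspace sketch (the header location and render-node path
vary by system; error handling trimmed):

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <amdgpu_drm.h>	/* drm_amdgpu_info, DRM_IOCTL_AMDGPU_INFO */

	int main(void)
	{
		struct drm_amdgpu_info_vbios vbios = {0};
		struct drm_amdgpu_info req = {0};
		int fd = open("/dev/dri/renderD128", O_RDWR);

		if (fd < 0)
			return 1;

		req.return_pointer = (uint64_t)(uintptr_t)&vbios;
		req.return_size = sizeof(vbios);
		req.query = AMDGPU_INFO_VBIOS;
		req.vbios_info.type = AMDGPU_INFO_VBIOS_INFO;

		if (ioctl(fd, DRM_IOCTL_AMDGPU_INFO, &req) == 0)
			printf("vbios %s, version %u, date %s\n",
			       vbios.vbios_ver_str, vbios.version, vbios.date);

		close(fd);
		return 0;
	}
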
index 3b509b0..a6fa396 100644 (file)
@@ -71,7 +71,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
        }
        amdgpu_bo_unref(&bo->parent);
 
-       if (bo->tbo.type == ttm_bo_type_device) {
+       if (bo->tbo.type != ttm_bo_type_kernel) {
                ubo = to_amdgpu_bo_user(bo);
                kfree(ubo->metadata);
        }
@@ -133,7 +133,9 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
        if (domain & AMDGPU_GEM_DOMAIN_GTT) {
                places[c].fpfn = 0;
                places[c].lpfn = 0;
-               places[c].mem_type = TTM_PL_TT;
+               places[c].mem_type =
+                       abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
+                       AMDGPU_PL_PREEMPT : TTM_PL_TT;
                places[c].flags = 0;
                c++;
        }
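
AMDGPU_GEM_CREATE_PREEMPTIBLE is only set by in-kernel callers; the uapi
rejects it from userspace. A hedged sketch of how an in-kernel user (e.g.
amdkfd) would request the new placement (surrounding declarations omitted):

	struct drm_gem_object *gobj;
	int r;

	/* the PREEMPTIBLE flag routes the BO into AMDGPU_PL_PREEMPT
	 * instead of TTM_PL_TT, per the placement logic above */
	r = amdgpu_gem_object_create(adev, size, 1, AMDGPU_GEM_DOMAIN_GTT,
				     AMDGPU_GEM_CREATE_PREEMPTIBLE,
				     ttm_bo_type_device, NULL, &gobj);
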
@@ -612,35 +614,6 @@ fail_unreserve:
        return r;
 }
 
-int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
-                           unsigned long size,
-                           struct amdgpu_bo *bo)
-{
-       struct amdgpu_bo_param bp;
-       int r;
-
-       if (bo->shadow)
-               return 0;
-
-       memset(&bp, 0, sizeof(bp));
-       bp.size = size;
-       bp.domain = AMDGPU_GEM_DOMAIN_GTT;
-       bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
-       bp.type = ttm_bo_type_kernel;
-       bp.resv = bo->tbo.base.resv;
-       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-
-       r = amdgpu_bo_create(adev, &bp, &bo->shadow);
-       if (!r) {
-               bo->shadow->parent = amdgpu_bo_ref(bo);
-               mutex_lock(&adev->shadow_list_lock);
-               list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
-               mutex_unlock(&adev->shadow_list_lock);
-       }
-
-       return r;
-}
-
 /**
  * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
  * @adev: amdgpu device object
@@ -668,6 +641,38 @@ int amdgpu_bo_create_user(struct amdgpu_device *adev,
        *ubo_ptr = to_amdgpu_bo_user(bo_ptr);
        return r;
 }
+
+/**
+ * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @vmbo_ptr: pointer to the buffer object pointer
+ *
+ * Create a BO to be used by GPUVM.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_bo_create_vm(struct amdgpu_device *adev,
+                       struct amdgpu_bo_param *bp,
+                       struct amdgpu_bo_vm **vmbo_ptr)
+{
+       struct amdgpu_bo *bo_ptr;
+       int r;
+
+       /* bo_ptr_size will be determined by the caller and it depends on
+        * the number of amdgpu_vm_pt entries.
+        */
+       BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
+       r = amdgpu_bo_create(adev, bp, &bo_ptr);
+       if (r)
+               return r;
+
+       *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
+       return r;
+}
+
 /**
  * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
  * @bo: pointer to the buffer object
@@ -702,6 +707,22 @@ retry:
        return r;
 }
 
+/**
+ * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
+ *
+ * @bo: BO that will be inserted into the shadow list
+ *
+ * Insert a BO to the shadow list.
+ */
+void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *bo)
+{
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+       mutex_lock(&adev->shadow_list_lock);
+       list_add_tail(&bo->shadow_list, &adev->shadow_list);
+       mutex_unlock(&adev->shadow_list_lock);
+}
+
 /**
  * amdgpu_bo_restore_shadow - restore an &amdgpu_bo shadow
  *
@@ -1191,6 +1212,9 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
 
        BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
        ubo = to_amdgpu_bo_user(bo);
+       if (metadata_size)
+               *metadata_size = ubo->metadata_size;
+
        if (buffer) {
                if (buffer_size < ubo->metadata_size)
                        return -EINVAL;
@@ -1199,8 +1223,6 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                        memcpy(buffer, ubo->metadata, ubo->metadata_size);
        }
 
-       if (metadata_size)
-               *metadata_size = ubo->metadata_size;
        if (flags)
                *flags = ubo->metadata_flags;
 
index a44779d..90eab1c 100644 (file)
@@ -44,6 +44,7 @@
 #define AMDGPU_AMDKFD_CREATE_SVM_BO    (1ULL << 62)
 
 #define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
+#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)
 
 struct amdgpu_bo_param {
        unsigned long                   size;
@@ -103,9 +104,6 @@ struct amdgpu_bo {
        struct amdgpu_vm_bo_base        *vm_bo;
        /* Constant after initialization */
        struct amdgpu_bo                *parent;
-       struct amdgpu_bo                *shadow;
-
-
 
 #ifdef CONFIG_MMU_NOTIFIER
        struct mmu_interval_notifier    notifier;
@@ -125,6 +123,12 @@ struct amdgpu_bo_user {
 
 };
 
+struct amdgpu_bo_vm {
+       struct amdgpu_bo                bo;
+       struct amdgpu_bo                *shadow;
+       struct amdgpu_vm_pt             entries[];
+};
+
 static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
 {
        return container_of(tbo, struct amdgpu_bo, tbo);
@@ -252,6 +256,22 @@ static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
        return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
 }
 
+/**
+ * amdgpu_bo_shadowed - check if the BO is shadowed
+ *
+ * @bo: BO to be tested.
+ *
+ * Returns:
+ * The shadow BO pointer, or NULL if the BO is not shadowed.
+ */
+static inline struct amdgpu_bo *amdgpu_bo_shadowed(struct amdgpu_bo *bo)
+{
+       if (bo->tbo.type == ttm_bo_type_kernel)
+               return to_amdgpu_bo_vm(bo)->shadow;
+
+       return NULL;
+}
+
 bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 
@@ -272,11 +292,11 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
 int amdgpu_bo_create_user(struct amdgpu_device *adev,
                          struct amdgpu_bo_param *bp,
                          struct amdgpu_bo_user **ubo_ptr);
+int amdgpu_bo_create_vm(struct amdgpu_device *adev,
+                       struct amdgpu_bo_param *bp,
+                       struct amdgpu_bo_vm **vmbo_ptr);
 void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
                           void **cpu_addr);
-int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
-                           unsigned long size,
-                           struct amdgpu_bo *bo);
 int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
 void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
 void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
@@ -312,6 +332,7 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
 int amdgpu_bo_validate(struct amdgpu_bo *bo);
 void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
                                uint64_t *gtt_mem, uint64_t *cpu_mem);
+void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo *bo);
 int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
                             struct dma_fence **fence);
 uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
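
The BO subclassing above relies on the embedded base struct being the first
member, so to_amdgpu_bo_vm() can downcast with container_of(), and on a
flexible entries[] array whose size the caller folds into bo_ptr_size (see the
struct_size() use in amdgpu_vm_pt_create() below). A standalone sketch of the
same pattern (all names illustrative):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct base_bo { int id; };

	struct vm_bo {
		struct base_bo bo;	/* first member, like amdgpu_bo in amdgpu_bo_vm */
		int num_entries;
		int entries[];		/* flexible array, like entries[] above */
	};

	int main(void)
	{
		int n = 4;
		/* struct_size()-style allocation: header plus n trailing entries */
		struct vm_bo *v = malloc(sizeof(*v) + n * sizeof(v->entries[0]));
		struct base_bo *b;

		if (!v)
			return 1;
		v->bo.id = 42;
		v->num_entries = n;

		b = &v->bo;	/* hand out the base pointer, as the driver does */

		/* downcast, like to_amdgpu_bo_vm(abo) */
		printf("%d entries\n",
		       container_of(b, struct vm_bo, bo)->num_entries);
		free(v);
		return 0;
	}
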
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_preempt_mgr.c
new file mode 100644 (file)
index 0000000..d607f31
--- /dev/null
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2016-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König, Felix Kuehling
+ */
+
+#include "amdgpu.h"
+
+static inline struct amdgpu_preempt_mgr *
+to_preempt_mgr(struct ttm_resource_manager *man)
+{
+       return container_of(man, struct amdgpu_preempt_mgr, manager);
+}
+
+/**
+ * DOC: mem_info_preempt_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting the current total
+ * amount of used preemptible memory.
+ * The file mem_info_preempt_used is used for this, and returns the
+ * currently used size of the preemptible pool, in bytes.
+ */
+static ssize_t mem_info_preempt_used_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(ddev);
+       struct ttm_resource_manager *man;
+
+       man = ttm_manager_type(&adev->mman.bdev, AMDGPU_PL_PREEMPT);
+       return sysfs_emit(buf, "%llu\n", amdgpu_preempt_mgr_usage(man));
+}
+
+static DEVICE_ATTR_RO(mem_info_preempt_used);
+
+/**
+ * amdgpu_preempt_mgr_new - allocate a new node
+ *
+ * @man: TTM memory type manager
+ * @tbo: TTM BO we need this range for
+ * @place: placement flags and restrictions
+ * @mem: the resulting mem object
+ *
+ * Dummy allocator: just account the space used, without allocating any real
+ * resource or enforcing a limit.
+ */
+static int amdgpu_preempt_mgr_new(struct ttm_resource_manager *man,
+                                 struct ttm_buffer_object *tbo,
+                                 const struct ttm_place *place,
+                                 struct ttm_resource *mem)
+{
+       struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
+
+       atomic64_add(mem->num_pages, &mgr->used);
+
+       mem->mm_node = NULL;
+       mem->start = AMDGPU_BO_INVALID_OFFSET;
+       return 0;
+}
+
+/**
+ * amdgpu_preempt_mgr_del - free ranges
+ *
+ * @man: TTM memory type manager
+ * @mem: TTM memory object
+ *
+ * Release the accounted space again.
+ */
+static void amdgpu_preempt_mgr_del(struct ttm_resource_manager *man,
+                                  struct ttm_resource *mem)
+{
+       struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
+
+       atomic64_sub(mem->num_pages, &mgr->used);
+}
+
+/**
+ * amdgpu_preempt_mgr_usage - return usage of PREEMPT domain
+ *
+ * @man: TTM memory type manager
+ *
+ * Return how many bytes are currently used in the PREEMPT domain.
+ */
+uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man)
+{
+       struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
+       s64 result = atomic64_read(&mgr->used);
+
+       return (result > 0 ? result : 0) * PAGE_SIZE;
+}
+
+/**
+ * amdgpu_preempt_mgr_debug - dump the PREEMPT manager state
+ *
+ * @man: TTM memory type manager
+ * @printer: DRM printer to use
+ *
+ * Dump the manager state using the provided DRM printer.
+ */
+static void amdgpu_preempt_mgr_debug(struct ttm_resource_manager *man,
+                                    struct drm_printer *printer)
+{
+       struct amdgpu_preempt_mgr *mgr = to_preempt_mgr(man);
+
+       drm_printf(printer, "man size:%llu pages, preempt used:%lld pages\n",
+                  man->size, (u64)atomic64_read(&mgr->used));
+}
+
+static const struct ttm_resource_manager_func amdgpu_preempt_mgr_func = {
+       .alloc = amdgpu_preempt_mgr_new,
+       .free = amdgpu_preempt_mgr_del,
+       .debug = amdgpu_preempt_mgr_debug
+};
+
+/**
+ * amdgpu_preempt_mgr_init - init the PREEMPT manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Initialize the PREEMPT manager and register it with TTM.
+ */
+int amdgpu_preempt_mgr_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr;
+       struct ttm_resource_manager *man = &mgr->manager;
+       int ret;
+
+       man->use_tt = true;
+       man->func = &amdgpu_preempt_mgr_func;
+
+       ttm_resource_manager_init(man, (1 << 30));
+
+       atomic64_set(&mgr->used, 0);
+
+       ret = device_create_file(adev->dev, &dev_attr_mem_info_preempt_used);
+       if (ret) {
+               DRM_ERROR("Failed to create device file mem_info_preempt_used\n");
+               return ret;
+       }
+
+       ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT,
+                              &mgr->manager);
+       ttm_resource_manager_set_used(man, true);
+       return 0;
+}
+
+/**
+ * amdgpu_preempt_mgr_fini - free and destroy the PREEMPT manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Destroy and free the PREEMPT manager; bails out early if resources
+ * are still allocated inside it.
+ */
+void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev)
+{
+       struct amdgpu_preempt_mgr *mgr = &adev->mman.preempt_mgr;
+       struct ttm_resource_manager *man = &mgr->manager;
+       int ret;
+
+       ttm_resource_manager_set_used(man, false);
+
+       ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+       if (ret)
+               return;
+
+       device_remove_file(adev->dev, &dev_attr_mem_info_preempt_used);
+
+       ttm_resource_manager_cleanup(man);
+       ttm_set_driver_manager(&adev->mman.bdev, AMDGPU_PL_PREEMPT, NULL);
+}
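
The counter is exposed as a regular sysfs attribute; a hedged sketch of
reading it from userspace (the exact path depends on the card index):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long used = 0;
		FILE *f = fopen("/sys/class/drm/card0/device/mem_info_preempt_used",
				"r");

		if (!f)
			return 1;
		if (fscanf(f, "%llu", &used) == 1)
			printf("preemptible memory in use: %llu bytes\n", used);
		fclose(f);
		return 0;
	}
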
index 2bfdc27..e5023f1 100644 (file)
@@ -76,6 +76,7 @@ struct psp_ring
        uint64_t                        ring_mem_mc_addr;
        void                            *ring_mem_handle;
        uint32_t                        ring_size;
+       uint32_t                        ring_wptr;
 };
 
 /* More registers may be supported */
index c2c791c..9dfc1eb 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/uaccess.h>
 #include <linux/reboot.h>
 #include <linux/syscalls.h>
+#include <linux/pm_runtime.h>
 
 #include "amdgpu.h"
 #include "amdgpu_ras.h"
@@ -1043,29 +1044,36 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
 }
 
 /* get the total error counts on all IPs */
-unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
-               bool is_ce)
+void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+                                 unsigned long *ce_count,
+                                 unsigned long *ue_count)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
        struct ras_manager *obj;
-       struct ras_err_data data = {0, 0};
+       unsigned long ce, ue;
 
        if (!adev->ras_enabled || !con)
-               return 0;
+               return;
 
+       ce = 0;
+       ue = 0;
        list_for_each_entry(obj, &con->head, node) {
                struct ras_query_if info = {
                        .head = obj->head,
                };
 
                if (amdgpu_ras_query_error_status(adev, &info))
-                       return 0;
+                       return;
 
-               data.ce_count += info.ce_count;
-               data.ue_count += info.ue_count;
+               ce += info.ce_count;
+               ue += info.ue_count;
        }
 
-       return is_ce ? data.ce_count : data.ue_count;
+       if (ce_count)
+               *ce_count = ce;
+
+       if (ue_count)
+               *ue_count = ue;
 }
 /* query/inject/cure end */
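
With the reworked interface, callers pass out-parameters and may leave either
one NULL when only a single count is needed; a short sketch of the call
pattern (kernel context assumed):

	unsigned long ce_count = 0, ue_count = 0;

	/* fetch both counters */
	amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);

	/* only the correctable-error count is of interest */
	amdgpu_ras_query_error_count(adev, &ce_count, NULL);
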
 
@@ -2109,6 +2117,30 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev)
                adev->ras_hw_enabled & amdgpu_ras_mask;
 }
 
+static void amdgpu_ras_counte_dw(struct work_struct *work)
+{
+       struct amdgpu_ras *con = container_of(work, struct amdgpu_ras,
+                                             ras_counte_delay_work.work);
+       struct amdgpu_device *adev = con->adev;
+       struct drm_device *dev = &adev->ddev;
+       unsigned long ce_count = 0, ue_count = 0; /* stay 0 if the query fails */
+       int res;
+
+       res = pm_runtime_get_sync(dev->dev);
+       if (res < 0)
+               goto Out;
+
+       /* Cache new values.
+        */
+       amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
+       atomic_set(&con->ras_ce_count, ce_count);
+       atomic_set(&con->ras_ue_count, ue_count);
+
+       pm_runtime_mark_last_busy(dev->dev);
+Out:
+       pm_runtime_put_autosuspend(dev->dev);
+}
+
 int amdgpu_ras_init(struct amdgpu_device *adev)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -2123,6 +2155,11 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
        if (!con)
                return -ENOMEM;
 
+       con->adev = adev;
+       INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw);
+       atomic_set(&con->ras_ce_count, 0);
+       atomic_set(&con->ras_ue_count, 0);
+
        con->objs = (struct ras_manager *)(con + 1);
 
        amdgpu_ras_set_context(adev, con);
@@ -2226,6 +2263,8 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
                         struct ras_fs_if *fs_info,
                         struct ras_ih_if *ih_info)
 {
+       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+       unsigned long ue_count = 0, ce_count = 0;
        int r;
 
        /* disable RAS feature per IP block if it is not supported */
@@ -2266,6 +2305,12 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
        if (r)
                goto sysfs;
 
+       /* These are the values cached at init.
+        */
+       amdgpu_ras_query_error_count(adev, &ce_count, &ue_count);
+       atomic_set(&con->ras_ce_count, ce_count);
+       atomic_set(&con->ras_ue_count, ue_count);
+
        return 0;
 cleanup:
        amdgpu_ras_sysfs_remove(adev, ras_block);
@@ -2384,6 +2429,8 @@ int amdgpu_ras_fini(struct amdgpu_device *adev)
        if (con->features)
                amdgpu_ras_disable_all_features(adev, 1);
 
+       cancel_delayed_work_sync(&con->ras_counte_delay_work);
+
        amdgpu_ras_set_context(adev, NULL);
        kfree(con);
 
index bfa40c8..256cea5 100644 (file)
@@ -340,6 +340,11 @@ struct amdgpu_ras {
 
        /* disable ras error count harvest in recovery */
        bool disable_ras_err_cnt_harvest;
+
+       /* delayed work to cache the RAS error counts */
+       struct delayed_work ras_counte_delay_work;
+       atomic_t ras_ue_count;
+       atomic_t ras_ce_count;
 };
 
 struct ras_fs_data {
@@ -485,8 +490,9 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
 void amdgpu_ras_resume(struct amdgpu_device *adev);
 void amdgpu_ras_suspend(struct amdgpu_device *adev);
 
-unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev,
-               bool is_ce);
+void amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+                                 unsigned long *ce_count,
+                                 unsigned long *ue_count);
 
 /* error handling functions */
 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
index 4fc2ce8..7a4775a 100644 (file)
@@ -127,8 +127,8 @@ struct amdgpu_rlc_funcs {
        void (*reset)(struct amdgpu_device *adev);
        void (*start)(struct amdgpu_device *adev);
        void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
-       void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag);
-       u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 flag);
+       void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 acc_flags, u32 hwip);
+       u32 (*rlcg_rreg)(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip);
        bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
 };
 
index 80437b6..832970c 100644 (file)
@@ -158,6 +158,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
                }
                break;
        case TTM_PL_TT:
+       case AMDGPU_PL_PREEMPT:
        default:
                amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
                break;
@@ -198,6 +199,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 
        BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
               AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+       BUG_ON(mem->mem_type == AMDGPU_PL_PREEMPT);
 
        /* Map only what can't be accessed directly */
        if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
@@ -461,7 +463,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
        struct ttm_resource *old_mem = &bo->mem;
        int r;
 
-       if (new_mem->mem_type == TTM_PL_TT) {
+       if (new_mem->mem_type == TTM_PL_TT ||
+           new_mem->mem_type == AMDGPU_PL_PREEMPT) {
                r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
                if (r)
                        return r;
@@ -479,11 +482,13 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                goto out;
        }
        if (old_mem->mem_type == TTM_PL_SYSTEM &&
-           new_mem->mem_type == TTM_PL_TT) {
+           (new_mem->mem_type == TTM_PL_TT ||
+            new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
                ttm_bo_move_null(bo, new_mem);
                goto out;
        }
-       if (old_mem->mem_type == TTM_PL_TT &&
+       if ((old_mem->mem_type == TTM_PL_TT ||
+            old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
            new_mem->mem_type == TTM_PL_SYSTEM) {
                r = ttm_bo_wait_ctx(bo, ctx);
                if (r)
@@ -568,6 +573,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
                /* system memory */
                return 0;
        case TTM_PL_TT:
+       case AMDGPU_PL_PREEMPT:
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->start << PAGE_SHIFT;
@@ -987,6 +993,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
                        return r;
                }
 
+               amdgpu_gart_invalidate_tlb(adev);
                ttm_resource_free(bo, &bo->mem);
                bo->mem = tmp;
        }
@@ -1273,7 +1280,8 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
        if (mem && mem->mem_type != TTM_PL_SYSTEM)
                flags |= AMDGPU_PTE_VALID;
 
-       if (mem && mem->mem_type == TTM_PL_TT) {
+       if (mem && (mem->mem_type == TTM_PL_TT ||
+                   mem->mem_type == AMDGPU_PL_PREEMPT)) {
                flags |= AMDGPU_PTE_SYSTEM;
 
                if (ttm->caching == ttm_cached)
@@ -1347,6 +1355,15 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
        }
 
        switch (bo->mem.mem_type) {
+       case AMDGPU_PL_PREEMPT:
+               /* Preemptible BOs don't own system resources managed by the
+                * driver (pages, VRAM, GART space). They point to resources
+                * owned by someone else (e.g. pageable memory in user mode
+                * or a DMABuf). They are used in a preemptible context so we
+                * can guarantee no deadlocks and good QoS in case of MMU
+                * notifiers or DMABuf move notifiers from the resource owner.
+                */
+               return false;
        case TTM_PL_TT:
                if (amdgpu_bo_is_amdgpu_bo(bo) &&
                    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
@@ -1727,6 +1744,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
        DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
                 (unsigned)(gtt_size / (1024 * 1024)));
 
+       /* Initialize preemptible memory pool */
+       r = amdgpu_preempt_mgr_init(adev);
+       if (r) {
+               DRM_ERROR("Failed initializing PREEMPT heap.\n");
+               return r;
+       }
+
        /* Initialize various on-chip memory pools */
        r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
        if (r) {
@@ -1767,6 +1791,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
 
        amdgpu_vram_mgr_fini(adev);
        amdgpu_gtt_mgr_fini(adev);
+       amdgpu_preempt_mgr_fini(adev);
        ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
        ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
        ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
@@ -1917,6 +1942,11 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                return -EINVAL;
        }
 
+       if (bo->tbo.mem.mem_type == AMDGPU_PL_PREEMPT) {
+               DRM_ERROR("Trying to clear preemptible memory.\n");
+               return -EINVAL;
+       }
+
        if (bo->tbo.mem.mem_type == TTM_PL_TT) {
                r = amdgpu_ttm_alloc_gart(&bo->tbo);
                if (r)
index b2c97b1..74a7021 100644 (file)
@@ -31,6 +31,7 @@
 #define AMDGPU_PL_GDS          (TTM_PL_PRIV + 0)
 #define AMDGPU_PL_GWS          (TTM_PL_PRIV + 1)
 #define AMDGPU_PL_OA           (TTM_PL_PRIV + 2)
+#define AMDGPU_PL_PREEMPT      (TTM_PL_PRIV + 3)
 
 #define AMDGPU_GTT_MAX_TRANSFER_SIZE   512
 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS        2
@@ -54,6 +55,11 @@ struct amdgpu_gtt_mgr {
        atomic64_t available;
 };
 
+struct amdgpu_preempt_mgr {
+       struct ttm_resource_manager manager;
+       atomic64_t used;
+};
+
 struct amdgpu_mman {
        struct ttm_device               bdev;
        bool                            initialized;
@@ -70,6 +76,7 @@ struct amdgpu_mman {
 
        struct amdgpu_vram_mgr vram_mgr;
        struct amdgpu_gtt_mgr gtt_mgr;
+       struct amdgpu_preempt_mgr preempt_mgr;
 
        uint64_t                stolen_vga_size;
        struct amdgpu_bo        *stolen_vga_memory;
@@ -97,6 +104,8 @@ struct amdgpu_copy_mem {
 
 int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size);
 void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev);
+int amdgpu_preempt_mgr_init(struct amdgpu_device *adev);
+void amdgpu_preempt_mgr_fini(struct amdgpu_device *adev);
 int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
 void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
 
@@ -104,6 +113,8 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
 
+uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);
+
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
 int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
                              struct ttm_resource *mem,
index 82f0542..ce8f80a 100644 (file)
@@ -840,9 +840,8 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
 
        default:
                DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
-               return -EINVAL;
        }
-       BUG();
+
        return -EINVAL;
 }
 
index 8332034..1ae7f82 100644 (file)
@@ -88,7 +88,7 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                                      bool direct, struct dma_fence **fence);
 
 /**
- * amdgpu_vce_init - allocate memory, load vce firmware
+ * amdgpu_vce_sw_init - allocate memory, load vce firmware
  *
  * @adev: amdgpu_device pointer
  * @size: size for the new BO
@@ -205,7 +205,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
 }
 
 /**
- * amdgpu_vce_fini - free memory
+ * amdgpu_vce_sw_fini - free memory
  *
  * @adev: amdgpu_device pointer
  *
@@ -579,7 +579,7 @@ err:
 }
 
 /**
- * amdgpu_vce_cs_validate_bo - make sure not to cross 4GB boundary
+ * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
  *
  * @p: parser context
  * @ib_idx: indirect buffer to use
@@ -720,7 +720,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
 }
 
 /**
- * amdgpu_vce_cs_parse - parse and validate the command stream
+ * amdgpu_vce_ring_parse_cs - parse and validate the command stream
  *
  * @p: parser context
  * @ib_idx: indirect buffer to use
@@ -956,7 +956,7 @@ out:
 }
 
 /**
- * amdgpu_vce_cs_parse_vm - parse the command stream in VM mode
+ * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
  *
  * @p: parser context
  * @ib_idx: indirect buffer to use
index 7d5b02e..8d218c5 100644 (file)
@@ -653,15 +653,15 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
        spin_lock(&adev->mman.bdev.lru_lock);
        list_for_each_entry(bo_base, &vm->idle, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;
+               struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
 
                if (!bo->parent)
                        continue;
 
                ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
                                        &vm->lru_bulk_move);
-               if (bo->shadow)
-                       ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
-                                               &bo->shadow->tbo.mem,
+               if (shadow)
+                       ttm_bo_move_to_lru_tail(&shadow->tbo, &shadow->tbo.mem,
                                                &vm->lru_bulk_move);
        }
        spin_unlock(&adev->mman.bdev.lru_lock);
@@ -693,15 +693,21 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 
        list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
                struct amdgpu_bo *bo = bo_base->bo;
+               struct amdgpu_bo *shadow = amdgpu_bo_shadowed(bo);
 
                r = validate(param, bo);
                if (r)
                        return r;
+               if (shadow) {
+                       r = validate(param, shadow);
+                       if (r)
+                               return r;
+               }
 
                if (bo->tbo.type != ttm_bo_type_kernel) {
                        amdgpu_vm_bo_moved(bo_base);
                } else {
-                       vm->update_funcs->map_table(bo);
+                       vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
                        amdgpu_vm_bo_relocated(bo_base);
                }
        }
@@ -733,7 +739,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  *
  * @adev: amdgpu_device pointer
  * @vm: VM to clear BO from
- * @bo: BO to clear
+ * @vmbo: BO to clear
  * @immediate: use an immediate update
  *
  * Root PD needs to be reserved when calling this.
@@ -743,13 +749,14 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                              struct amdgpu_vm *vm,
-                             struct amdgpu_bo *bo,
+                             struct amdgpu_bo_vm *vmbo,
                              bool immediate)
 {
        struct ttm_operation_ctx ctx = { true, false };
        unsigned level = adev->vm_manager.root_level;
        struct amdgpu_vm_update_params params;
-       struct amdgpu_bo *ancestor = bo;
+       struct amdgpu_bo *ancestor = &vmbo->bo;
+       struct amdgpu_bo *bo = &vmbo->bo;
        unsigned entries, ats_entries;
        uint64_t addr;
        int r;
@@ -789,14 +796,15 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
        if (r)
                return r;
 
-       if (bo->shadow) {
-               r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
-                                   &ctx);
+       if (vmbo->shadow) {
+               struct amdgpu_bo *shadow = vmbo->shadow;
+
+               r = ttm_bo_validate(&shadow->tbo, &shadow->placement, &ctx);
                if (r)
                        return r;
        }
 
-       r = vm->update_funcs->map_table(bo);
+       r = vm->update_funcs->map_table(vmbo);
        if (r)
                return r;
 
@@ -820,7 +828,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                        amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
                }
 
-               r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
+               r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries,
                                             value, flags);
                if (r)
                        return r;
@@ -843,7 +851,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
                        }
                }
 
-               r = vm->update_funcs->update(&params, bo, addr, 0, entries,
+               r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
                                             value, flags);
                if (r)
                        return r;
@@ -859,14 +867,17 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
  * @vm: requesting vm
  * @level: the page table level
  * @immediate: use an immediate update
- * @bo: pointer to the buffer object pointer
+ * @vmbo: pointer to the buffer object pointer
  */
 static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
                               struct amdgpu_vm *vm,
                               int level, bool immediate,
-                              struct amdgpu_bo **bo)
+                              struct amdgpu_bo_vm **vmbo)
 {
        struct amdgpu_bo_param bp;
+       struct amdgpu_bo *bo;
+       struct dma_resv *resv;
+       unsigned int num_entries;
        int r;
 
        memset(&bp, 0, sizeof(bp));
@@ -877,7 +888,14 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
        bp.domain = amdgpu_bo_get_preferred_pin_domain(adev, bp.domain);
        bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
                AMDGPU_GEM_CREATE_CPU_GTT_USWC;
-       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
+
+       if (level < AMDGPU_VM_PTB)
+               num_entries = amdgpu_vm_num_entries(adev, level);
+       else
+               num_entries = 0;
+
+       bp.bo_ptr_size = struct_size((*vmbo), entries, num_entries);
+
        if (vm->use_cpu_for_update)
                bp.flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
@@ -886,26 +904,41 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
        if (vm->root.base.bo)
                bp.resv = vm->root.base.bo->tbo.base.resv;
 
-       r = amdgpu_bo_create(adev, &bp, bo);
+       r = amdgpu_bo_create_vm(adev, &bp, vmbo);
        if (r)
                return r;
 
-       if (vm->is_compute_context && (adev->flags & AMD_IS_APU))
+       bo = &(*vmbo)->bo;
+       if (vm->is_compute_context && (adev->flags & AMD_IS_APU)) {
+               (*vmbo)->shadow = NULL;
                return 0;
+       }
 
        if (!bp.resv)
-               WARN_ON(dma_resv_lock((*bo)->tbo.base.resv,
+               WARN_ON(dma_resv_lock(bo->tbo.base.resv,
                                      NULL));
-       r = amdgpu_bo_create_shadow(adev, bp.size, *bo);
+       resv = bp.resv;
+       memset(&bp, 0, sizeof(bp));
+       bp.size = amdgpu_vm_bo_size(adev, level);
+       bp.domain = AMDGPU_GEM_DOMAIN_GTT;
+       bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+       bp.type = ttm_bo_type_kernel;
+       bp.resv = bo->tbo.base.resv;
+       bp.bo_ptr_size = sizeof(struct amdgpu_bo);
 
-       if (!bp.resv)
-               dma_resv_unlock((*bo)->tbo.base.resv);
+       r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
+
+       if (!resv)
+               dma_resv_unlock(bo->tbo.base.resv);
 
        if (r) {
-               amdgpu_bo_unref(bo);
+               amdgpu_bo_unref(&bo);
                return r;
        }
 
+       (*vmbo)->shadow->parent = amdgpu_bo_ref(bo);
+       amdgpu_bo_add_to_shadow_list((*vmbo)->shadow);
+
        return 0;
 }
 
@@ -929,22 +962,18 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
                               bool immediate)
 {
        struct amdgpu_vm_pt *entry = cursor->entry;
-       struct amdgpu_bo *pt;
+       struct amdgpu_bo *pt_bo;
+       struct amdgpu_bo_vm *pt;
        int r;
 
-       if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
-               unsigned num_entries;
-
-               num_entries = amdgpu_vm_num_entries(adev, cursor->level);
-               entry->entries = kvmalloc_array(num_entries,
-                                               sizeof(*entry->entries),
-                                               GFP_KERNEL | __GFP_ZERO);
-               if (!entry->entries)
-                       return -ENOMEM;
-       }
-
-       if (entry->base.bo)
+       if (entry->base.bo) {
+               if (cursor->level < AMDGPU_VM_PTB)
+                       entry->entries =
+                               to_amdgpu_bo_vm(entry->base.bo)->entries;
+               else
+                       entry->entries = NULL;
                return 0;
+       }
 
        r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
        if (r)
@@ -953,8 +982,13 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
        /* Keep a reference to the root directory to avoid
         * freeing them up in the wrong order.
         */
-       pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
-       amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+       pt_bo = &pt->bo;
+       pt_bo->parent = amdgpu_bo_ref(cursor->parent->base.bo);
+       amdgpu_vm_bo_base_init(&entry->base, vm, pt_bo);
+       if (cursor->level < AMDGPU_VM_PTB)
+               entry->entries = pt->entries;
+       else
+               entry->entries = NULL;
 
        r = amdgpu_vm_clear_bo(adev, vm, pt, immediate);
        if (r)
@@ -964,7 +998,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
 
 error_free_pt:
        amdgpu_bo_unref(&pt->shadow);
-       amdgpu_bo_unref(&pt);
+       amdgpu_bo_unref(&pt_bo);
        return r;
 }
 
@@ -975,13 +1009,15 @@ error_free_pt:
  */
 static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
 {
+       struct amdgpu_bo *shadow;
+
        if (entry->base.bo) {
+               shadow = amdgpu_bo_shadowed(entry->base.bo);
                entry->base.bo->vm_bo = NULL;
                list_del(&entry->base.vm_status);
-               amdgpu_bo_unref(&entry->base.bo->shadow);
+               amdgpu_bo_unref(&shadow);
                amdgpu_bo_unref(&entry->base.bo);
        }
-       kvfree(entry->entries);
        entry->entries = NULL;
 }
 
@@ -1280,7 +1316,8 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
        level += params->adev->vm_manager.root_level;
        amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
        pde = (entry - parent->entries) * 8;
-       return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
+       return vm->update_funcs->update(params, to_amdgpu_bo_vm(bo), pde, pt,
+                                       1, 0, flags);
 }
 
 /**
@@ -1360,9 +1397,9 @@ error:
  * Make sure to set the right flags for the PTEs at the desired level.
  */
 static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
-                                  struct amdgpu_bo *bo, unsigned level,
+                                  struct amdgpu_bo_vm *pt, unsigned int level,
                                   uint64_t pe, uint64_t addr,
-                                  unsigned count, uint32_t incr,
+                                  unsigned int count, uint32_t incr,
                                   uint64_t flags)
 
 {
@@ -1378,7 +1415,7 @@ static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
                flags |= AMDGPU_PTE_EXECUTABLE;
        }
 
-       params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
+       params->vm->update_funcs->update(params, pt, pe, addr, count, incr,
                                         flags);
 }
 
@@ -1558,9 +1595,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
                                                    nptes, dst, incr, upd_flags,
                                                    vm->task_info.pid,
                                                    vm->immediate.fence_context);
-                       amdgpu_vm_update_flags(params, pt, cursor.level,
-                                              pe_start, dst, nptes, incr,
-                                              upd_flags);
+                       amdgpu_vm_update_flags(params, to_amdgpu_bo_vm(pt),
+                                              cursor.level, pe_start, dst,
+                                              nptes, incr, upd_flags);
 
                        pe_start += nptes * 8;
                        dst += nptes * incr;
@@ -1583,9 +1620,12 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
                         * completely covered by the range and so potentially still in use.
                         */
                        while (cursor.pfn < frag_start) {
-                               amdgpu_vm_free_pts(adev, params->vm, &cursor);
+                               /* Make sure previous mapping is freed */
+                               if (cursor.entry->base.bo) {
+                                       params->table_freed = true;
+                                       amdgpu_vm_free_pts(adev, params->vm, &cursor);
+                               }
                                amdgpu_vm_pt_next(adev, &cursor);
-                               params->table_freed = true;
                        }
 
                } else if (frag >= shift) {
@@ -1822,7 +1862,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
                                bo = gem_to_amdgpu_bo(gobj);
                }
                mem = &bo->tbo.mem;
-               if (mem->mem_type == TTM_PL_TT)
+               if (mem->mem_type == TTM_PL_TT ||
+                   mem->mem_type == AMDGPU_PL_PREEMPT)
                        pages_addr = bo->tbo.ttm->dma_address;
        }
 
@@ -2673,7 +2714,7 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
        struct amdgpu_vm_bo_base *bo_base;
 
        /* shadow bo doesn't have bo base, its validation needs its parent */
-       if (bo->parent && bo->parent->shadow == bo)
+       if (bo->parent && (amdgpu_bo_shadowed(bo->parent) == bo))
                bo = bo->parent;
 
        for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
@@ -2842,7 +2883,8 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
  */
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
 {
-       struct amdgpu_bo *root;
+       struct amdgpu_bo *root_bo;
+       struct amdgpu_bo_vm *root;
        int r, i;
 
        vm->va = RB_ROOT_CACHED;
@@ -2896,16 +2938,16 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid)
                                false, &root);
        if (r)
                goto error_free_delayed;
-
-       r = amdgpu_bo_reserve(root, true);
+       root_bo = &root->bo;
+       r = amdgpu_bo_reserve(root_bo, true);
        if (r)
                goto error_free_root;
 
-       r = dma_resv_reserve_shared(root->tbo.base.resv, 1);
+       r = dma_resv_reserve_shared(root_bo->tbo.base.resv, 1);
        if (r)
                goto error_unreserve;
 
-       amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
+       amdgpu_vm_bo_base_init(&vm->root.base, vm, root_bo);
 
        r = amdgpu_vm_clear_bo(adev, vm, root, false);
        if (r)
@@ -2934,8 +2976,8 @@ error_unreserve:
        amdgpu_bo_unreserve(vm->root.base.bo);
 
 error_free_root:
-       amdgpu_bo_unref(&vm->root.base.bo->shadow);
-       amdgpu_bo_unref(&vm->root.base.bo);
+       amdgpu_bo_unref(&root->shadow);
+       amdgpu_bo_unref(&root_bo);
        vm->root.base.bo = NULL;
 
 error_free_delayed:
@@ -3033,7 +3075,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
         */
        if (pte_support_ats != vm->pte_support_ats) {
                vm->pte_support_ats = pte_support_ats;
-               r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false);
+               r = amdgpu_vm_clear_bo(adev, vm,
+                                      to_amdgpu_bo_vm(vm->root.base.bo),
+                                      false);
                if (r)
                        goto free_idr;
        }
@@ -3077,7 +3121,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        }
 
        /* Free the shadow bo for compute VM */
-       amdgpu_bo_unref(&vm->root.base.bo->shadow);
+       amdgpu_bo_unref(&to_amdgpu_bo_vm(vm->root.base.bo)->shadow);
 
        if (pasid)
                vm->pasid = pasid;
index 39d60e3..bee439d 100644 (file)
@@ -39,6 +39,7 @@
 struct amdgpu_bo_va;
 struct amdgpu_job;
 struct amdgpu_bo_list_entry;
+struct amdgpu_bo_vm;
 
 /*
  * GPUVM handling
@@ -239,11 +240,11 @@ struct amdgpu_vm_update_params {
 };
 
 struct amdgpu_vm_update_funcs {
-       int (*map_table)(struct amdgpu_bo *bo);
+       int (*map_table)(struct amdgpu_bo_vm *bo);
        int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
                       enum amdgpu_sync_mode sync_mode);
        int (*update)(struct amdgpu_vm_update_params *p,
-                     struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
+                     struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr,
                      unsigned count, uint32_t incr, uint64_t flags);
        int (*commit)(struct amdgpu_vm_update_params *p,
                      struct dma_fence **fence);
index ac45d9c..03a44be 100644 (file)
@@ -29,9 +29,9 @@
  *
  * @table: newly allocated or validated PD/PT
  */
-static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
+static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table)
 {
-       return amdgpu_bo_kmap(table, NULL);
+       return amdgpu_bo_kmap(&table->bo, NULL);
 }
 
 /**
@@ -58,7 +58,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
  * amdgpu_vm_cpu_update - helper to update page tables via CPU
  *
  * @p: see amdgpu_vm_update_params definition
- * @bo: PD/PT to update
+ * @vmbo: PD/PT to update
  * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -68,7 +68,7 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
  * Write count number of PT/PD entries directly.
  */
 static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
-                               struct amdgpu_bo *bo, uint64_t pe,
+                               struct amdgpu_bo_vm *vmbo, uint64_t pe,
                                uint64_t addr, unsigned count, uint32_t incr,
                                uint64_t flags)
 {
@@ -76,13 +76,13 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
        uint64_t value;
        int r;
 
-       if (bo->tbo.moving) {
-               r = dma_fence_wait(bo->tbo.moving, true);
+       if (vmbo->bo.tbo.moving) {
+               r = dma_fence_wait(vmbo->bo.tbo.moving, true);
                if (r)
                        return r;
        }
 
-       pe += (unsigned long)amdgpu_bo_kptr(bo);
+       pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);
 
        trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->immediate);
 
index a83a646..4229581 100644 (file)
  *
  * @table: newly allocated or validated PD/PT
  */
-static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
+static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
 {
        int r;
 
-       r = amdgpu_ttm_alloc_gart(&table->tbo);
+       r = amdgpu_ttm_alloc_gart(&table->bo.tbo);
        if (r)
                return r;
 
@@ -186,7 +186,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
  * amdgpu_vm_sdma_update - execute VM update
  *
  * @p: see amdgpu_vm_update_params definition
- * @bo: PD/PT to update
+ * @vmbo: PD/PT to update
  * @pe: byte offset of the PDE/PTE, relative to start of PDB/PTB
  * @addr: dst addr to write into pe
  * @count: number of page entries to update
@@ -197,10 +197,11 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
  * the IB.
  */
 static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
-                                struct amdgpu_bo *bo, uint64_t pe,
+                                struct amdgpu_bo_vm *vmbo, uint64_t pe,
                                 uint64_t addr, unsigned count, uint32_t incr,
                                 uint64_t flags)
 {
+       struct amdgpu_bo *bo = &vmbo->bo;
        enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
                : AMDGPU_IB_POOL_DELAYED;
        unsigned int i, ndw, nptes;
@@ -238,8 +239,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 
                if (!p->pages_addr) {
                        /* set page commands needed */
-                       if (bo->shadow)
-                               amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
+                       if (vmbo->shadow)
+                               amdgpu_vm_sdma_set_ptes(p, vmbo->shadow, pe, addr,
                                                        count, incr, flags);
                        amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
                                                incr, flags);
@@ -248,7 +249,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 
                /* copy commands needed */
                ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
-                       (bo->shadow ? 2 : 1);
+                       (vmbo->shadow ? 2 : 1);
 
                /* for padding */
                ndw -= 7;
@@ -263,8 +264,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
                        pte[i] |= flags;
                }
 
-               if (bo->shadow)
-                       amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
+               if (vmbo->shadow)
+                       amdgpu_vm_sdma_copy_ptes(p, vmbo->shadow, pe, nptes);
                amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
 
                pe += nptes * 8;
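
In the SDMA variant the shadow page table moves into the subclass as well, and
every PTE update is issued twice when a shadow exists: first against the shadow,
then against the table the GPU actually walks. A sketch of that duplication,
names hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    struct pt_bo { const char *label; };

    struct vm_pt {
            struct pt_bo bo;
            struct pt_bo *shadow;           /* optional system-memory copy */
    };

    static void set_ptes(struct pt_bo *bo, uint64_t pe, unsigned count)
    {
            printf("write %u PTEs at 0x%llx into %s\n",
                   count, (unsigned long long)pe, bo->label);
    }

    static void vm_update(struct vm_pt *vmbo, uint64_t pe, unsigned count)
    {
            if (vmbo->shadow)               /* keep the backup copy in sync first */
                    set_ptes(vmbo->shadow, pe, count);
            set_ptes(&vmbo->bo, pe, count); /* then the table the GPU walks */
    }

    int main(void)
    {
            struct pt_bo shadow = { "shadow" };
            struct vm_pt vmbo = { { "vram" }, &shadow };

            vm_update(&vmbo, 0x1000, 8);
            return 0;
    }
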
index 3dcb8b3..6fa2229 100644 (file)
@@ -31,6 +31,7 @@
 
 #define ATOM_DEBUG
 
+#include "atomfirmware.h"
 #include "atom.h"
 #include "atom-names.h"
 #include "atom-bits.h"
@@ -1299,12 +1300,168 @@ static void atom_index_iio(struct atom_context *ctx, int base)
        }
 }
 
+static void atom_get_vbios_name(struct atom_context *ctx)
+{
+       unsigned char *p_rom;
+       unsigned char str_num;
+       unsigned short off_to_vbios_str;
+       unsigned char *c_ptr;
+       int name_size;
+       int i;
+
+       const char *na = "--N/A--";
+       char *back;
+
+       p_rom = ctx->bios;
+
+       str_num = *(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS);
+       if (str_num != 0) {
+               off_to_vbios_str =
+                       *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
+
+               c_ptr = (unsigned char *)(p_rom + off_to_vbios_str);
+       } else {
+               /* do not know where to find name */
+               memcpy(ctx->name, na, 7);
+               ctx->name[7] = 0;
+               return;
+       }
+
+       /*
+        * skip the atombios strings, usually 4
+        * 1st is P/N, 2nd is ASIC, 3rd is PCI type, 4th is Memory type
+        */
+       for (i = 0; i < str_num; i++) {
+               while (*c_ptr != 0)
+                       c_ptr++;
+               c_ptr++;
+       }
+
+       /* skip the following 2 chars: 0x0D 0x0A */
+       c_ptr += 2;
+
+       name_size = strnlen(c_ptr, STRLEN_LONG - 1);
+       memcpy(ctx->name, c_ptr, name_size);
+       back = ctx->name + name_size;
+       while ((*--back) == ' ')
+               ;
+       *(back + 1) = '\0';
+}
+
+static void atom_get_vbios_date(struct atom_context *ctx)
+{
+       unsigned char *p_rom;
+       unsigned char *date_in_rom;
+
+       p_rom = ctx->bios;
+
+       date_in_rom = p_rom + OFFSET_TO_VBIOS_DATE;
+
+       ctx->date[0] = '2';
+       ctx->date[1] = '0';
+       ctx->date[2] = date_in_rom[6];
+       ctx->date[3] = date_in_rom[7];
+       ctx->date[4] = '/';
+       ctx->date[5] = date_in_rom[0];
+       ctx->date[6] = date_in_rom[1];
+       ctx->date[7] = '/';
+       ctx->date[8] = date_in_rom[3];
+       ctx->date[9] = date_in_rom[4];
+       ctx->date[10] = ' ';
+       ctx->date[11] = date_in_rom[9];
+       ctx->date[12] = date_in_rom[10];
+       ctx->date[13] = date_in_rom[11];
+       ctx->date[14] = date_in_rom[12];
+       ctx->date[15] = date_in_rom[13];
+       ctx->date[16] = '\0';
+}
+
+static unsigned char *atom_find_str_in_rom(struct atom_context *ctx, char *str, int start,
+                                          int end, int maxlen)
+{
+       unsigned long str_off;
+       unsigned char *p_rom;
+       unsigned short str_len;
+
+       str_off = 0;
+       str_len = strnlen(str, maxlen);
+       p_rom = ctx->bios;
+
+       for (; start <= end; ++start) {
+               for (str_off = 0; str_off < str_len; ++str_off) {
+                       if (str[str_off] != *(p_rom + start + str_off))
+                               break;
+               }
+
+               if (str_off == str_len || str[str_off] == 0)
+                       return p_rom + start;
+       }
+       return NULL;
+}
+
+static void atom_get_vbios_pn(struct atom_context *ctx)
+{
+       unsigned char *p_rom;
+       unsigned short off_to_vbios_str;
+       unsigned char *vbios_str;
+       int count;
+
+       off_to_vbios_str = 0;
+       p_rom = ctx->bios;
+
+       if (*(p_rom + OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS) != 0) {
+               off_to_vbios_str =
+                       *(unsigned short *)(p_rom + OFFSET_TO_GET_ATOMBIOS_STRING_START);
+
+               vbios_str = (unsigned char *)(p_rom + off_to_vbios_str);
+       } else {
+               vbios_str = p_rom + OFFSET_TO_VBIOS_PART_NUMBER;
+       }
+
+       if (*vbios_str == 0) {
+               vbios_str = atom_find_str_in_rom(ctx, BIOS_ATOM_PREFIX, 3, 1024, 64);
+               if (vbios_str != NULL)
+                       vbios_str += sizeof(BIOS_ATOM_PREFIX) - 1;
+       }
+       if (vbios_str != NULL && *vbios_str == 0)
+               vbios_str++;
+
+       if (vbios_str != NULL) {
+               count = 0;
+               while ((count < BIOS_STRING_LENGTH) && vbios_str[count] >= ' ' &&
+                      vbios_str[count] <= 'z') {
+                       ctx->vbios_pn[count] = vbios_str[count];
+                       count++;
+               }
+
+               ctx->vbios_pn[count] = 0;
+       }
+}
+
+static void atom_get_vbios_version(struct atom_context *ctx)
+{
+       unsigned char *vbios_ver;
+
+       /* find anchor ATOMBIOSBK-AMD */
+       vbios_ver = atom_find_str_in_rom(ctx, BIOS_VERSION_PREFIX, 3, 1024, 64);
+       if (vbios_ver != NULL) {
+               /* skip ATOMBIOSBK-AMD VER */
+               vbios_ver += 18;
+               memcpy(ctx->vbios_ver_str, vbios_ver, STRLEN_NORMAL);
+       } else {
+               ctx->vbios_ver_str[0] = '\0';
+       }
+}
+
 struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
 {
        int base;
        struct atom_context *ctx =
            kzalloc(sizeof(struct atom_context), GFP_KERNEL);
        char *str;
+       struct _ATOM_ROM_HEADER *atom_rom_header;
+       struct _ATOM_MASTER_DATA_TABLE *master_table;
+       struct _ATOM_FIRMWARE_INFO *atom_fw_info;
        u16 idx;
 
        if (!ctx)
@@ -1353,6 +1510,21 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
                strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
        }
 
+       atom_rom_header = (struct _ATOM_ROM_HEADER *)CSTR(base);
+       if (atom_rom_header->usMasterDataTableOffset != 0) {
+               master_table = (struct _ATOM_MASTER_DATA_TABLE *)
+                               CSTR(atom_rom_header->usMasterDataTableOffset);
+               if (master_table->ListOfDataTables.FirmwareInfo != 0) {
+                       atom_fw_info = (struct _ATOM_FIRMWARE_INFO *)
+                                       CSTR(master_table->ListOfDataTables.FirmwareInfo);
+                       ctx->version = atom_fw_info->ulFirmwareRevision;
+               }
+       }
+
+       atom_get_vbios_name(ctx);
+       atom_get_vbios_pn(ctx);
+       atom_get_vbios_date(ctx);
+       atom_get_vbios_version(ctx);
 
        return ctx;
 }
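
atom_find_str_in_rom() above is a bounded linear scan for an anchor string in
the ROM image; the part number and version strings are then located relative to
such anchors (the "+= 18" skips "ATOMBIOSBK-AMD VER"). A standalone sketch of
the same scan, simplified from the code above:

    #include <stdio.h>
    #include <string.h>

    /* scan rom[start..end] for str, comparing at most maxlen bytes of it */
    static const unsigned char *find_str_in_rom(const unsigned char *rom,
                                                const char *str, int start,
                                                int end, int maxlen)
    {
            size_t len = strnlen(str, maxlen);
            size_t off;
            int i;

            for (i = start; i <= end; i++) {
                    for (off = 0; off < len; off++)
                            if (str[off] != rom[i + off])
                                    break;
                    if (off == len)         /* full match at offset i */
                            return rom + i;
            }
            return NULL;
    }

    int main(void)
    {
            const unsigned char rom[] =
                    "\x55\xaa....ATOMBIOSBK-AMD VER017.003.000";
            const unsigned char *p =
                    find_str_in_rom(rom, "ATOMBIOSBK-AMD", 0, 20, 64);

            if (p)  /* skip "ATOMBIOSBK-AMD VER" to reach the digits */
                    printf("version: %s\n", (const char *)(p + 18));
            return 0;
    }
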
index d279759..0c18398 100644 (file)
@@ -112,6 +112,10 @@ struct drm_device;
 #define ATOM_IO_SYSIO          2
 #define ATOM_IO_IIO            0x80
 
+#define STRLEN_NORMAL          32
+#define STRLEN_LONG            64
+#define STRLEN_VERYLONG                254
+
 struct card_info {
        struct drm_device *dev;
        void (* reg_write)(struct card_info *, uint32_t, uint32_t);   /*  filled by driver */
@@ -140,6 +144,12 @@ struct atom_context {
        uint32_t *scratch;
        int scratch_size_bytes;
        char vbios_version[20];
+
+       uint8_t name[STRLEN_LONG];
+       uint8_t vbios_pn[STRLEN_LONG];
+       uint32_t version;
+       uint8_t vbios_ver_str[STRLEN_NORMAL];
+       uint8_t date[STRLEN_NORMAL];
 };
 
 extern int amdgpu_atom_debug;
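
The date buffer added here receives the VBIOS build timestamp after
atom_get_vbios_date() reshuffles the ROM's MM/DD/YY HH:MM layout into a sortable
20YY/MM/DD HH:MM string. The reshuffle as a self-contained sketch (sample date
invented):

    #include <stdio.h>

    int main(void)
    {
            /* ROM layout at OFFSET_TO_VBIOS_DATE: "MM/DD/YY HH:MM" */
            const char *rom = "06/02/21 14:35";
            char date[17];

            date[0] = '2';  date[1] = '0';          /* assume a 20xx century */
            date[2] = rom[6];  date[3] = rom[7];    /* YY */
            date[4] = '/';
            date[5] = rom[0];  date[6] = rom[1];    /* MM */
            date[7] = '/';
            date[8] = rom[3];  date[9] = rom[4];    /* DD */
            date[10] = ' ';
            date[11] = rom[9];  date[12] = rom[10]; /* HH:MM */
            date[13] = rom[11]; date[14] = rom[12];
            date[15] = rom[13];
            date[16] = '\0';

            printf("%s\n", date);   /* prints "2021/06/02 14:35" */
            return 0;
    }
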
index c4bb8ee..c8ebd10 100644 (file)
@@ -720,7 +720,7 @@ err0:
 }
 
 /**
- * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
+ * cik_sdma_vm_copy_pte - update PTEs by copying them from the GART
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
@@ -746,7 +746,7 @@ static void cik_sdma_vm_copy_pte(struct amdgpu_ib *ib,
 }
 
 /**
- * cik_sdma_vm_write_pages - update PTEs by writing them manually
+ * cik_sdma_vm_write_pte - update PTEs by writing them manually
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
@@ -775,7 +775,7 @@ static void cik_sdma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
 }
 
 /**
- * cik_sdma_vm_set_pages - update the page tables using sDMA
+ * cik_sdma_vm_set_pte_pde - update the page tables using sDMA
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
@@ -804,7 +804,7 @@ static void cik_sdma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
 }
 
 /**
- * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ * cik_sdma_ring_pad_ib - pad the IB to the required number of dw
  *
  * @ring: amdgpu_ring structure holding ring information
  * @ib: indirect buffer to fill with padding
index dbcb09c..c7803dc 100644 (file)
@@ -456,7 +456,7 @@ static void dce_v6_0_program_fmt(struct drm_encoder *encoder)
 }
 
 /**
- * cik_get_number_of_dram_channels - get the number of dram channels
+ * si_get_number_of_dram_channels - get the number of dram channels
  *
  * @adev: amdgpu_device pointer
  *
index 17428cb..102f315 100644 (file)
@@ -47,7 +47,7 @@
 #include "gfx_v10_0.h"
 #include "nbio_v2_3.h"
 
-/**
+/*
  * Navi10 has two graphics rings that share each graphics pipe.
  * 1. Primary ring
  * 2. Async ring
@@ -1432,38 +1432,36 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000)
 };
 
-static bool gfx_v10_is_rlcg_rw(struct amdgpu_device *adev, u32 offset, uint32_t *flag, bool write)
-{
-       /* always programed by rlcg, only for gc */
-       if (offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_HI) ||
-           offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_ADDR_LO) ||
-           offset == SOC15_REG_OFFSET(GC, 0, mmRLC_CSIB_LENGTH) ||
-           offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL) ||
-           offset == SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX) ||
-           offset == SOC15_REG_OFFSET(GC, 0, mmCP_ME_CNTL)) {
-               if (!amdgpu_sriov_reg_indirect_gc(adev))
-                       *flag = GFX_RLCG_GC_WRITE_OLD;
-               else
-                       *flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
+static bool gfx_v10_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip,
+                                int write, u32 *rlcg_flag)
+{
+       switch (hwip) {
+       case GC_HWIP:
+               if (amdgpu_sriov_reg_indirect_gc(adev)) {
+                       *rlcg_flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
 
-               return true;
-       }
+                       return true;
+               /* only in the new interface are AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC enabled simultaneously */
+               } else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ)) {
+                       *rlcg_flag = GFX_RLCG_GC_WRITE_OLD;
 
-       /* currently support gc read/write, mmhub write */
-       if (offset >= SOC15_REG_OFFSET(GC, 0, mmSDMA0_DEC_START) &&
-           offset <= SOC15_REG_OFFSET(GC, 0, mmRLC_GTS_OFFSET_MSB)) {
-               if (amdgpu_sriov_reg_indirect_gc(adev))
-                       *flag = write ? GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ;
-               else
-                       return false;
-       } else {
-               if (amdgpu_sriov_reg_indirect_mmhub(adev))
-                       *flag = GFX_RLCG_MMHUB_WRITE;
-               else
-                       return false;
+                       return true;
+               }
+
+               break;
+       case MMHUB_HWIP:
+               if (amdgpu_sriov_reg_indirect_mmhub(adev) &&
+                   (acc_flags & AMDGPU_REGS_RLC) && write) {
+                       *rlcg_flag = GFX_RLCG_MMHUB_WRITE;
+                       return true;
+               }
+
+               break;
+       default:
+               DRM_DEBUG("register not programmed via RLCG\n");
        }
 
-       return true;
+       return false;
 }
 
 static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag)
@@ -1523,36 +1521,34 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
        return ret;
 }
 
-static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 flag)
+static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip)
 {
-       uint32_t rlcg_flag;
+       u32 rlcg_flag;
 
-       if (amdgpu_sriov_fullaccess(adev) &&
-           gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 1)) {
+       if (!amdgpu_sriov_runtime(adev) &&
+           gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) {
                gfx_v10_rlcg_rw(adev, offset, value, rlcg_flag);
-
                return;
        }
-       if (flag & AMDGPU_REGS_NO_KIQ)
+
+       if (acc_flags & AMDGPU_REGS_NO_KIQ)
                WREG32_NO_KIQ(offset, value);
        else
                WREG32(offset, value);
 }
 
-static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 flag)
+static u32 gfx_v10_rlcg_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip)
 {
-       uint32_t rlcg_flag;
+       u32 rlcg_flag;
 
-       if (amdgpu_sriov_fullaccess(adev) &&
-           gfx_v10_is_rlcg_rw(adev, offset, &rlcg_flag, 0))
+       if (!amdgpu_sriov_runtime(adev) &&
+           gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag))
                return gfx_v10_rlcg_rw(adev, offset, 0, rlcg_flag);
 
-       if (flag & AMDGPU_REGS_NO_KIQ)
+       if (acc_flags & AMDGPU_REGS_NO_KIQ)
                return RREG32_NO_KIQ(offset);
        else
                return RREG32(offset);
-
-       return 0;
 }
 
 static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
@@ -3935,7 +3931,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 {
        const char *chip_name;
        char fw_name[40];
-       char wks[10];
+       char *wks = "";
        int err;
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
@@ -3948,7 +3944,6 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 
        DRM_DEBUG("\n");
 
-       memset(wks, 0, sizeof(wks));
        switch (adev->asic_type) {
        case CHIP_NAVI10:
                chip_name = "navi10";
@@ -3957,7 +3952,7 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
                chip_name = "navi14";
                if (!(adev->pdev->device == 0x7340 &&
                      adev->pdev->revision != 0x00))
-                       snprintf(wks, sizeof(wks), "_wks");
+                       wks = "_wks";
                break;
        case CHIP_NAVI12:
                chip_name = "navi12";
@@ -5233,10 +5228,10 @@ static void gfx_v10_0_rlc_enable_srm(struct amdgpu_device *adev)
        uint32_t tmp;
 
        /* enable Save Restore Machine */
-       tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
+       tmp = RREG32_SOC15(GC, 0, mmRLC_SRM_CNTL);
        tmp |= RLC_SRM_CNTL__AUTO_INCR_ADDR_MASK;
        tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
+       WREG32_SOC15(GC, 0, mmRLC_SRM_CNTL, tmp);
 }
 
 static int gfx_v10_0_rlc_load_microcode(struct amdgpu_device *adev)
@@ -7941,12 +7936,12 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
 static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
 {
        u32 reg, data;
-
+       /* raw register offset needed below, so no *_SOC15 helper here */
        reg = SOC15_REG_OFFSET(GC, 0, mmRLC_SPM_MC_CNTL);
        if (amdgpu_sriov_is_pp_one_vf(adev))
                data = RREG32_NO_KIQ(reg);
        else
-               data = RREG32(reg);
+               data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
 
        data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
        data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
@@ -8688,16 +8683,16 @@ gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 
        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
-               cp_int_cntl = RREG32(cp_int_cntl_reg);
+               cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
                cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
                                            TIME_STAMP_INT_ENABLE, 0);
-               WREG32(cp_int_cntl_reg, cp_int_cntl);
+               WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
-               cp_int_cntl = RREG32(cp_int_cntl_reg);
+               cp_int_cntl = RREG32_SOC15_IP(GC, cp_int_cntl_reg);
                cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
                                            TIME_STAMP_INT_ENABLE, 1);
-               WREG32(cp_int_cntl_reg, cp_int_cntl);
+               WREG32_SOC15_IP(GC, cp_int_cntl_reg, cp_int_cntl);
                break;
        default:
                break;
@@ -8741,16 +8736,16 @@ static void gfx_v10_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev
 
        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
-               mec_int_cntl = RREG32(mec_int_cntl_reg);
+               mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
                mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
                                             TIME_STAMP_INT_ENABLE, 0);
-               WREG32(mec_int_cntl_reg, mec_int_cntl);
+               WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
-               mec_int_cntl = RREG32(mec_int_cntl_reg);
+               mec_int_cntl = RREG32_SOC15_IP(GC, mec_int_cntl_reg);
                mec_int_cntl = REG_SET_FIELD(mec_int_cntl, CP_ME1_PIPE0_INT_CNTL,
                                             TIME_STAMP_INT_ENABLE, 1);
-               WREG32(mec_int_cntl_reg, mec_int_cntl);
+               WREG32_SOC15_IP(GC, mec_int_cntl_reg, mec_int_cntl);
                break;
        default:
                break;
@@ -8946,20 +8941,20 @@ static int gfx_v10_0_kiq_set_interrupt_state(struct amdgpu_device *adev,
                                            GENERIC2_INT_ENABLE, 0);
                        WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
 
-                       tmp = RREG32(target);
+                       tmp = RREG32_SOC15_IP(GC, target);
                        tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
                                            GENERIC2_INT_ENABLE, 0);
-                       WREG32(target, tmp);
+                       WREG32_SOC15_IP(GC, target, tmp);
                } else {
                        tmp = RREG32_SOC15(GC, 0, mmCPC_INT_CNTL);
                        tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL,
                                            GENERIC2_INT_ENABLE, 1);
                        WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, tmp);
 
-                       tmp = RREG32(target);
+                       tmp = RREG32_SOC15_IP(GC, target);
                        tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL,
                                            GENERIC2_INT_ENABLE, 1);
-                       WREG32(target, tmp);
+                       WREG32_SOC15_IP(GC, target, tmp);
                }
                break;
        default:
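
gfx_v10_get_rlcg_flag() replaces the old per-offset whitelist with a dispatch on
IP block, access flags and direction: under SR-IOV a GC or MMHUB access may have
to be tunneled through the RLC scratch interface instead of plain MMIO. A
compact model of the decision table, with flag values invented for the sketch:

    #include <stdbool.h>
    #include <stdio.h>

    enum hwip { GC_HWIP, MMHUB_HWIP };
    enum rlcg { GC_WRITE, GC_READ, GC_WRITE_OLD, MMHUB_WRITE };

    #define REGS_RLC    0x1
    #define REGS_NO_KIQ 0x2

    static bool get_rlcg_flag(bool indirect_gc, bool indirect_mmhub,
                              unsigned int acc, enum hwip hwip, bool write,
                              enum rlcg *out)
    {
            switch (hwip) {
            case GC_HWIP:
                    if (indirect_gc) {              /* new indirect interface */
                            *out = write ? GC_WRITE : GC_READ;
                            return true;
                    }
                    /* legacy path: RLC access requested, KIQ not bypassed */
                    if ((acc & REGS_RLC) && !(acc & REGS_NO_KIQ)) {
                            *out = GC_WRITE_OLD;
                            return true;
                    }
                    break;
            case MMHUB_HWIP:
                    if (indirect_mmhub && (acc & REGS_RLC) && write) {
                            *out = MMHUB_WRITE;     /* only writes are tunneled */
                            return true;
                    }
                    break;
            }
            return false;   /* caller falls back to a direct MMIO access */
    }

    int main(void)
    {
            enum rlcg f;

            /* GC write on the new interface -> tunneled through RLCG */
            printf("%d\n", get_rlcg_flag(true, false, 0, GC_HWIP, true, &f));
            /* MMHUB read is never tunneled -> direct MMIO */
            printf("%d\n", get_rlcg_flag(false, true, REGS_RLC,
                                         MMHUB_HWIP, false, &f));
            return 0;
    }
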
index c35fdd2..685212c 100644 (file)
@@ -2116,7 +2116,7 @@ error_free_scratch:
 }
 
 /**
- * gfx_v7_0_ring_emit_hdp - emit an hdp flush on the cp
+ * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
  *
  * @ring: amdgpu_ring structure holding ring information
  *
@@ -2242,7 +2242,7 @@ static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
  * IB stuff
  */
 /**
- * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
+ * gfx_v7_0_ring_emit_ib_gfx - emit an IB (Indirect Buffer) on the ring
  *
  * @ring: amdgpu_ring structure holding ring information
  * @job: job to retrieve vmid from
@@ -3196,7 +3196,7 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
 }
 
 /**
- * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
+ * gfx_v7_0_ring_emit_pipeline_sync - cik vm flush using the CP
  *
  * @ring: the ring to emit the commands to
  *
index feaa5e4..fe5908f 100644 (file)
@@ -734,7 +734,7 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
        mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
 };
 
-static void gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+static void gfx_v9_0_rlcg_w(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
 {
        static void *scratch_reg0;
        static void *scratch_reg1;
@@ -787,15 +787,16 @@ static void gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32
 
 }
 
-static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag)
+static void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset,
+                              u32 v, u32 acc_flags, u32 hwip)
 {
        if (amdgpu_sriov_fullaccess(adev)) {
-               gfx_v9_0_rlcg_rw(adev, offset, v, flag);
+               gfx_v9_0_rlcg_w(adev, offset, v, acc_flags);
 
                return;
        }
 
-       if (flag & AMDGPU_REGS_NO_KIQ)
+       if (acc_flags & AMDGPU_REGS_NO_KIQ)
                WREG32_NO_KIQ(offset, v);
        else
                WREG32(offset, v);
index dbad9ef..c0352dc 100644 (file)
@@ -1641,8 +1641,8 @@ static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev,
        return 0;
 }
 
-int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
-                                  void *ras_error_status)
+static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev,
+                                           void *ras_error_status)
 {
        struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
        uint32_t sec_count = 0, ded_count = 0;
@@ -1676,13 +1676,14 @@ static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev)
        uint32_t i, j;
        uint32_t value;
 
-       value = REG_SET_FIELD(0, GCEA_ERR_STATUS, CLEAR_ERROR_STATUS, 0x1);
-
        mutex_lock(&adev->grbm_idx_mutex);
        for (i = 0; i < gfx_v9_4_2_ea_err_status_regs.se_num; i++) {
                for (j = 0; j < gfx_v9_4_2_ea_err_status_regs.instance;
                     j++) {
                        gfx_v9_4_2_select_se_sh(adev, i, 0, j);
+                       value = RREG32(SOC15_REG_ENTRY_OFFSET(
+                               gfx_v9_4_2_ea_err_status_regs));
+                       value = REG_SET_FIELD(value, GCEA_ERR_STATUS, CLEAR_ERROR_STATUS, 0x1);
                        WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), value);
                }
        }
@@ -1690,7 +1691,7 @@ static void gfx_v9_4_2_reset_ea_err_status(struct amdgpu_device *adev)
        mutex_unlock(&adev->grbm_idx_mutex);
 }
 
-void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
+static void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
 {
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
                return;
@@ -1699,7 +1700,7 @@ void gfx_v9_4_2_reset_ras_error_count(struct amdgpu_device *adev)
        gfx_v9_4_2_query_utc_edc_count(adev, NULL, NULL);
 }
 
-int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
+static int gfx_v9_4_2_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
 {
        struct ras_inject_if *info = (struct ras_inject_if *)inject_if;
        int ret;
@@ -1734,6 +1735,7 @@ static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
                        gfx_v9_4_2_select_se_sh(adev, i, 0, j);
                        reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
                                gfx_v9_4_2_ea_err_status_regs));
+
                        if (REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_STATUS) ||
                            REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_WRRSP_STATUS) ||
                            REG_GET_FIELD(reg_value, GCEA_ERR_STATUS, SDP_RDRSP_DATAPARITY_ERROR)) {
@@ -1741,7 +1743,9 @@ static void gfx_v9_4_2_query_ea_err_status(struct amdgpu_device *adev)
                                                j, reg_value);
                        }
                        /* clear after read */
-                       WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), 0x10);
+                       reg_value = REG_SET_FIELD(reg_value, GCEA_ERR_STATUS,
+                                                 CLEAR_ERROR_STATUS, 0x1);
+                       WREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_4_2_ea_err_status_regs), reg_value);
                }
        }
 
@@ -1772,7 +1776,7 @@ static void gfx_v9_4_2_query_utc_err_status(struct amdgpu_device *adev)
        }
 }
 
-void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev)
+static void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev)
 {
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
                return;
@@ -1782,7 +1786,7 @@ void gfx_v9_4_2_query_ras_error_status(struct amdgpu_device *adev)
        gfx_v9_4_2_query_sq_timeout_status(adev);
 }
 
-void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev)
+static void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev)
 {
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
                return;
@@ -1792,7 +1796,7 @@ void gfx_v9_4_2_reset_ras_error_status(struct amdgpu_device *adev)
        gfx_v9_4_2_reset_sq_timeout_status(adev);
 }
 
-void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev)
+static void gfx_v9_4_2_enable_watchdog_timer(struct amdgpu_device *adev)
 {
        uint32_t i;
        uint32_t data;
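
The RAS fixes in gfx_v9_4_2 also replace blind writes of the clear bit with
read-modify-write sequences, so raising CLEAR_ERROR_STATUS no longer discards
whatever else GCEA_ERR_STATUS holds. The general pattern, sketched against a
fake register (bit position illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define CLEAR_ERROR_STATUS (1u << 4)    /* position is illustrative */

    static uint32_t mmio;                   /* stand-in for the register */

    static uint32_t rreg(void)       { return mmio; }
    static void     wreg(uint32_t v) { mmio = v; }

    int main(void)
    {
            uint32_t v;

            mmio = 0x0000002b;              /* pretend status bits are set */

            /* a blind wreg(CLEAR_ERROR_STATUS) would drop the other fields */
            v = rreg();                     /* read the current value */
            v |= CLEAR_ERROR_STATUS;        /* set only the clear bit */
            wreg(v);                        /* write back, fields intact */

            printf("0x%08x\n", rreg());     /* 0x0000003b, nothing lost */
            return 0;
    }
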
index cd8dc38..db154bf 100644 (file)
@@ -229,6 +229,10 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
        /* Use register 17 for GART */
        const unsigned eng = 17;
        unsigned int i;
+       unsigned char hub_ip = 0;
+
+       hub_ip = (vmhub == AMDGPU_GFXHUB_0) ?
+                  GC_HWIP : MMHUB_HWIP;
 
        spin_lock(&adev->gmc.invalidate_lock);
        /*
@@ -242,8 +246,9 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
        if (use_semaphore) {
                for (i = 0; i < adev->usec_timeout; i++) {
                        /* a read return value of 1 means the semaphore was acquired */
-                       tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem +
-                                           hub->eng_distance * eng);
+                       tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
+                                        hub->eng_distance * eng, hub_ip);
+
                        if (tmp & 0x1)
                                break;
                        udelay(1);
@@ -253,7 +258,9 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
                        DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
        }
 
-       WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req);
+       WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
+                         hub->eng_distance * eng,
+                         inv_req, hub_ip);
 
        /*
         * Issue a dummy read to wait for the ACK register to be cleared
@@ -261,12 +268,14 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
         */
        if ((vmhub == AMDGPU_GFXHUB_0) &&
            (adev->asic_type < CHIP_SIENNA_CICHLID))
-               RREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng);
+               RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
+                                 hub->eng_distance * eng, hub_ip);
 
        /* Wait for ACK with a delay.*/
        for (i = 0; i < adev->usec_timeout; i++) {
-               tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack +
-                                   hub->eng_distance * eng);
+               tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack +
+                                 hub->eng_distance * eng, hub_ip);
+
                tmp &= 1 << vmid;
                if (tmp)
                        break;
@@ -280,8 +289,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
                 * add semaphore release after invalidation,
                 * write with 0 means semaphore release
                 */
-               WREG32_NO_KIQ(hub->vm_inv_eng0_sem +
-                             hub->eng_distance * eng, 0);
+               WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem +
+                                 hub->eng_distance * eng, 0, hub_ip);
 
        spin_unlock(&adev->gmc.invalidate_lock);
 
@@ -947,7 +956,7 @@ static int gmc_v10_0_sw_init(void *handle)
 }
 
 /**
- * gmc_v8_0_gart_fini - vm fini callback
+ * gmc_v10_0_gart_fini - vm fini callback
  *
  * @adev: amdgpu_device pointer
  *
index 0795ea7..0a50fda 100644 (file)
@@ -516,7 +516,7 @@ static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
 }
 
 /**
- * gmc_v8_0_set_fault_enable_default - update VM fault handling
+ * gmc_v7_0_set_fault_enable_default - update VM fault handling
  *
  * @adev: amdgpu_device pointer
  * @value: true redirects VM faults to the default page
index de5abce..85967a5 100644 (file)
@@ -172,6 +172,8 @@ static int jpeg_v2_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
              RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
                jpeg_v2_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
index 938ef4c..46096ad 100644 (file)
@@ -187,14 +187,14 @@ static int jpeg_v2_5_hw_init(void *handle)
 static int jpeg_v2_5_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring;
        int i;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
                if (adev->jpeg.harvest_config & (1 << i))
                        continue;
 
-               ring = &adev->jpeg.inst[i].ring_dec;
                if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
                      RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
                        jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
index 94be353..bd77794 100644 (file)
@@ -159,9 +159,9 @@ static int jpeg_v3_0_hw_init(void *handle)
 static int jpeg_v3_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring;
 
-       ring = &adev->jpeg.inst->ring_dec;
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
              RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
                jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
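
All three JPEG hw_fini paths now cancel the shared VCN idle worker before
inspecting the power-gate state, closing the race where a late idle handler
re-enables the block during teardown. A toy model of the ordering (the real
cancel_delayed_work_sync() also waits for a handler that is already running):

    #include <stdbool.h>
    #include <stdio.h>

    static bool idle_work_pending;
    static bool powered;

    /* stand-in for cancel_delayed_work_sync() */
    static void cancel_idle_work_sync(void)
    {
            idle_work_pending = false;      /* no handler can fire after this */
    }

    static void hw_fini(void)
    {
            /* cancel first: a pending idle handler could otherwise flip the
             * power state underneath the teardown sequence */
            cancel_idle_work_sync();

            if (powered)
                    powered = false;        /* now gate the block safely */
    }

    int main(void)
    {
            powered = true;
            idle_work_pending = true;
            hw_fini();
            printf("powered=%d pending=%d\n", powered, idle_work_pending);
            return 0;
    }
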
index 655c3d5..f7e93bb 100644 (file)
@@ -29,6 +29,7 @@
 #include "mmhub/mmhub_2_0_0_default.h"
 #include "navi10_enum.h"
 
+#include "gc/gc_10_1_0_offset.h"
 #include "soc15_common.h"
 
 #define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid                      0x064d
@@ -192,11 +193,11 @@ static void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB_0];
 
-       WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
+       WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
                            hub->ctx_addr_distance * vmid,
                            lower_32_bits(page_table_base));
 
-       WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
+       WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
                            hub->ctx_addr_distance * vmid,
                            upper_32_bits(page_table_base));
 }
@@ -207,14 +208,14 @@ static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev)
 
        mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
 
-       WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
+       WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
                     (u32)(adev->gmc.gart_start >> 12));
-       WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
+       WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
                     (u32)(adev->gmc.gart_start >> 44));
 
-       WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
+       WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
                     (u32)(adev->gmc.gart_end >> 12));
-       WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
+       WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
                     (u32)(adev->gmc.gart_end >> 44));
 }
 
@@ -223,12 +224,12 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
        uint64_t value;
        uint32_t tmp;
 
-       /* Program the AGP BAR */
-       WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
-       WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
-       WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
-
        if (!amdgpu_sriov_vf(adev)) {
+               /* Program the AGP BAR */
+               WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BASE, 0);
+               WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
+               WREG32_SOC15_RLC(MMHUB, 0, mmMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);
+
                /* Program the system aperture low logical page number. */
                WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                             min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
@@ -335,7 +336,7 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev)
        tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
        tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
                            RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
-       WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
+       WREG32_SOC15_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp);
 }
 
 static void mmhub_v2_0_disable_identity_aperture(struct amdgpu_device *adev)
@@ -397,16 +398,16 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
                                    !adev->gmc.noretry);
-               WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
+               WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
                                    i * hub->ctx_distance, tmp);
-               WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
+               WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
                                    i * hub->ctx_addr_distance, 0);
-               WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
+               WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
                                    i * hub->ctx_addr_distance, 0);
-               WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
+               WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
                                    i * hub->ctx_addr_distance,
                                    lower_32_bits(adev->vm_manager.max_pfn - 1));
-               WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
+               WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
                                    i * hub->ctx_addr_distance,
                                    upper_32_bits(adev->vm_manager.max_pfn - 1));
        }
@@ -418,9 +419,9 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
        unsigned i;
 
        for (i = 0; i < 18; ++i) {
-               WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
+               WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
                                    i * hub->eng_addr_distance, 0xffffffff);
-               WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
+               WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
                                    i * hub->eng_addr_distance, 0x1f);
        }
 }
@@ -449,7 +450,7 @@ static void mmhub_v2_0_gart_disable(struct amdgpu_device *adev)
 
        /* Disable all tables */
        for (i = 0; i < AMDGPU_NUM_VMID; i++)
-               WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_CNTL,
+               WREG32_SOC15_OFFSET_RLC(MMHUB, 0, mmMMVM_CONTEXT0_CNTL,
                                    i * hub->ctx_distance, 0);
 
        /* Setup TLB control */
index 47c8dd9..c4ef822 100644 (file)
@@ -436,7 +436,7 @@ static void mmhub_v9_4_gart_disable(struct amdgpu_device *adev)
 }
 
 /**
- * mmhub_v1_0_set_fault_enable_default - update GART/VM fault handling
+ * mmhub_v9_4_set_fault_enable_default - update GART/VM fault handling
  *
  * @adev: amdgpu_device pointer
  * @value: true redirects VM faults to the default page
index ec5f835..27ba040 100644 (file)
@@ -466,7 +466,7 @@ void nv_grbm_select(struct amdgpu_device *adev,
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
        grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);
 
-       WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL), grbm_gfx_cntl);
+       WREG32_SOC15(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
 }
 
 static void nv_vga_set_state(struct amdgpu_device *adev, bool state)
@@ -849,8 +849,13 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
        case CHIP_NAVI12:
                amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
                amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
-               amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
-               amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+               if (!amdgpu_sriov_vf(adev)) {
+                       amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+                       amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+               } else {
+                       amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+                       amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+               }
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
index abef018..fc400d9 100644 (file)
@@ -733,7 +733,7 @@ static uint32_t psp_v11_0_ring_get_wptr(struct psp_context *psp)
        struct amdgpu_device *adev = psp->adev;
 
        if (amdgpu_sriov_vf(adev))
-               data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+               data = psp->km_ring.ring_wptr;
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
 
@@ -747,6 +747,7 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
        if (amdgpu_sriov_vf(adev)) {
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, value);
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, GFX_CTRL_CMD_ID_CONSUME_CMD);
+               psp->km_ring.ring_wptr = value;
        } else
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
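
Under SR-IOV the PSP ring write pointer is now kept in a driver-side shadow
(km_ring.ring_wptr) instead of being read back from the C2PMSG_102 mailbox,
which the host consumes after each command. A sketch of the cached-shadow idea,
with the surrounding types simplified:

    #include <stdint.h>
    #include <stdio.h>

    static int sriov = 1;

    static uint32_t mmio_c2pmsg_102;        /* host may clobber this */
    static uint32_t mmio_c2pmsg_67;         /* bare-metal wptr register */
    static uint32_t ring_wptr_shadow;       /* km_ring.ring_wptr in the patch */

    static void ring_set_wptr(uint32_t v)
    {
            if (sriov) {
                    mmio_c2pmsg_102 = v;    /* still signal the host ... */
                    ring_wptr_shadow = v;   /* ... but remember the value */
            } else {
                    mmio_c2pmsg_67 = v;
            }
    }

    static uint32_t ring_get_wptr(void)
    {
            /* never read the mailbox back under SR-IOV: trust the shadow */
            return sriov ? ring_wptr_shadow : mmio_c2pmsg_67;
    }

    int main(void)
    {
            ring_set_wptr(42);
            mmio_c2pmsg_102 = 0;             /* host consumed the command */
            printf("%u\n", ring_get_wptr()); /* still 42 */
            return 0;
    }
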
index d0a6ccc..ce7377d 100644 (file)
@@ -375,7 +375,7 @@ static uint32_t psp_v3_1_ring_get_wptr(struct psp_context *psp)
        struct amdgpu_device *adev = psp->adev;
 
        if (amdgpu_sriov_vf(adev))
-               data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102);
+               data = psp->km_ring.ring_wptr;
        else
                data = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
        return data;
@@ -390,6 +390,7 @@ static void psp_v3_1_ring_set_wptr(struct psp_context *psp, uint32_t value)
                /* send interrupt to PSP for SRIOV ring write pointer update */
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101,
                        GFX_CTRL_CMD_ID_CONSUME_CMD);
+               psp->km_ring.ring_wptr = value;
        } else
                WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
 }
index 9f0dda0..4509bd4 100644 (file)
@@ -271,7 +271,7 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
 }
 
 /**
- * sdma_v2_4_hdp_flush_ring_emit - emit an hdp flush on the DMA ring
+ * sdma_v2_4_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
  *
  * @ring: amdgpu ring pointer
  *
index d197185..ae5464e 100644 (file)
@@ -754,7 +754,7 @@ static uint64_t sdma_v4_0_ring_get_wptr(struct amdgpu_ring *ring)
 }
 
 /**
- * sdma_v4_0_page_ring_set_wptr - commit the write pointer
+ * sdma_v4_0_ring_set_wptr - commit the write pointer
  *
  * @ring: amdgpu ring pointer
  *
@@ -820,7 +820,7 @@ static uint64_t sdma_v4_0_page_ring_get_wptr(struct amdgpu_ring *ring)
 }
 
 /**
- * sdma_v4_0_ring_set_wptr - commit the write pointer
+ * sdma_v4_0_page_ring_set_wptr - commit the write pointer
  *
  * @ring: amdgpu ring pointer
  *
index 75d7310..6117ba8 100644 (file)
@@ -328,9 +328,9 @@ static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
                wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
                DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
        } else {
-               wptr = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
+               wptr = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
                wptr = wptr << 32;
-               wptr |= RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
+               wptr |= RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
                DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
        }
 
@@ -371,9 +371,9 @@ static void sdma_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
                                lower_32_bits(ring->wptr << 2),
                                ring->me,
                                upper_32_bits(ring->wptr << 2));
-               WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
                        lower_32_bits(ring->wptr << 2));
-               WREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
                        upper_32_bits(ring->wptr << 2));
        }
 }
@@ -440,20 +440,19 @@ static void sdma_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
  */
 static void sdma_v5_0_ring_emit_mem_sync(struct amdgpu_ring *ring)
 {
-    uint32_t gcr_cntl =
-                   SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
-                       SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
-                       SDMA_GCR_GLI_INV(1);
+       uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
+                           SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
+                           SDMA_GCR_GLI_INV(1);
 
        /* flush the entire L0/L1/L2 cache; this can be tuned later to actual performance needs */
        amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
        amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD1_BASE_VA_31_7(0));
        amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD2_GCR_CONTROL_15_0(gcr_cntl) |
-                       SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
+                         SDMA_PKT_GCR_REQ_PAYLOAD2_BASE_VA_47_32(0));
        amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD3_LIMIT_VA_31_7(0) |
-                       SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
+                         SDMA_PKT_GCR_REQ_PAYLOAD3_GCR_CONTROL_18_16(gcr_cntl >> 16));
        amdgpu_ring_write(ring, SDMA_PKT_GCR_REQ_PAYLOAD4_LIMIT_VA_47_32(0) |
-                       SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
+                         SDMA_PKT_GCR_REQ_PAYLOAD4_VMID(0));
 }
 
 /**
@@ -549,12 +548,12 @@ static void sdma_v5_0_gfx_stop(struct amdgpu_device *adev)
                amdgpu_ttm_set_buffer_funcs_status(adev, false);
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
-               rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+               rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
-               ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+               ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
        }
 }
 
@@ -571,7 +570,7 @@ static void sdma_v5_0_rlc_stop(struct amdgpu_device *adev)
 }
 
 /**
- * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
+ * sdma_v5_0_ctx_switch_enable - stop the async dma engines context switch
  *
  * @adev: amdgpu_device pointer
  * @enable: enable/disable the DMA MEs context switch.
@@ -615,11 +614,11 @@ static void sdma_v5_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
                }
 
                if (enable && amdgpu_sdma_phase_quantum) {
-                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
+                       WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM),
                               phase_quantum);
-                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
+                       WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM),
                               phase_quantum);
-                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
+                       WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM),
                               phase_quantum);
                }
                if (!amdgpu_sriov_vf(adev))
@@ -686,58 +685,63 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
 
                /* Set ring buffer size in dwords */
                rb_bufsz = order_base_2(ring->ring_size / 4);
-               rb_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
+               rb_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL));
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
 #ifdef __BIG_ENDIAN
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
                                        RPTR_WRITEBACK_SWAP_ENABLE, 1);
 #endif
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
 
                /* Initialize the ring buffer's read and write pointers */
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR), 0);
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_HI), 0);
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), 0);
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), 0);
 
                /* setup the wptr shadow polling */
                wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO),
                       lower_32_bits(wptr_gpu_addr));
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI),
                       upper_32_bits(wptr_gpu_addr));
-               wptr_poll_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i,
+               wptr_poll_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
                                                         mmSDMA0_GFX_RB_WPTR_POLL_CNTL));
                wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
                                               SDMA0_GFX_RB_WPTR_POLL_CNTL,
                                               F32_POLL_ENABLE, 1);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL),
                       wptr_poll_cntl);
 
                /* set the wb address whether it's enabled or not */
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_HI),
                       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_RPTR_ADDR_LO),
                       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);
 
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);
 
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE), ring->gpu_addr >> 8);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI), ring->gpu_addr >> 40);
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE),
+                      ring->gpu_addr >> 8);
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_BASE_HI),
+                      ring->gpu_addr >> 40);
 
                ring->wptr = 0;
 
                /* before programming wptr to a smaller value, minor_ptr_update must be set first */
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 1);
 
                if (!amdgpu_sriov_vf(adev)) { /* only bare-metal uses register writes for wptr */
-                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR), lower_32_bits(ring->wptr) << 2);
-                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI), upper_32_bits(ring->wptr) << 2);
+                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR),
+                              lower_32_bits(ring->wptr) << 2);
+                       WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_WPTR_HI),
+                              upper_32_bits(ring->wptr) << 2);
                }
 
-               doorbell = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
-               doorbell_offset = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET));
+               doorbell = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL));
+               doorbell_offset = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i,
+                                               mmSDMA0_GFX_DOORBELL_OFFSET));
 
                if (ring->use_doorbell) {
                        doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
@@ -746,8 +750,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
                } else {
                        doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
                }
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset);
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell);
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET),
+                      doorbell_offset);
 
                adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell,
                                                      ring->doorbell_index, 20);
@@ -756,7 +761,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
                        sdma_v5_0_ring_set_wptr(ring);
 
                /* set minor_ptr_update to 0 after wptr is programmed */
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_MINOR_PTR_UPDATE), 0);
 
                if (!amdgpu_sriov_vf(adev)) {
                        /* set utc l1 enable flag always to 1 */
@@ -790,15 +795,15 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
 
                /* enable DMA RB */
                rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_RB_CNTL), rb_cntl);
 
-               ib_cntl = RREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
+               ib_cntl = RREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL));
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
 #ifdef __BIG_ENDIAN
                ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
 #endif
                /* enable DMA IBs */
-               WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
+               WREG32_SOC15_IP(GC, sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_IB_CNTL), ib_cntl);
 
                ring->sched.ready = true;
 
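Beyond the accessor conversion, the resume path keeps a subtle ordering:
MINOR_PTR_UPDATE is raised before the write pointer is rewound and cleared only
afterwards, so the engine never samples a transiently smaller wptr. Roughly:

    #include <stdio.h>

    static unsigned int minor_ptr_update;
    static unsigned long long wptr;

    static void program_wptr(unsigned long long v)
    {
            minor_ptr_update = 1;   /* announce that a rewind is in progress */
            wptr = v;               /* safe to move the pointer backwards */
            minor_ptr_update = 0;   /* re-enable normal wptr sampling */
    }

    int main(void)
    {
            wptr = 0xbeef;
            program_wptr(0);        /* ring reset: wptr legally goes backwards */
            printf("%llu %u\n", wptr, minor_ptr_update);
            return 0;
    }
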
index ecb82c3..98059bc 100644 (file)
@@ -147,9 +147,6 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
 
-       if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID))
-               return 0;
-
        DRM_DEBUG("\n");
 
        switch (adev->asic_type) {
@@ -187,6 +184,9 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
                       (void *)&adev->sdma.instance[0],
                       sizeof(struct amdgpu_sdma_instance));
 
+       if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID))
+               return 0;
+
        DRM_DEBUG("psp_load == '%s'\n",
                  adev->firmware.load_type == AMDGPU_FW_LOAD_PSP ? "true" : "false");
 
@@ -517,7 +517,7 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev)
 }
 
 /**
- * sdma_v_0_ctx_switch_enable - stop the async dma engines context switch
+ * sdma_v5_2_ctx_switch_enable - stop the async dma engines context switch
  *
  * @adev: amdgpu_device pointer
  * @enable: enable/disable the DMA MEs context switch.
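The sdma_v5_2_init_microcode() hunks move the SR-IOV early return for Sienna Cichlid from before the per-instance setup to after it, so a VF still gets the instance defaults copied even though it skips the PSP firmware-load bookkeeping. A contrived sketch of the corrected guard placement, with names that are illustrative rather than the driver's:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum chip { CHIP_OTHER, CHIP_SIENNA_CICHLID };

struct dev {
	bool sriov_vf;
	enum chip asic;
	char instance_fw[32];	/* per-instance data every path needs */
	bool fw_registered;	/* PSP load bookkeeping VFs must skip */
};

static int init_microcode(struct dev *d)
{
	/* Always done first now: copy the instance defaults... */
	strcpy(d->instance_fw, "sdma5.2 defaults");

	/* ...and only then take the SR-IOV early exit. */
	if (d->sriov_vf && d->asic == CHIP_SIENNA_CICHLID)
		return 0;

	d->fw_registered = true;	/* bare metal continues to FW registration */
	return 0;
}

int main(void)
{
	struct dev vf = { .sriov_vf = true, .asic = CHIP_SIENNA_CICHLID };
	init_microcode(&vf);
	printf("instance data: '%s', fw registered: %d\n",
	       vf.instance_fw, vf.fw_registered);
	return 0;
}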
index cb703e3..195b45b 100644 (file)
@@ -305,7 +305,7 @@ err0:
 }
 
 /**
- * cik_dma_vm_copy_pte - update PTEs by copying them from the GART
+ * si_dma_vm_copy_pte - update PTEs by copying them from the GART
  *
  * @ib: indirect buffer to fill with commands
  * @pe: addr of the page entry
@@ -402,7 +402,7 @@ static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib,
 }
 
 /**
- * si_dma_pad_ib - pad the IB to the required number of dw
+ * si_dma_ring_pad_ib - pad the IB to the required number of dw
  *
  * @ring: amdgpu_ring pointer
  * @ib: indirect buffer to fill with padding
@@ -415,7 +415,7 @@ static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
 }
 
 /**
- * cik_sdma_ring_emit_pipeline_sync - sync the pipeline
+ * si_dma_ring_emit_pipeline_sync - sync the pipeline
  *
  * @ring: amdgpu_ring pointer
  *
index 3c47c94..39b7c20 100644 (file)
@@ -106,7 +106,7 @@ static u32 smuio_v13_0_get_socket_id(struct amdgpu_device *adev)
 }
 
 /**
- * smuio_v13_0_supports_host_gpu_xgmi - detect xgmi interface between cpu and gpu/s.
+ * smuio_v13_0_is_host_gpu_xgmi_supported - detect xgmi interface between cpu and gpu/s.
  *
  * @adev: amdgpu device pointer
  *
index 75008cc..de85577 100644 (file)
@@ -633,7 +633,9 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
                if (entry->and_mask == 0xffffffff) {
                        tmp = entry->or_mask;
                } else {
-                       tmp = RREG32(reg);
+                       tmp = (entry->hwip == GC_HWIP) ?
+                               RREG32_SOC15_IP(GC, reg) : RREG32(reg);
+
                        tmp &= ~(entry->and_mask);
                        tmp |= (entry->or_mask & entry->and_mask);
                }
@@ -644,7 +646,8 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
                        reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
                        WREG32_RLC(reg, tmp);
                else
-                       WREG32(reg, tmp);
+                       (entry->hwip == GC_HWIP) ?
+                               WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp);
 
        }
 
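soc15_program_register_sequence() applies a table of {register, and_mask, or_mask} entries: an all-ones and_mask means "write or_mask verbatim", anything else means a read-modify-write of the masked bits. The hunks above additionally route the access through the RLC-aware helper when the entry targets the GC block. A standalone model of that loop, with a mocked register array in place of MMIO:

#include <stdint.h>
#include <stdio.h>

struct reg_entry { uint32_t reg, and_mask, or_mask; };

static uint32_t regs[16];

static void program_sequence(const struct reg_entry *e, int n)
{
	for (int i = 0; i < n; i++, e++) {
		uint32_t tmp;

		if (e->and_mask == 0xffffffffu) {
			tmp = e->or_mask;		/* full overwrite */
		} else {
			tmp = regs[e->reg];		/* RMW of masked bits */
			tmp &= ~e->and_mask;
			tmp |= (e->or_mask & e->and_mask);
		}
		regs[e->reg] = tmp;
	}
}

int main(void)
{
	regs[3] = 0xdeadbeef;
	const struct reg_entry seq[] = {
		{ .reg = 3, .and_mask = 0x0000ffffu, .or_mask = 0x00001234u },
		{ .reg = 5, .and_mask = 0xffffffffu, .or_mask = 0xcafef00du },
	};
	program_sequence(seq, 2);
	printf("reg3=0x%08x reg5=0x%08x\n", regs[3], regs[5]);
	/* prints reg3=0xdead1234 reg5=0xcafef00d */
	return 0;
}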
index 14bd794..c781808 100644 (file)
 /* Register Access Macros */
 #define SOC15_REG_OFFSET(ip, inst, reg)        (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
 
+#define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \
+       ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->rlcg_wreg) ? \
+        adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, flag, hwip) : \
+        WREG32(reg, value))
+
+#define __RREG32_SOC15_RLC__(reg, flag, hwip) \
+       ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->rlcg_rreg) ? \
+        adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, flag, hwip) : \
+        RREG32(reg))
+
 #define WREG32_FIELD15(ip, idx, reg, field, val)       \
-       WREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg,  \
-       (RREG32(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
-       & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+        __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg,   \
+                               (__RREG32_SOC15_RLC__( \
+                                       adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
+                                       0, ip##_HWIP) & \
+                               ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \
+                             0, ip##_HWIP)
 
 #define RREG32_SOC15(ip, inst, reg) \
-       RREG32(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+       __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
+                        0, ip##_HWIP)
+
+#define RREG32_SOC15_IP(ip, reg) __RREG32_SOC15_RLC__(reg, 0, ip##_HWIP)
 
 #define RREG32_SOC15_NO_KIQ(ip, inst, reg) \
-       RREG32_NO_KIQ(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+       __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
+                        AMDGPU_REGS_NO_KIQ, ip##_HWIP)
 
 #define RREG32_SOC15_OFFSET(ip, inst, reg, offset) \
-       RREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset)
+        __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, 0, ip##_HWIP)
 
 #define WREG32_SOC15(ip, inst, reg, value) \
-       WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
+        __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), \
+                         value, 0, ip##_HWIP)
+
+#define WREG32_SOC15_IP(ip, reg, value) \
+        __WREG32_SOC15_RLC__(reg, value, 0, ip##_HWIP)
 
 #define WREG32_SOC15_NO_KIQ(ip, inst, reg, value) \
-       WREG32_NO_KIQ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
+       __WREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, \
+                            value, AMDGPU_REGS_NO_KIQ, ip##_HWIP)
 
 #define WREG32_SOC15_OFFSET(ip, inst, reg, offset, value) \
-       WREG32((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value)
+        __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, \
+                         value, 0, ip##_HWIP)
 
 #define SOC15_WAIT_ON_RREG(ip, inst, reg, expected_value, mask) \
 ({     int ret = 0;                                            \
 })
 
 #define WREG32_RLC(reg, value) \
-       do { \
-               if (adev->gfx.rlc.funcs->rlcg_wreg) \
-                       adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, 0); \
-               else \
-                       WREG32(reg, value);     \
-       } while (0)
+       __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_RLC, GC_HWIP)
 
 #define WREG32_RLC_EX(prefix, reg, value) \
        do {                                                    \
                }       \
        } while (0)
 
+/* shadow the registers in the callback function */
 #define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
-       WREG32_RLC((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value)
+       __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg), value, AMDGPU_REGS_RLC, GC_HWIP)
 
+/* for GC only */
 #define RREG32_RLC(reg) \
-       (adev->gfx.rlc.funcs->rlcg_rreg ? \
-               adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, 0) : RREG32(reg))
-
-#define WREG32_RLC_NO_KIQ(reg, value) \
-       do { \
-               if (adev->gfx.rlc.funcs->rlcg_wreg) \
-                       adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, value, AMDGPU_REGS_NO_KIQ); \
-               else \
-                       WREG32_NO_KIQ(reg, value);      \
-       } while (0)
+       __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_RLC, GC_HWIP)
+
+#define WREG32_RLC_NO_KIQ(reg, value, hwip) \
+       __WREG32_SOC15_RLC__(reg, value, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip)
 
-#define RREG32_RLC_NO_KIQ(reg) \
-       (adev->gfx.rlc.funcs->rlcg_rreg ? \
-               adev->gfx.rlc.funcs->rlcg_rreg(adev, reg, AMDGPU_REGS_NO_KIQ) : RREG32_NO_KIQ(reg))
+#define RREG32_RLC_NO_KIQ(reg, hwip) \
+       __RREG32_SOC15_RLC__(reg, AMDGPU_REGS_NO_KIQ | AMDGPU_REGS_RLC, hwip)
 
 #define WREG32_SOC15_RLC_SHADOW_EX(prefix, ip, inst, reg, value) \
        do {                                                    \
        } while (0)
 
 #define RREG32_SOC15_RLC(ip, inst, reg) \
-       RREG32_RLC(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)
+       __RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg, AMDGPU_REGS_RLC, ip##_HWIP)
 
 #define WREG32_SOC15_RLC(ip, inst, reg, value) \
        do {                                                    \
                uint32_t target_reg = adev->reg_offset[ip##_HWIP][0][reg##_BASE_IDX] + reg;\
-               WREG32_RLC(target_reg, value); \
+               __WREG32_SOC15_RLC__(target_reg, value, AMDGPU_REGS_RLC, ip##_HWIP); \
        } while (0)
 
 #define WREG32_SOC15_RLC_EX(prefix, ip, inst, reg, value) \
        } while (0)
 
 #define WREG32_FIELD15_RLC(ip, idx, reg, field, val)   \
-       WREG32_RLC((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
-       (RREG32_RLC(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg) \
-       & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
+       __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg), \
+                            (__RREG32_SOC15_RLC__(adev->reg_offset[ip##_HWIP][idx][mm##reg##_BASE_IDX] + mm##reg, \
+                                                  AMDGPU_REGS_RLC, ip##_HWIP) & \
+                             ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field), \
+                            AMDGPU_REGS_RLC, ip##_HWIP)
 
 #define WREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset, value) \
-       WREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset), value)
+       __WREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, value, AMDGPU_REGS_RLC, ip##_HWIP)
 
 #define RREG32_SOC15_OFFSET_RLC(ip, inst, reg, offset) \
-       RREG32_RLC(((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset))
+       __RREG32_SOC15_RLC__((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) + offset, AMDGPU_REGS_RLC, ip##_HWIP)
 
 #endif
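The net effect of the header rework above is that every SOC15 accessor now funnels into two building blocks, __RREG32_SOC15_RLC__ and __WREG32_SOC15_RLC__, which pick between direct MMIO and the RLC gateway based on amdgpu_sriov_vf() and the presence of the rlcg callbacks, passing along the access flags (AMDGPU_REGS_NO_KIQ, AMDGPU_REGS_RLC) and the owning hardware IP. A compact userspace model of that dispatch; the flag values and struct layout are stand-ins, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REGS_NO_KIQ	(1u << 0)	/* stand-ins for AMDGPU_REGS_* bits */
#define REGS_RLC	(1u << 1)

enum hwip { GC_HWIP, MMHUB_HWIP };

struct dev {
	bool sriov_vf;
	/* optional RLC gateway; NULL means "no special path" */
	void (*rlcg_wreg)(struct dev *d, uint32_t reg, uint32_t v,
			  uint32_t flag, enum hwip ip);
	uint32_t mmio[64];
};

/* Model of __WREG32_SOC15_RLC__: VFs with a gateway use it, else plain MMIO. */
static void wreg32_soc15_rlc(struct dev *d, uint32_t reg, uint32_t v,
			     uint32_t flag, enum hwip ip)
{
	if (d->sriov_vf && d->rlcg_wreg)
		d->rlcg_wreg(d, reg, v, flag, ip);
	else
		d->mmio[reg] = v;
}

static void my_rlcg_wreg(struct dev *d, uint32_t reg, uint32_t v,
			 uint32_t flag, enum hwip ip)
{
	printf("rlcg: reg=%u val=0x%x flags=0x%x ip=%d\n", reg, v, flag, ip);
	d->mmio[reg] = v;
}

int main(void)
{
	struct dev d = { .sriov_vf = true, .rlcg_wreg = my_rlcg_wreg };

	/* WREG32_SOC15(GC, ...): no flags */
	wreg32_soc15_rlc(&d, 7, 0xabcd, 0, GC_HWIP);
	/* WREG32_RLC_NO_KIQ(...): NO_KIQ | RLC */
	wreg32_soc15_rlc(&d, 8, 0x1111, REGS_NO_KIQ | REGS_RLC, GC_HWIP);
	return 0;
}

One design consequence visible in the hunks: WREG32_RLC_NO_KIQ and RREG32_RLC_NO_KIQ grow an explicit hwip argument, so each caller now states which IP block owns the register instead of the macro silently assuming GC.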
index 284447d..6c0e914 100644 (file)
@@ -340,7 +340,7 @@ static int uvd_v3_1_start(struct amdgpu_device *adev)
        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL,  1 << 9);
 
-       /* disable interupt */
+       /* disable interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
 
 #ifdef __BIG_ENDIAN
@@ -405,7 +405,7 @@ static int uvd_v3_1_start(struct amdgpu_device *adev)
                return r;
        }
 
-       /* enable interupt */
+       /* enable interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));
 
        WREG32_P(mmUVD_STATUS, 0, ~(1<<2));
index 2bab9c7..cf3803f 100644 (file)
@@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unpin(bo);
        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
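The uvd_v6_0 hunk fixes an error path that unreserved and unreferenced a pinned buffer object without unpinning it, leaking the pin count. A toy model of the invariant the fix restores (every acquire mirrored in reverse order on exit), using counters instead of the real amdgpu BO calls:

#include <assert.h>
#include <stdio.h>

struct bo { int refs, pins, reserved; };

static void bo_pin(struct bo *b)       { b->pins++; }
static void bo_unpin(struct bo *b)     { b->pins--; }
static void bo_reserve(struct bo *b)   { b->reserved = 1; }
static void bo_unreserve(struct bo *b) { b->reserved = 0; }
static void bo_unref(struct bo *b)     { b->refs--; }

/* Mirrors the fixed error path: unpin before unreserve/unref. */
static int test_ib(struct bo *b)
{
	bo_reserve(b);
	bo_pin(b);

	int r = -5;		/* pretend the IB test failed */

	bo_unpin(b);		/* the call the hunk adds */
	bo_unreserve(b);
	bo_unref(b);
	return r;
}

int main(void)
{
	struct bo b = { .refs = 1 };
	test_ib(&b);
	assert(b.pins == 0 && !b.reserved && b.refs == 0);
	printf("pin count balanced\n");
	return 0;
}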
index 0c1beef..284bb42 100644 (file)
@@ -231,9 +231,13 @@ static int vcn_v1_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
-               RREG32_SOC15(VCN, 0, mmUVD_STATUS))
+               (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+                RREG32_SOC15(VCN, 0, mmUVD_STATUS))) {
                vcn_v1_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+       }
 
        return 0;
 }
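This hunk, and the matching ones in vcn_v2_0, vcn_v2_5, and vcn_v3_0 below, cancels the VCN idle worker before deciding whether to gate power, so the delayed work cannot race hw_fini and re-ungate the block after teardown has already sampled its state; the vcn_v1_0 variant also adds a cur_state check before touching UVD_STATUS. A simplified single-threaded sketch of the ordering, with a flag standing in for the delayed work item:

#include <stdbool.h>
#include <stdio.h>

enum pg_state { PG_STATE_UNGATE, PG_STATE_GATE };

struct vcn {
	bool idle_work_pending;		/* stand-in for the delayed work */
	enum pg_state cur_state;
};

static void cancel_idle_work_sync(struct vcn *v)
{
	/* In the driver this blocks until the worker finished or was
	 * cancelled; afterwards no worker can flip the power state. */
	v->idle_work_pending = false;
}

static int hw_fini(struct vcn *v)
{
	cancel_idle_work_sync(v);		/* 1: quiesce first */

	if (v->cur_state != PG_STATE_GATE)	/* 2: then sample state once */
		v->cur_state = PG_STATE_GATE;	/*    and gate if needed */
	return 0;
}

int main(void)
{
	struct vcn v = { .idle_work_pending = true, .cur_state = PG_STATE_UNGATE };
	hw_fini(&v);
	printf("gated=%d, worker pending=%d\n",
	       v.cur_state == PG_STATE_GATE, v.idle_work_pending);
	return 0;
}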
@@ -765,7 +769,7 @@ static void vcn_1_0_enable_static_power_gating(struct amdgpu_device *adev)
 }
 
 /**
- * vcn_v1_0_start - start VCN block
+ * vcn_v1_0_start_spg_mode - start VCN block
  *
  * @adev: amdgpu_device pointer
  *
@@ -1101,7 +1105,7 @@ static int vcn_v1_0_start(struct amdgpu_device *adev)
 }
 
 /**
- * vcn_v1_0_stop - stop VCN block
+ * vcn_v1_0_stop_spg_mode - stop VCN block
  *
  * @adev: amdgpu_device pointer
  *
index 116b964..8af567c 100644 (file)
@@ -262,6 +262,8 @@ static int vcn_v2_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
            (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
              RREG32_SOC15(VCN, 0, mmUVD_STATUS)))
index 948813d..888b17d 100644 (file)
@@ -321,6 +321,8 @@ static int vcn_v2_5_hw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int i;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
index 10c2d08..4c36fc5 100644 (file)
@@ -386,15 +386,14 @@ done:
 static int vcn_v3_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       struct amdgpu_ring *ring;
        int i;
 
+       cancel_delayed_work_sync(&adev->vcn.idle_work);
+
        for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
                if (adev->vcn.harvest_config & (1 << i))
                        continue;
 
-               ring = &adev->vcn.inst[i].ring_dec;
-
                if (!amdgpu_sriov_vf(adev)) {
                        if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
                                        (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
index 5b6c566..2f8d352 100644 (file)
@@ -110,8 +110,6 @@ static void kfd_sdma_activity_worker(struct work_struct *work)
 
        workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
                                sdma_activity_work);
-       if (!workarea)
-               return;
 
        pdd = workarea->pdd;
        if (!pdd)
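The deleted NULL check could never fire: container_of() is pure pointer arithmetic on the member pointer the workqueue core passed in, so for a valid work pointer the result is non-NULL by construction. A two-minute demonstration of that arithmetic with a stand-in struct:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct workarea { int pdd; int sdma_activity_work; };

int main(void)
{
	struct workarea wa;
	int *work = &wa.sdma_activity_work;	/* what the core hands us */
	struct workarea *back =
		container_of(work, struct workarea, sdma_activity_work);

	/* back == &wa always; a NULL result would require 'work' itself
	 * to be a bogus pointer near address zero. */
	printf("recovered container: %s\n", back == &wa ? "ok" : "broken");
	return 0;
}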
index 3267eb2..4f4c643 100644 (file)
@@ -28,6 +28,7 @@
 
 #include "dm_services_types.h"
 #include "dc.h"
+#include "dc_link_dp.h"
 #include "dc/inc/core_types.h"
 #include "dal_asic_id.h"
 #include "dmub/dmub_srv.h"
@@ -314,10 +315,8 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
        struct drm_crtc *crtc;
        struct amdgpu_crtc *amdgpu_crtc;
 
-       if (otg_inst == -1) {
-               WARN_ON(1);
+       if (WARN_ON(otg_inst == -1))
                return adev->mode_info.crtcs[0];
-       }
 
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                amdgpu_crtc = to_amdgpu_crtc(crtc);
@@ -396,8 +395,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
        e = amdgpu_crtc->event;
        amdgpu_crtc->event = NULL;
 
-       if (!e)
-               WARN_ON(1);
+       WARN_ON(!e);
 
        vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
 
@@ -600,14 +598,14 @@ static void dm_crtc_high_irq(void *interrupt_params)
 }
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 /**
  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
  * DCN generation ASICs
- * @interrupt params - interrupt parameters
+ * @interrupt_params: interrupt parameters
  *
  * Used to set crc window/read out crc value at vertical line 0 position
  */
-#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
 {
        struct common_irq_params *irq_params = interrupt_params;
@@ -981,7 +979,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
                abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
        }
 
-       adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
+       if (!adev->dm.dc->ctx->dmub_srv)
+               adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
        if (!adev->dm.dc->ctx->dmub_srv) {
                DRM_ERROR("Couldn't allocate DC DMUB server!\n");
                return -ENOMEM;
@@ -1335,10 +1334,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
        }
 #endif
 
-       if (adev->dm.dc->ctx->dmub_srv) {
-               dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
-               adev->dm.dc->ctx->dmub_srv = NULL;
-       }
+       dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
 
        if (dc_enable_dmub_notifications(adev->dm.dc)) {
                kfree(adev->dm.dmub_notify);
@@ -1715,7 +1711,6 @@ static int dm_late_init(void *handle)
        unsigned int linear_lut[16];
        int i;
        struct dmcu *dmcu = NULL;
-       bool ret = true;
 
        dmcu = adev->dm.dc->res_pool->dmcu;
 
@@ -1732,18 +1727,23 @@ static int dm_late_init(void *handle)
         * 0xFFFF x 0.01 = 0x28F
         */
        params.min_abm_backlight = 0x28F;
-
        /* In the case where abm is implemented on dmcub,
-        * dmcu object will be null.
-        * ABM 2.4 and up are implemented on dmcub.
-        */
-       if (dmcu)
-               ret = dmcu_load_iram(dmcu, params);
-       else if (adev->dm.dc->ctx->dmub_srv)
-               ret = dmub_init_abm_config(adev->dm.dc->res_pool, params);
+       * dmcu object will be null.
+       * ABM 2.4 and up are implemented on dmcub.
+       */
+       if (dmcu) {
+               if (!dmcu_load_iram(dmcu, params))
+                       return -EINVAL;
+       } else if (adev->dm.dc->ctx->dmub_srv) {
+               struct dc_link *edp_links[MAX_NUM_EDP];
+               int edp_num;
 
-       if (!ret)
-               return -EINVAL;
+               get_edp_links(adev->dm.dc, edp_links, &edp_num);
+               for (i = 0; i < edp_num; i++) {
+                       if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
+                               return -EINVAL;
+               }
+       }
 
        return detect_mst_link_for_all_connectors(adev_to_drm(adev));
 }
@@ -2008,7 +2008,6 @@ static int dm_suspend(void *handle)
 
        amdgpu_dm_irq_suspend(adev);
 
-
        dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
 
        return 0;
@@ -2747,6 +2746,7 @@ static void handle_hpd_rx_irq(void *param)
        enum dc_connection_type new_connection_type = dc_connection_none;
        struct amdgpu_device *adev = drm_to_adev(dev);
        union hpd_irq_data hpd_irq_data;
+       bool lock_flag = 0;
 
        memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
 
@@ -2776,15 +2776,28 @@ static void handle_hpd_rx_irq(void *param)
                }
        }
 
-       if (!amdgpu_in_reset(adev)) {
+       /*
+        * TODO: We need the lock to avoid touching DC state while it's being
+        * modified during automated compliance testing, or when link loss
+        * happens. While this should be split into subhandlers and proper
+        * interfaces to avoid having to conditionally lock like this in the
+        * outer layer, we need this workaround temporarily to allow MST
+        * lightup in some scenarios to avoid timeout.
+        */
+       if (!amdgpu_in_reset(adev) &&
+           (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
+            hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
                mutex_lock(&adev->dm.dc_lock);
+               lock_flag = 1;
+       }
+
 #ifdef CONFIG_DRM_AMD_DC_HDCP
        result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
 #else
        result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
 #endif
+       if (!amdgpu_in_reset(adev) && lock_flag)
                mutex_unlock(&adev->dm.dc_lock);
-       }
 
 out:
        if (result && !is_mst_root_connector) {
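The locking hunk above replaces "take dc_lock unconditionally unless in reset" with "take it only for automated-test or link-loss IRQs, and remember whether we took it". The lock_flag is what keeps lock and unlock paired even though the predicates are evaluated twice. A standalone sketch of that pattern with a pthread mutex standing in for dc_lock (build with -pthread; the predicate names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t dc_lock = PTHREAD_MUTEX_INITIALIZER;

static bool in_reset;			/* stand-ins for the real checks */
static bool needs_dc_lock(void) { return true; }

static void handle_irq(void)
{
	bool lock_flag = false;

	/* Lock only when the handler will actually touch shared DC state. */
	if (!in_reset && needs_dc_lock()) {
		pthread_mutex_lock(&dc_lock);
		lock_flag = true;
	}

	puts("handling hpd rx irq");

	/* Unlock iff we locked; the flag keeps the pair matched even if
	 * the predicates would evaluate differently the second time. */
	if (lock_flag)
		pthread_mutex_unlock(&dc_lock);
}

int main(void)
{
	handle_irq();
	return 0;
}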
@@ -3407,7 +3420,7 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
        if (dm->backlight_caps.caps_valid)
                return;
 
-       amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
+       amdgpu_acpi_get_backlight_caps(&caps);
        if (caps.caps_valid) {
                dm->backlight_caps.caps_valid = true;
                if (caps.aux_support)
@@ -3499,7 +3512,7 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
                        rc = dc_link_set_backlight_level_nits(link[i], true, brightness[i],
                                AUX_BL_DEFAULT_TRANSITION_TIME_MS);
                        if (!rc) {
-                               DRM_ERROR("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
+                               DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", i);
                                break;
                        }
                }
@@ -3507,7 +3520,7 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
                for (i = 0; i < dm->num_of_edps; i++) {
                        rc = dc_link_set_backlight_level(dm->backlight_link[i], brightness[i], 0);
                        if (!rc) {
-                               DRM_ERROR("DM: Failed to update backlight on eDP[%d]\n", i);
+                               DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", i);
                                break;
                        }
                }
@@ -4953,6 +4966,14 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
        case DRM_FORMAT_ABGR16161616F:
                plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
                break;
+       case DRM_FORMAT_XRGB16161616:
+       case DRM_FORMAT_ARGB16161616:
+               plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
+               break;
+       case DRM_FORMAT_XBGR16161616:
+       case DRM_FORMAT_ABGR16161616:
+               plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
+               break;
        default:
                DRM_ERROR(
                        "Unsupported screen format %p4cc\n",
@@ -5529,6 +5550,63 @@ static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
        }
 }
 
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
+                                                       struct dc_sink *sink, struct dc_stream_state *stream,
+                                                       struct dsc_dec_dpcd_caps *dsc_caps)
+{
+       stream->timing.flags.DSC = 0;
+
+       if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+               dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+                                     aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
+                                     aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+                                     dsc_caps);
+       }
+}
+
+static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
+                                                                               struct dc_sink *sink, struct dc_stream_state *stream,
+                                                                               struct dsc_dec_dpcd_caps *dsc_caps)
+{
+       struct drm_connector *drm_connector = &aconnector->base;
+       uint32_t link_bandwidth_kbps;
+
+       link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
+                                                       dc_link_get_link_cap(aconnector->dc_link));
+       /* Set DSC policy according to dsc_clock_en */
+       dc_dsc_policy_set_enable_dsc_when_not_needed(
+               aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
+
+       if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
+
+               if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
+                                               dsc_caps,
+                                               aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
+                                               0,
+                                               link_bandwidth_kbps,
+                                               &stream->timing,
+                                               &stream->timing.dsc_cfg)) {
+                       stream->timing.flags.DSC = 1;
+                       DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
+               }
+       }
+
+       /* Overwrite the stream flag if DSC is enabled through debugfs */
+       if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
+               stream->timing.flags.DSC = 1;
+
+       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
+               stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
+
+       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
+               stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
+
+       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
+               stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
+}
+#endif
+
 static struct drm_display_mode *
 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
                          bool use_probed_modes)
@@ -5625,12 +5703,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        struct drm_display_mode saved_mode;
        struct drm_display_mode *freesync_mode = NULL;
        bool native_mode_found = false;
-       bool recalculate_timing = dm_state ? (dm_state->scaling != RMX_OFF) : false;
+       bool recalculate_timing = false;
+       bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
        int mode_refresh;
        int preferred_refresh = 0;
 #if defined(CONFIG_DRM_AMD_DC_DCN)
        struct dsc_dec_dpcd_caps dsc_caps;
-       uint32_t link_bandwidth_kbps;
 #endif
        struct dc_sink *sink = NULL;
 
@@ -5688,7 +5766,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
-               recalculate_timing |= amdgpu_freesync_vid_mode &&
+               recalculate_timing = amdgpu_freesync_vid_mode &&
                                 is_freesync_video_mode(&mode, aconnector);
                if (recalculate_timing) {
                        freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
@@ -5696,11 +5774,10 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                        mode = *freesync_mode;
                } else {
                        decide_crtc_timing_for_drm_display_mode(
-                               &mode, preferred_mode,
-                               dm_state ? (dm_state->scaling != RMX_OFF) : false);
-               }
+                               &mode, preferred_mode, scale);
 
-               preferred_refresh = drm_mode_vrefresh(preferred_mode);
+                       preferred_refresh = drm_mode_vrefresh(preferred_mode);
+               }
        }
 
        if (recalculate_timing)
@@ -5712,7 +5789,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
        * If scaling is enabled and refresh rate didn't change
        * we copy the vic and polarities of the old timings
        */
-       if (!recalculate_timing || mode_refresh != preferred_refresh)
+       if (!scale || mode_refresh != preferred_refresh)
                fill_stream_properties_from_drm_display_mode(
                        stream, &mode, &aconnector->base, con_state, NULL,
                        requested_bpc);
@@ -5721,45 +5798,12 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                        stream, &mode, &aconnector->base, con_state, old_stream,
                        requested_bpc);
 
-       stream->timing.flags.DSC = 0;
-
-       if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-               dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
-                                     aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
-                                     aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
-                                     &dsc_caps);
-               link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
-                                                            dc_link_get_link_cap(aconnector->dc_link));
-
-               if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
-                       /* Set DSC policy according to dsc_clock_en */
-                       dc_dsc_policy_set_enable_dsc_when_not_needed(
-                               aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
-
-                       if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
-                                                 &dsc_caps,
-                                                 aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
-                                                 0,
-                                                 link_bandwidth_kbps,
-                                                 &stream->timing,
-                                                 &stream->timing.dsc_cfg))
-                               stream->timing.flags.DSC = 1;
-                       /* Overwrite the stream flag if DSC is enabled through debugfs */
-                       if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
-                               stream->timing.flags.DSC = 1;
-
-                       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
-                               stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
-
-                       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
-                               stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
-
-                       if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
-                               stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
-               }
+       /* SST DSC determination policy */
+       update_dsc_caps(aconnector, sink, stream, &dsc_caps);
+       if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
+               apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
 #endif
-       }
 
        update_stream_scaling_settings(&mode, dm_state, stream);
 
@@ -6558,9 +6602,8 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
 
        dm_update_crtc_active_planes(crtc, crtc_state);
 
-       if (unlikely(!dm_crtc_state->stream &&
-                    modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
-               WARN_ON(1);
+       if (WARN_ON(unlikely(!dm_crtc_state->stream &&
+                    modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
                return ret;
        }
 
@@ -7059,6 +7102,10 @@ static const uint32_t rgb_formats[] = {
        DRM_FORMAT_XBGR2101010,
        DRM_FORMAT_ARGB2101010,
        DRM_FORMAT_ABGR2101010,
+       DRM_FORMAT_XRGB16161616,
+       DRM_FORMAT_XBGR16161616,
+       DRM_FORMAT_ARGB16161616,
+       DRM_FORMAT_ABGR16161616,
        DRM_FORMAT_XBGR8888,
        DRM_FORMAT_ABGR8888,
        DRM_FORMAT_RGB565,
@@ -8967,7 +9014,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
                }
 
                status = dc_stream_get_status(dm_new_crtc_state->stream);
-               WARN_ON(!status);
+
+               if (WARN_ON(!status))
+                       continue;
+
                WARN_ON(!status->plane_count);
 
                /*
@@ -9974,7 +10024,7 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
 
        if (cursor_scale_w != primary_scale_w ||
            cursor_scale_h != primary_scale_h) {
-               DRM_DEBUG_ATOMIC("Cursor plane scaling doesn't match primary plane\n");
+               drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
                return -EINVAL;
        }
 
@@ -10010,11 +10060,11 @@ static int validate_overlay(struct drm_atomic_state *state)
 {
        int i;
        struct drm_plane *plane;
-       struct drm_plane_state *old_plane_state, *new_plane_state;
-       struct drm_plane_state *primary_state, *overlay_state = NULL;
+       struct drm_plane_state *new_plane_state;
+       struct drm_plane_state *primary_state, *cursor_state, *overlay_state = NULL;
 
        /* Check if primary plane is contained inside overlay */
-       for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
+       for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
                if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
                        if (drm_atomic_plane_disabling(plane->state, new_plane_state))
                                return 0;
@@ -10041,6 +10091,14 @@ static int validate_overlay(struct drm_atomic_state *state)
        if (!primary_state->crtc)
                return 0;
 
+       /* check if cursor plane is enabled */
+       cursor_state = drm_atomic_get_plane_state(state, overlay_state->crtc->cursor);
+       if (IS_ERR(cursor_state))
+               return PTR_ERR(cursor_state);
+
+       if (drm_atomic_plane_disabling(plane->state, cursor_state))
+               return 0;
+
        /* Perform the bounds check to ensure the overlay plane covers the primary */
        if (primary_state->crtc_x < overlay_state->crtc_x ||
            primary_state->crtc_y < overlay_state->crtc_y ||
index fed9496..5568d4e 100644 (file)
@@ -160,6 +160,8 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
        struct dc_sink *dc_sink = aconnector->dc_sink;
        struct drm_dp_mst_port *port = aconnector->port;
        u8 dsc_caps[16] = { 0 };
+       u8 dsc_branch_dec_caps_raw[3] = { 0 };  // DSC branch decoder caps 0xA0 ~ 0xA2
+       u8 *dsc_branch_dec_caps = NULL;
 
        aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
 #if defined(CONFIG_HP_HOOK_WORKAROUND)
@@ -182,9 +184,13 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
        if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
                return false;
 
+       if (drm_dp_dpcd_read(aconnector->dsc_aux,
+                       DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, dsc_branch_dec_caps_raw, 3) == 3)
+               dsc_branch_dec_caps = dsc_branch_dec_caps_raw;
+
        if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
-                                  dsc_caps, NULL,
-                                  &dc_sink->dsc_caps.dsc_dec_caps))
+                                 dsc_caps, dsc_branch_dec_caps,
+                                 &dc_sink->dsc_caps.dsc_dec_caps))
                return false;
 
        return true;
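The MST hunk reads the three DSC branch decoder capability bytes (DPCD 0xA0-0xA2, DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 onward) and only forwards them to dc_dsc_parse_dsc_dpcd() if the AUX read returned the full length; otherwise it keeps passing NULL as before. A sketch of that guarded-optional-argument shape with a mocked AUX read (addresses per the DPCD defines used in the hunk, everything else illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mock AUX read: returns number of bytes read, or <0 on failure. */
static int dpcd_read(uint32_t addr, uint8_t *buf, int len)
{
	memset(buf, 0x42, len);		/* pretend the sink answered */
	return len;
}

static void parse_dsc_caps(const uint8_t *basic, const uint8_t *branch)
{
	printf("basic caps present, branch caps %s\n",
	       branch ? "present" : "absent (NULL)");
}

int main(void)
{
	uint8_t basic[16] = { 0 };
	uint8_t branch_raw[3] = { 0 };
	const uint8_t *branch = NULL;	/* stays NULL unless the read succeeds */

	dpcd_read(0x060, basic, sizeof(basic));		/* DP_DSC_SUPPORT */

	if (dpcd_read(0x0a0, branch_raw, 3) == 3)	/* branch decoder caps */
		branch = branch_raw;

	parse_dsc_caps(basic, branch);
	return 0;
}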
index c67d21a..9b8ea6e 100644 (file)
@@ -979,7 +979,7 @@ static enum bp_result get_ss_info_from_internal_ss_info_tbl_V2_1(
        struct spread_spectrum_info *info);
 
 /**
- * get_ss_info_from_table
+ * get_ss_info_from_tbl
  * Get spread spectrum information from the ASIC_InternalSS_Info Ver 2.1 or
  * SS_Info table from the VBIOS
  * There cannot be more than 1 entry for ASIC_InternalSS_Info Ver 2.1 or
@@ -1548,7 +1548,7 @@ static uint32_t get_ss_entry_number_from_ss_info_tbl(
        uint32_t id);
 
 /**
- * BiosParserObject::GetNumberofSpreadSpectrumEntry
+ * bios_parser_get_ss_entry_number
  * Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table from
  * the VBIOS that match the SSid (to be converted from signal)
  *
@@ -1725,7 +1725,7 @@ static uint32_t get_ss_entry_number_from_internal_ss_info_tbl_v2_1(
        return 0;
 }
 /**
- * get_ss_entry_number_from_internal_ss_info_table_V3_1
+ * get_ss_entry_number_from_internal_ss_info_tbl_V3_1
  * Get Number of SpreadSpectrum Entry from the ASIC_InternalSS_Info table of
  * the VBIOS that matches id
  *
index 5b77251..e317a36 100644 (file)
@@ -114,7 +114,7 @@ bool dal_cmd_table_helper_controller_id_to_atom(
 }
 
 /**
- * translate_transmitter_bp_to_atom - Translate the Transmitter to the
+ * dal_cmd_table_helper_transmitter_bp_to_atom - Translate the Transmitter to the
  *                                    corresponding ATOM BIOS value
  * @t: transmitter
  * returns: output digitalTransmitter
index 00706b0..6d2fb11 100644 (file)
@@ -129,7 +129,7 @@ bool dal_cmd_table_helper_controller_id_to_atom2(
 }
 
 /**
- * translate_transmitter_bp_to_atom2 - Translate the Transmitter to the
+ * dal_cmd_table_helper_transmitter_bp_to_atom2 - Translate the Transmitter to the
  *                                     corresponding ATOM BIOS value
  *  @t: transmitter
  *  returns: digitalTransmitter
index 1244fcb..ff5bb15 100644 (file)
@@ -2863,6 +2863,7 @@ static void populate_initial_data(
                        data->bytes_per_pixel[num_displays + 4] = 4;
                        break;
                case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+               case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
                case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
                        data->bytes_per_pixel[num_displays + 4] = 8;
                        break;
@@ -2966,6 +2967,7 @@ static void populate_initial_data(
                                data->bytes_per_pixel[num_displays + 4] = 4;
                                break;
                        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+                       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
                        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
                                data->bytes_per_pixel[num_displays + 4] = 8;
                                break;
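The bandwidth-calculation hunks here and in dcn_calcs below slot the new fixed-point ABGR16161616 surface format into the same buckets as the existing 16-bit formats: 8 bytes per pixel and the dm_444_64 class, since four 16-bit channels are 64 bits per pixel whether fixed point or float. A tiny model of that classification, with a trimmed stand-in for the DC enum:

#include <stdio.h>

enum surface_format {
	FMT_ARGB8888,
	FMT_ARGB2101010,
	FMT_ARGB16161616,	/* 16bpc fixed point */
	FMT_ABGR16161616,	/* 16bpc fixed point (new in these hunks) */
	FMT_ABGR16161616F,	/* 16bpc float */
};

static int bytes_per_pixel(enum surface_format f)
{
	switch (f) {
	case FMT_ARGB16161616:
	case FMT_ABGR16161616:
	case FMT_ABGR16161616F:
		return 8;	/* 4 channels x 16 bits = 64 bpp */
	default:
		return 4;	/* the 32bpp bucket */
	}
}

int main(void)
{
	printf("ABGR16161616 -> %d bytes/px\n", bytes_per_pixel(FMT_ABGR16161616));
	printf("ARGB2101010  -> %d bytes/px\n", bytes_per_pixel(FMT_ARGB2101010));
	return 0;
}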
index d4df4da..0e18df1 100644 (file)
@@ -236,6 +236,7 @@ static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format for
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS:
                return dcn_bw_rgb_sub_32;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
                return dcn_bw_rgb_sub_64;
@@ -375,6 +376,7 @@ static void pipe_ctx_to_e2e_pipe_params (
                input->src.viewport_height_c   = input->src.viewport_height / 2;
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
                input->src.source_format = dm_444_64;
index ef157b8..f03889b 100644 (file)
@@ -303,7 +303,7 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
                struct dc_stream_state *stream,
                struct dc_crtc_timing_adjust *adjust)
 {
-       int i = 0;
+       int i;
        bool ret = false;
 
        stream->adjust.v_total_max = adjust->v_total_max;
@@ -331,7 +331,7 @@ bool dc_stream_get_crtc_position(struct dc *dc,
 {
        /* TODO: Support multiple streams */
        const struct dc_stream_state *stream = streams[0];
-       int i = 0;
+       int i;
        bool ret = false;
        struct crtc_position position;
 
@@ -538,7 +538,7 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
                enum dc_dynamic_expansion option)
 {
        /* OPP FMT dyn expansion updates*/
-       int i = 0;
+       int i;
        struct pipe_ctx *pipe_ctx;
 
        for (i = 0; i < MAX_PIPES; i++) {
@@ -596,7 +596,7 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
 
 bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stream)
 {
-       int i = 0;
+       int i;
        bool ret = false;
        struct pipe_ctx *pipes;
 
@@ -613,7 +613,7 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre
 
 bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
 {
-       int i = 0;
+       int i;
        bool ret = false;
        struct pipe_ctx *pipes;
 
@@ -639,8 +639,7 @@ void dc_stream_set_static_screen_params(struct dc *dc,
                int num_streams,
                const struct dc_static_screen_params *params)
 {
-       int i = 0;
-       int j = 0;
+       int i, j;
        struct pipe_ctx *pipes_affected[MAX_PIPES];
        int num_pipes_affected = 0;
 
@@ -895,7 +894,7 @@ static void disable_all_writeback_pipes_for_stream(
 static void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context,
                                          struct dc_stream_state *stream, bool lock)
 {
-       int i = 0;
+       int i;
 
        /* Checks if interdependent update function pointer is NULL or not, takes care of DCE110 case */
        if (dc->hwss.interdependent_update_lock)
@@ -1155,7 +1154,7 @@ static void enable_timing_multisync(
                struct dc *dc,
                struct dc_state *ctx)
 {
-       int i = 0, multisync_count = 0;
+       int i, multisync_count = 0;
        int pipe_count = dc->res_pool->pipe_count;
        struct pipe_ctx *multisync_pipes[MAX_PIPES] = { NULL };
 
@@ -3335,18 +3334,10 @@ void dc_hardware_release(struct dc *dc)
 #endif
 
 /**
- *****************************************************************************
- *  Function: dc_enable_dmub_notifications
+ * dc_enable_dmub_notifications - Returns whether dmub notification can be enabled
+ * @dc: dc structure
  *
- *  @brief
- *             Returns whether dmub notification can be enabled
- *
- *  @param
- *             [in] dc: dc structure
- *
- *     @return
- *             True to enable dmub notifications, False otherwise
- *****************************************************************************
+ * Returns: True to enable dmub notifications, False otherwise
  */
 bool dc_enable_dmub_notifications(struct dc *dc)
 {
@@ -3355,21 +3346,13 @@ bool dc_enable_dmub_notifications(struct dc *dc)
 }
 
 /**
- *****************************************************************************
- *  Function: dc_process_dmub_aux_transfer_async
- *
- *  @brief
- *             Submits aux command to dmub via inbox message
- *             Sets port index appropriately for legacy DDC
- *
- *  @param
- *             [in] dc: dc structure
- *             [in] link_index: link index
- *             [in] payload: aux payload
+ * dc_process_dmub_aux_transfer_async - Submits aux command to dmub via inbox message
+ *                                      Sets port index appropriately for legacy DDC
+ * @dc: dc structure
+ * @link_index: link index
+ * @payload: aux payload
  *
- *     @return
- *             True if successful, False if failure
- *****************************************************************************
+ * Returns: True if successful, False if failure
  */
 bool dc_process_dmub_aux_transfer_async(struct dc *dc,
                                uint32_t link_index,
@@ -3428,16 +3411,8 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
 }
 
 /**
- *****************************************************************************
- *  Function: dc_disable_accelerated_mode
- *
- *  @brief
- *             disable accelerated mode
- *
- *  @param
- *             [in] dc: dc structure
- *
- *****************************************************************************
+ * dc_disable_accelerated_mode - disable accelerated mode
+ * @dc: dc structure
  */
 void dc_disable_accelerated_mode(struct dc *dc)
 {
index 9e08410..5a70f55 100644 (file)
@@ -25,8 +25,6 @@ static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA";
        link->ctx->logger
 #define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */
 
-#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE   0x50
-
        /* maximum pre emphasis level allowed for each voltage swing level*/
        static const enum dc_pre_emphasis
        voltage_swing_to_pre_emphasis[] = { PRE_EMPHASIS_LEVEL3,
@@ -39,14 +37,6 @@ enum {
        POST_LT_ADJ_REQ_TIMEOUT = 200
 };
 
-enum {
-       LINK_TRAINING_MAX_RETRY_COUNT = 5,
-       /* to avoid infinite loop where-in the receiver
-        * switches between different VS
-        */
-       LINK_TRAINING_MAX_CR_RETRY = 100
-};
-
 static bool decide_fallback_link_setting(
                struct dc_link_settings initial_link_settings,
                struct dc_link_settings *current_link_setting,
@@ -97,7 +87,7 @@ static uint32_t get_eq_training_aux_rd_interval(
        return wait_in_micro_secs;
 }
 
-static void wait_for_training_aux_rd_interval(
+void dp_wait_for_training_aux_rd_interval(
        struct dc_link *link,
        uint32_t wait_in_micro_secs)
 {
@@ -108,7 +98,7 @@ static void wait_for_training_aux_rd_interval(
                wait_in_micro_secs);
 }
 
-static enum dpcd_training_patterns
+enum dpcd_training_patterns
        dc_dp_training_pattern_to_dpcd_training_pattern(
        struct dc_link *link,
        enum dc_dp_training_pattern pattern)
@@ -206,11 +196,12 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li
        return DP_TRAINING_PATTERN_SEQUENCE_2;
 }
 
-static void dpcd_set_link_settings(
+enum dc_status dpcd_set_link_settings(
        struct dc_link *link,
        const struct link_training_settings *lt_settings)
 {
        uint8_t rate;
+       enum dc_status status;
 
        union down_spread_ctrl downspread = { {0} };
        union lane_count_set lane_count_set = { {0} };
@@ -225,15 +216,16 @@ static void dpcd_set_link_settings(
        lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0;
 
 
-       if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
+       if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
+                       lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) {
                lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
                                link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
        }
 
-       core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
+       status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
                &downspread.raw, sizeof(downspread));
 
-       core_link_write_dpcd(link, DP_LANE_COUNT_SET,
+       status = core_link_write_dpcd(link, DP_LANE_COUNT_SET,
                &lane_count_set.raw, 1);
 
        if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
@@ -249,12 +241,12 @@ static void dpcd_set_link_settings(
                        core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
                                        supported_link_rates, sizeof(supported_link_rates));
                }
-               core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
-               core_link_write_dpcd(link, DP_LINK_RATE_SET,
+               status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+               status = core_link_write_dpcd(link, DP_LINK_RATE_SET,
                                &lt_settings->link_settings.link_rate_set, 1);
        } else {
                rate = (uint8_t) (lt_settings->link_settings.link_rate);
-               core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
+               status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
        }
 
        if (rate) {
@@ -278,9 +270,11 @@ static void dpcd_set_link_settings(
                        DP_DOWNSPREAD_CTRL,
                        lt_settings->link_settings.link_spread);
        }
+
+       return status;
 }
 
-static uint8_t dc_dp_initialize_scrambling_data_symbols(
+uint8_t dc_dp_initialize_scrambling_data_symbols(
        struct dc_link *link,
        enum dc_dp_training_pattern pattern)
 {
@@ -429,7 +423,7 @@ static void dpcd_set_lt_pattern_and_lane_settings(
        link->cur_lane_setting = lt_settings->lane_settings[0];
 }
 
-static bool is_cr_done(enum dc_lane_count ln_count,
+bool dp_is_cr_done(enum dc_lane_count ln_count,
        union lane_status *dpcd_lane_status)
 {
        uint32_t lane;
@@ -468,7 +462,7 @@ static inline bool is_interlane_aligned(union lane_align_status_updated align_st
        return align_status.bits.INTERLANE_ALIGN_DONE == 1;
 }
 
-static void update_drive_settings(
+void dp_update_drive_settings(
                struct link_training_settings *dest,
                struct link_training_settings src)
 {
@@ -612,7 +606,7 @@ static void find_max_drive_settings(
 
 }
 
-static void get_lane_status_and_drive_settings(
+enum dc_status dp_get_lane_status_and_drive_settings(
        struct dc_link *link,
        const struct link_training_settings *link_training_setting,
        union lane_status *ln_status,
@@ -627,6 +621,7 @@ static void get_lane_status_and_drive_settings(
        union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
        struct link_training_settings request_settings = { {0} };
        uint32_t lane;
+       enum dc_status status;
 
        memset(req_settings, '\0', sizeof(struct link_training_settings));
 
@@ -637,7 +632,7 @@ static void get_lane_status_and_drive_settings(
                lane_adjust_offset = 3;
        }
 
-       core_link_read_dpcd(
+       status = core_link_read_dpcd(
                link,
                lane01_status_address,
                (uint8_t *)(dpcd_buf),
@@ -725,9 +720,10 @@ static void get_lane_status_and_drive_settings(
         * read DpcdAddress_AdjustRequestPostCursor2 = 0x020C
         */
 
+       return status;
 }
 
-static void dpcd_set_lane_settings(
+enum dc_status dpcd_set_lane_settings(
        struct dc_link *link,
        const struct link_training_settings *link_training_setting,
        uint32_t offset)
@@ -735,6 +731,7 @@ static void dpcd_set_lane_settings(
        union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
        uint32_t lane;
        unsigned int lane0_set_address;
+       enum dc_status status;
 
        lane0_set_address = DP_TRAINING_LANE0_SET;
 
@@ -762,7 +759,7 @@ static void dpcd_set_lane_settings(
                        PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
        }
 
-       core_link_write_dpcd(link,
+       status = core_link_write_dpcd(link,
                lane0_set_address,
                (uint8_t *)(dpcd_lane),
                link_training_setting->link_settings.lane_count);
@@ -808,9 +805,10 @@ static void dpcd_set_lane_settings(
        }
        link->cur_lane_setting = link_training_setting->lane_settings[0];
 
+       return status;
 }
 
-static bool is_max_vs_reached(
+bool dp_is_max_vs_reached(
        const struct link_training_settings *lt_settings)
 {
        uint32_t lane;
@@ -852,19 +850,19 @@ static bool perform_post_lt_adj_req_sequence(
                        union lane_align_status_updated
                                dpcd_lane_status_updated;
 
-                       get_lane_status_and_drive_settings(
-                       link,
-                       lt_settings,
-                       dpcd_lane_status,
-                       &dpcd_lane_status_updated,
-                       &req_settings,
-                       DPRX);
+                       dp_get_lane_status_and_drive_settings(
+                               link,
+                               lt_settings,
+                               dpcd_lane_status,
+                               &dpcd_lane_status_updated,
+                               &req_settings,
+                               DPRX);
 
                        if (dpcd_lane_status_updated.bits.
                                        POST_LT_ADJ_REQ_IN_PROGRESS == 0)
                                return true;
 
-                       if (!is_cr_done(lane_count, dpcd_lane_status))
+                       if (!dp_is_cr_done(lane_count, dpcd_lane_status))
                                return false;
 
                        if (!is_ch_eq_done(lane_count, dpcd_lane_status) ||
@@ -887,7 +885,7 @@ static bool perform_post_lt_adj_req_sequence(
                        }
 
                        if (req_drv_setting_changed) {
-                               update_drive_settings(
+                               dp_update_drive_settings(
                                        lt_settings, req_settings);
 
                                dc_link_dp_set_drive_settings(link,
@@ -939,7 +937,7 @@ static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_inte
        return aux_rd_interval_us;
 }
 
-static enum link_training_result get_cr_failure(enum dc_lane_count ln_count,
+enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
                                        union lane_status *dpcd_lane_status)
 {
        enum link_training_result result = LINK_TRAINING_SUCCESS;
@@ -1003,14 +1001,14 @@ static enum link_training_result perform_channel_equalization_sequence(
                                        translate_training_aux_read_interval(
                                                link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]);
 
-               wait_for_training_aux_rd_interval(
+               dp_wait_for_training_aux_rd_interval(
                                link,
                                wait_time_microsec);
 
                /* 4. Read lane status and requested
                 * drive settings as set by the sink*/
 
-               get_lane_status_and_drive_settings(
+               dp_get_lane_status_and_drive_settings(
                        link,
                        lt_settings,
                        dpcd_lane_status,
@@ -1019,7 +1017,7 @@ static enum link_training_result perform_channel_equalization_sequence(
                        offset);
 
                /* 5. check CR done*/
-               if (!is_cr_done(lane_count, dpcd_lane_status))
+               if (!dp_is_cr_done(lane_count, dpcd_lane_status))
                        return LINK_TRAINING_EQ_FAIL_CR;
 
                /* 6. check CHEQ done*/
@@ -1029,13 +1027,12 @@ static enum link_training_result perform_channel_equalization_sequence(
                        return LINK_TRAINING_SUCCESS;
 
                /* 7. update VS/PE/PC2 in lt_settings*/
-               update_drive_settings(lt_settings, req_settings);
+               dp_update_drive_settings(lt_settings, req_settings);
        }
 
        return LINK_TRAINING_EQ_FAIL_EQ;
 
 }
-#define TRAINING_AUX_RD_INTERVAL 100 //us
 
 static void start_clock_recovery_pattern_early(struct dc_link *link,
                struct link_training_settings *lt_settings,
@@ -1106,14 +1103,14 @@ static enum link_training_result perform_clock_recovery_sequence(
                if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
                        wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
 
-               wait_for_training_aux_rd_interval(
+               dp_wait_for_training_aux_rd_interval(
                                link,
                                wait_time_microsec);
 
                /* 4. Read lane status and requested drive
                * settings as set by the sink
                */
-               get_lane_status_and_drive_settings(
+               dp_get_lane_status_and_drive_settings(
                                link,
                                lt_settings,
                                dpcd_lane_status,
@@ -1122,11 +1119,11 @@ static enum link_training_result perform_clock_recovery_sequence(
                                offset);
 
                /* 5. check CR done*/
-               if (is_cr_done(lane_count, dpcd_lane_status))
+               if (dp_is_cr_done(lane_count, dpcd_lane_status))
                        return LINK_TRAINING_SUCCESS;
 
                /* 6. max VS reached*/
-               if (is_max_vs_reached(lt_settings))
+               if (dp_is_max_vs_reached(lt_settings))
                        break;
 
                /* 7. same lane settings*/
@@ -1141,7 +1138,7 @@ static enum link_training_result perform_clock_recovery_sequence(
                        retries_cr = 0;
 
                /* 8. update VS/PE/PC2 in lt_settings*/
-               update_drive_settings(lt_settings, req_settings);
+               dp_update_drive_settings(lt_settings, req_settings);
 
                retry_count++;
        }
@@ -1154,7 +1151,7 @@ static enum link_training_result perform_clock_recovery_sequence(
 
        }
 
-       return get_cr_failure(lane_count, dpcd_lane_status);
+       return dp_get_cr_failure(lane_count, dpcd_lane_status);
 }
 
 static inline enum link_training_result dp_transition_to_video_idle(
@@ -1173,8 +1170,16 @@ static inline enum link_training_result dp_transition_to_video_idle(
         * TPS4 must be used instead of POST_LT_ADJ_REQ.
         */
        if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
-                       lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4)
+                       lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4) {
+               /* delay 5ms after Main Link output idle pattern and then check
+                * DPCD 0202h.
+                */
+               if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) {
+                       msleep(5);
+                       status = dp_check_link_loss_status(link, lt_settings);
+               }
                return status;
+       }
 
        if (status == LINK_TRAINING_SUCCESS &&
                perform_post_lt_adj_req_sequence(link, lt_settings) == false)
@@ -1327,9 +1332,14 @@ static inline void decide_8b_10b_training_settings(
                lt_settings->enhanced_framing = *overrides->enhanced_framing;
        else
                lt_settings->enhanced_framing = 1;
+
+       if (link->preferred_training_settings.fec_enable != NULL)
+               lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable;
+       else
+               lt_settings->should_set_fec_ready = true;
 }
 
-static void decide_training_settings(
+void dp_decide_training_settings(
                struct dc_link *link,
                const struct dc_link_settings *link_settings,
                const struct dc_link_training_overrides *overrides,
@@ -1365,18 +1375,18 @@ uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)
        return 0; // invalid value
 }
 
-static void configure_lttpr_mode_transparent(struct dc_link *link)
+enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
 {
        uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
 
        DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__);
-       core_link_write_dpcd(link,
+       return core_link_write_dpcd(link,
                        DP_PHY_REPEATER_MODE,
                        (uint8_t *)&repeater_mode,
                        sizeof(repeater_mode));
 }
 
-static void configure_lttpr_mode_non_transparent(
+enum dc_status configure_lttpr_mode_non_transparent(
                struct dc_link *link,
                const struct link_training_settings *lt_settings)
 {
@@ -1431,6 +1441,8 @@ static void configure_lttpr_mode_non_transparent(
                        }
                }
        }
+
+       return result;
 }
 
 static void repeater_training_done(struct dc_link *link, uint32_t offset)
@@ -1564,7 +1576,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
 {
        struct link_training_settings lt_settings;
 
-       decide_training_settings(
+       dp_decide_training_settings(
                        link,
                        link_setting,
                        &link->preferred_training_settings,
@@ -1579,7 +1591,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
        dp_set_hw_lane_settings(link, &lt_settings, DPRX);
 
        /* wait receiver to lock-on*/
-       wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
+       dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time);
 
        /* 2. Perform_channel_equalization_sequence. */
 
@@ -1590,7 +1602,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
        dp_set_hw_lane_settings(link, &lt_settings, DPRX);
 
        /* wait receiver to lock-on. */
-       wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
+       dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time);
 
        /* 3. Perform_link_training_int. */
 
@@ -1602,42 +1614,61 @@ bool dc_link_dp_perform_link_training_skip_aux(
        return true;
 }
 
-enum link_training_result dc_link_dp_perform_link_training(
-       struct dc_link *link,
-       const struct dc_link_settings *link_setting,
-       bool skip_video_pattern)
+enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_training_settings *lt_settings)
 {
-       enum link_training_result status = LINK_TRAINING_SUCCESS;
-       struct link_training_settings lt_settings;
+       enum dc_status status = DC_OK;
 
-       bool fec_enable;
-       uint8_t repeater_cnt;
-       uint8_t repeater_id;
+       if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT)
+               status = configure_lttpr_mode_transparent(link);
+
+       else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+               status = configure_lttpr_mode_non_transparent(link, lt_settings);
+
+       return status;
+}
+
+static void dpcd_exit_training_mode(struct dc_link *link)
+{
+
+       /* clear training pattern set */
+       dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
+}
+
+enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
+               struct link_training_settings *lt_settings)
+{
+       enum dp_link_encoding encoding =
+                       dp_get_link_encoding_format(
+                                       &lt_settings->link_settings);
+       enum dc_status status;
 
-       decide_training_settings(
+       status = core_link_write_dpcd(
                        link,
-                       link_setting,
-                       &link->preferred_training_settings,
-                       &lt_settings);
+                       DP_MAIN_LINK_CHANNEL_CODING_SET,
+                       (uint8_t *) &encoding,
+                       1);
+       DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X MAIN_LINK_CHANNEL_CODING_SET = %x\n",
+                                       __func__,
+                                       DP_MAIN_LINK_CHANNEL_CODING_SET,
+                                       encoding);
 
-       /* Configure lttpr mode */
-       if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
-               configure_lttpr_mode_non_transparent(link, &lt_settings);
-       else if (link->lttpr_mode == LTTPR_MODE_TRANSPARENT)
-               configure_lttpr_mode_transparent(link);
+       return status;
+}
 
-       if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
-               start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
+static enum link_training_result dp_perform_8b_10b_link_training(
+               struct dc_link *link,
+               struct link_training_settings *lt_settings)
+{
+       enum link_training_result status = LINK_TRAINING_SUCCESS;
 
-       /* 1. set link rate, lane count and spread. */
-       dpcd_set_link_settings(link, &lt_settings);
+       uint8_t repeater_cnt;
+       uint8_t repeater_id;
 
-       if (link->preferred_training_settings.fec_enable != NULL)
-               fec_enable = *link->preferred_training_settings.fec_enable;
-       else
-               fec_enable = true;
+       if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+               start_clock_recovery_pattern_early(link, lt_settings, DPRX);
 
-       dp_set_fec_ready(link, fec_enable);
+       /* 1. set link rate, lane count and spread. */
+       dpcd_set_link_settings(link, lt_settings);
 
        if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
 
@@ -1648,13 +1679,13 @@ enum link_training_result dc_link_dp_perform_link_training(
 
                for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS);
                                repeater_id--) {
-                       status = perform_clock_recovery_sequence(link, &lt_settings, repeater_id);
+                       status = perform_clock_recovery_sequence(link, lt_settings, repeater_id);
 
                        if (status != LINK_TRAINING_SUCCESS)
                                break;
 
                        status = perform_channel_equalization_sequence(link,
-                                       &lt_settings,
+                                       lt_settings,
                                        repeater_id);
 
                        if (status != LINK_TRAINING_SUCCESS)
@@ -1665,36 +1696,62 @@ enum link_training_result dc_link_dp_perform_link_training(
        }
 
        if (status == LINK_TRAINING_SUCCESS) {
-               status = perform_clock_recovery_sequence(link, &lt_settings, DPRX);
+               status = perform_clock_recovery_sequence(link, lt_settings, DPRX);
        if (status == LINK_TRAINING_SUCCESS) {
                status = perform_channel_equalization_sequence(link,
-                                       &lt_settings,
+                                       lt_settings,
                                        DPRX);
                }
        }
 
-       /* 3. set training not in progress*/
-       dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
-       if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) {
+       return status;
+}
+
+enum link_training_result dc_link_dp_perform_link_training(
+       struct dc_link *link,
+       const struct dc_link_settings *link_settings,
+       bool skip_video_pattern)
+{
+       enum link_training_result status = LINK_TRAINING_SUCCESS;
+       struct link_training_settings lt_settings;
+       enum dp_link_encoding encoding =
+                       dp_get_link_encoding_format(link_settings);
+
+       /* decide training settings */
+       dp_decide_training_settings(
+                       link,
+                       link_settings,
+                       &link->preferred_training_settings,
+                       &lt_settings);
+
+       /* reset previous training states */
+       dpcd_exit_training_mode(link);
+
+       /* configure link prior to entering training mode */
+       dpcd_configure_lttpr_mode(link, &lt_settings);
+       dp_set_fec_ready(link, lt_settings.should_set_fec_ready);
+       dpcd_configure_channel_coding(link, &lt_settings);
+
+       /* enter training mode:
+        * Per the DP specs, starting from here the DPTX device shall not issue
+        * non-LT AUX transactions inside training mode.
+        */
+       if (encoding == DP_8b_10b_ENCODING)
+               status = dp_perform_8b_10b_link_training(link, &lt_settings);
+       else
+               ASSERT(0);
+
+       /* exit training mode and switch to video idle */
+       dpcd_exit_training_mode(link);
+       if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern)
                status = dp_transition_to_video_idle(link,
                                &lt_settings,
                                status);
-       }
-
-       /* delay 5ms after Main Link output idle pattern and then check
-        * DPCD 0202h.
-        */
-       if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) {
-               msleep(5);
-               status = dp_check_link_loss_status(link, &lt_settings);
-       }
 
-       /* 6. print status message*/
+       /* dump debug data */
        print_status_message(link, &lt_settings, status);
-
        if (status != LINK_TRAINING_SUCCESS)
                link->ctx->dc->debug_data.ltFailCount++;
-
        return status;
 }
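A minimal caller sketch, assuming a detected link and verified capabilities from
elsewhere in dc; handle_training_failure is a hypothetical stand-in for the
caller's fallback path, not a real function.

    enum link_training_result lt_status;

    lt_status = dc_link_dp_perform_link_training(link,
                    &link->verified_link_cap,
                    false /* skip_video_pattern */);
    if (lt_status != LINK_TRAINING_SUCCESS)
            handle_training_failure(link, lt_status);  /* hypothetical */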
 
@@ -1899,7 +1956,7 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
        enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
        bool fec_enable = false;
 
-       decide_training_settings(
+       dp_decide_training_settings(
                link,
                link_settings,
                lt_overrides,
@@ -2070,7 +2127,7 @@ enum dc_status read_hpd_rx_irq_data(
        return retval;
 }
 
-static bool hpd_rx_irq_check_link_loss_status(
+bool hpd_rx_irq_check_link_loss_status(
        struct dc_link *link,
        union hpd_irq_data *hpd_irq_dpcd_data)
 {
@@ -4606,50 +4663,74 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link)
        return DP_PANEL_MODE_DEFAULT;
 }
 
-void dp_set_fec_ready(struct dc_link *link, bool ready)
+enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready)
 {
        /* FEC has to be "set ready" before the link training.
         * The policy is to always train with FEC
         * if the sink supports it and leave it enabled on link.
         * If FEC is not supported, disable it.
         */
-       struct link_encoder *link_enc = link->link_enc;
+       struct link_encoder *link_enc = NULL;
+       enum dc_status status = DC_OK;
        uint8_t fec_config = 0;
 
+       /* Access link encoder based on whether it is statically
+        * or dynamically assigned to a link.
+        */
+       if (link->is_dig_mapping_flexible &&
+                       link->dc->res_pool->funcs->link_encs_assign)
+               link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+       else
+               link_enc = link->link_enc;
+       ASSERT(link_enc);
+
        if (!dc_link_should_enable_fec(link))
-               return;
+               return status;
 
        if (link_enc->funcs->fec_set_ready &&
                        link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
                if (ready) {
                        fec_config = 1;
-                       if (core_link_write_dpcd(link,
+                       status = core_link_write_dpcd(link,
                                        DP_FEC_CONFIGURATION,
                                        &fec_config,
-                                       sizeof(fec_config)) == DC_OK) {
+                                       sizeof(fec_config));
+                       if (status == DC_OK) {
                                link_enc->funcs->fec_set_ready(link_enc, true);
                                link->fec_state = dc_link_fec_ready;
                        } else {
-                               link->link_enc->funcs->fec_set_ready(link->link_enc, false);
+                               link_enc->funcs->fec_set_ready(link_enc, false);
                                link->fec_state = dc_link_fec_not_ready;
                                dm_error("dpcd write failed to set fec_ready");
                        }
                } else if (link->fec_state == dc_link_fec_ready) {
                        fec_config = 0;
-                       core_link_write_dpcd(link,
+                       status = core_link_write_dpcd(link,
                                        DP_FEC_CONFIGURATION,
                                        &fec_config,
                                        sizeof(fec_config));
-                       link->link_enc->funcs->fec_set_ready(
-                                       link->link_enc, false);
+                       link_enc->funcs->fec_set_ready(link_enc, false);
                        link->fec_state = dc_link_fec_not_ready;
                }
        }
+
+       return status;
 }
 
 void dp_set_fec_enable(struct dc_link *link, bool enable)
 {
-       struct link_encoder *link_enc = link->link_enc;
+       struct link_encoder *link_enc = NULL;
+
+       /* Access link encoder based on whether it is statically
+        * or dynamically assigned to a link.
+        */
+       if (link->is_dig_mapping_flexible &&
+                       link->dc->res_pool->funcs->link_encs_assign)
+               link_enc = link_enc_cfg_get_link_enc_used_by_link(
+                               link->dc->current_state, link);
+       else
+               link_enc = link->link_enc;
+       ASSERT(link_enc);
 
        if (!dc_link_should_enable_fec(link))
                return;
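The encoder lookup now appears verbatim in both dp_set_fec_ready() and
dp_set_fec_enable(); factored out it is essentially the helper below. This is a
sketch only, no such helper is added by this patch.

    /* Resolve the link encoder whether it is statically owned by the link
     * or dynamically assigned via flexible DIG mapping.
     */
    static struct link_encoder *get_link_enc_for_link(struct dc_link *link)
    {
            if (link->is_dig_mapping_flexible &&
                            link->dc->res_pool->funcs->link_encs_assign)
                    return link_enc_cfg_get_link_enc_used_by_link(
                                    link->dc->current_state, link);
            return link->link_enc;
    }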
index 8a158a1..cd864cc 100644 (file)
@@ -611,6 +611,7 @@ static enum pixel_format convert_pixel_format_to_dalsurface(
                dal_pixel_format = PIXEL_FORMAT_420BPP10;
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
        default:
                dal_pixel_format = PIXEL_FORMAT_UNKNOWN;
                break;
@@ -701,124 +702,23 @@ static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *spli
        }
 }
 
-static void calculate_viewport(struct pipe_ctx *pipe_ctx)
+/*
+ * This is a preliminary vp size calculation to allow us to check taps support.
+ * The result is completely overridden afterwards.
+ */
+static void calculate_viewport_size(struct pipe_ctx *pipe_ctx)
 {
-       const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
-       const struct dc_stream_state *stream = pipe_ctx->stream;
        struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
-       struct rect surf_src = plane_state->src_rect;
-       struct rect clip, dest;
-       int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
-                       || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
-       int split_count = 0;
-       int split_idx = 0;
-       bool orthogonal_rotation, flip_y_start, flip_x_start;
-
-       calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
 
-       if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
-               stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
-               split_count = 0;
-               split_idx = 0;
-       }
-
-       /* The actual clip is an intersection between stream
-        * source and surface clip
-        */
-       dest = plane_state->dst_rect;
-       clip.x = stream->src.x > plane_state->clip_rect.x ?
-                       stream->src.x : plane_state->clip_rect.x;
-
-       clip.width = stream->src.x + stream->src.width <
-                       plane_state->clip_rect.x + plane_state->clip_rect.width ?
-                       stream->src.x + stream->src.width - clip.x :
-                       plane_state->clip_rect.x + plane_state->clip_rect.width - clip.x ;
-
-       clip.y = stream->src.y > plane_state->clip_rect.y ?
-                       stream->src.y : plane_state->clip_rect.y;
-
-       clip.height = stream->src.y + stream->src.height <
-                       plane_state->clip_rect.y + plane_state->clip_rect.height ?
-                       stream->src.y + stream->src.height - clip.y :
-                       plane_state->clip_rect.y + plane_state->clip_rect.height - clip.y ;
-
-       /*
-        * Need to calculate how scan origin is shifted in vp space
-        * to correctly rotate clip and dst
-        */
-       get_vp_scan_direction(
-                       plane_state->rotation,
-                       plane_state->horizontal_mirror,
-                       &orthogonal_rotation,
-                       &flip_y_start,
-                       &flip_x_start);
-
-       if (orthogonal_rotation) {
-               swap(clip.x, clip.y);
-               swap(clip.width, clip.height);
-               swap(dest.x, dest.y);
-               swap(dest.width, dest.height);
-       }
-       if (flip_x_start) {
-               clip.x = dest.x + dest.width - clip.x - clip.width;
-               dest.x = 0;
-       }
-       if (flip_y_start) {
-               clip.y = dest.y + dest.height - clip.y - clip.height;
-               dest.y = 0;
-       }
-
-       /* offset = surf_src.ofs + (clip.ofs - surface->dst_rect.ofs) * scl_ratio
-        * num_pixels = clip.num_pix * scl_ratio
-        */
-       data->viewport.x = surf_src.x + (clip.x - dest.x) * surf_src.width / dest.width;
-       data->viewport.width = clip.width * surf_src.width / dest.width;
-
-       data->viewport.y = surf_src.y + (clip.y - dest.y) * surf_src.height / dest.height;
-       data->viewport.height = clip.height * surf_src.height / dest.height;
-
-       /* Handle split */
-       if (split_count) {
-               /* extra pixels in the division remainder need to go to pipes after
-                * the extra pixel index minus one(epimo) defined here as:
-                */
-               int epimo = 0;
-
-               if (orthogonal_rotation) {
-                       if (flip_y_start)
-                               split_idx = split_count - split_idx;
-
-                       epimo = split_count - data->viewport.height % (split_count + 1);
-
-                       data->viewport.y += (data->viewport.height / (split_count + 1)) * split_idx;
-                       if (split_idx > epimo)
-                               data->viewport.y += split_idx - epimo - 1;
-                       data->viewport.height = data->viewport.height / (split_count + 1) + (split_idx > epimo ? 1 : 0);
-               } else {
-                       if (flip_x_start)
-                               split_idx = split_count - split_idx;
-
-                       epimo = split_count - data->viewport.width % (split_count + 1);
-
-                       data->viewport.x += (data->viewport.width / (split_count + 1)) * split_idx;
-                       if (split_idx > epimo)
-                               data->viewport.x += split_idx - epimo - 1;
-                       data->viewport.width = data->viewport.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
-               }
+       data->viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz, data->recout.width));
+       data->viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert, data->recout.height));
+       data->viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.horz_c, data->recout.width));
+       data->viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(data->ratios.vert_c, data->recout.height));
+       if (pipe_ctx->plane_state->rotation == ROTATION_ANGLE_90 ||
+                       pipe_ctx->plane_state->rotation == ROTATION_ANGLE_270) {
+               swap(data->viewport.width, data->viewport.height);
+               swap(data->viewport_c.width, data->viewport_c.height);
        }
-
-       /* Round down, compensate in init */
-       data->viewport_c.x = data->viewport.x / vpc_div;
-       data->viewport_c.y = data->viewport.y / vpc_div;
-       data->inits.h_c = (data->viewport.x % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
-       data->inits.v_c = (data->viewport.y % vpc_div) != 0 ? dc_fixpt_half : dc_fixpt_zero;
-
-       /* Round up, assume original video size always even dimensions */
-       data->viewport_c.width = (data->viewport.width + vpc_div - 1) / vpc_div;
-       data->viewport_c.height = (data->viewport.height + vpc_div - 1) / vpc_div;
-
-       data->viewport_unadjusted = data->viewport;
-       data->viewport_c_unadjusted = data->viewport_c;
 }
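A worked instance of the new math using the same dc fixed-point helpers; the
numbers are illustrative and dc_fixpt_from_fraction() is the existing fixed31_32
constructor.

    /* A 1920-wide recout fed by a 1.5x horizontal downscale needs a
     * viewport of ceil(1.5 * 1920) = 2880 source pixels.
     */
    struct fixed31_32 horz = dc_fixpt_from_fraction(3, 2);   /* ratio = 1.5 */
    int recout_width = 1920;
    int vp_width = dc_fixpt_ceil(dc_fixpt_mul_int(horz, recout_width)); /* 2880 */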
 
 static void calculate_recout(struct pipe_ctx *pipe_ctx)
@@ -827,26 +727,21 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
        const struct dc_stream_state *stream = pipe_ctx->stream;
        struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
        struct rect surf_clip = plane_state->clip_rect;
-       bool pri_split_tb = pipe_ctx->bottom_pipe &&
-                       pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state &&
-                       stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
-       bool sec_split_tb = pipe_ctx->top_pipe &&
-                       pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state &&
-                       stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
-       int split_count = 0;
-       int split_idx = 0;
+       bool split_tb = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+       int split_count, split_idx;
 
        calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
+       if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
+               split_idx = 0;
 
        /*
         * Only the leftmost ODM pipe should be offset by a nonzero distance
         */
-       if (!pipe_ctx->prev_odm_pipe) {
+       if (!pipe_ctx->prev_odm_pipe || split_idx == split_count) {
                data->recout.x = stream->dst.x;
                if (stream->src.x < surf_clip.x)
                        data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
                                                / stream->src.width;
-
        } else
                data->recout.x = 0;
 
@@ -867,26 +762,31 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
        if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
                data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
 
-       /* Handle h & v split, handle rotation using viewport */
-       if (sec_split_tb) {
-               data->recout.y += data->recout.height / 2;
-               /* Floor primary pipe, ceil 2ndary pipe */
-               data->recout.height = (data->recout.height + 1) / 2;
-       } else if (pri_split_tb)
+       /* Handle h & v split */
+       if (split_tb) {
+               ASSERT(data->recout.height % 2 == 0);
                data->recout.height /= 2;
-       else if (split_count) {
-               /* extra pixels in the division remainder need to go to pipes after
-                * the extra pixel index minus one(epimo) defined here as:
-                */
-               int epimo = split_count - data->recout.width % (split_count + 1);
-
-               /*no recout offset due to odm */
+       } else if (split_count) {
                if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
+                       /* extra pixels in the division remainder need to go to pipes after
+                        * the extra pixel index minus one (epimo), defined here as:
+                        */
+                       int epimo = split_count - data->recout.width % (split_count + 1);
+
                        data->recout.x += (data->recout.width / (split_count + 1)) * split_idx;
                        if (split_idx > epimo)
                                data->recout.x += split_idx - epimo - 1;
+                       ASSERT(stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE || data->recout.width % 2 == 0);
+                       data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
+               } else {
+                       /* odm */
+                       if (split_idx == split_count) {
+                               /* rightmost pipe is the remainder recout */
+                               data->recout.width -= data->h_active * split_count - data->recout.x;
+                               data->recout.x = 0;
+                       } else
+                               data->recout.width = data->h_active - data->recout.x;
                }
-               data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
        }
 }
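The epimo remainder distribution is easiest to follow with concrete numbers. A
runnable model with illustrative values: a 1925-pixel recout over three pipes
(split_count = 2) yields widths 641, 642, 642 at x offsets 0, 641, 1283.

    #include <stdio.h>

    int main(void)
    {
            int width = 1925, split_count = 2;
            int epimo = split_count - width % (split_count + 1);   /* 2 - 2 = 0 */

            for (int idx = 0; idx <= split_count; idx++) {
                    int x = (width / (split_count + 1)) * idx;
                    if (idx > epimo)
                            x += idx - epimo - 1;
                    int w = width / (split_count + 1) + (idx > epimo ? 1 : 0);
                    printf("pipe %d: x=%d width=%d\n", idx, x, w);
            }
            return 0;   /* widths sum back to 1925 with no pixel lost */
    }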
 
@@ -940,9 +840,15 @@ static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
                        pipe_ctx->plane_res.scl_data.ratios.vert_c, 19);
 }
 
-static inline void adjust_vp_and_init_for_seamless_clip(
+
+/*
+ * We calculate vp offset, size and inits here based entirely on scaling
+ * ratios and recout for pixel-perfect pipe combine.
+ */
+static void calculate_init_and_vp(
                bool flip_scan_dir,
-               int recout_skip,
+               int recout_offset_within_recout_full,
+               int recout_size,
                int src_size,
                int taps,
                struct fixed31_32 ratio,
@@ -950,91 +856,87 @@ static inline void adjust_vp_and_init_for_seamless_clip(
                int *vp_offset,
                int *vp_size)
 {
-       if (!flip_scan_dir) {
-               /* Adjust for viewport end clip-off */
-               if ((*vp_offset + *vp_size) < src_size) {
-                       int vp_clip = src_size - *vp_size - *vp_offset;
-                       int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
-
-                       int_part = int_part > 0 ? int_part : 0;
-                       *vp_size += int_part < vp_clip ? int_part : vp_clip;
-               }
-
-               /* Adjust for non-0 viewport offset */
-               if (*vp_offset) {
-                       int int_part;
-
-                       *init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
-                       int_part = dc_fixpt_floor(*init) - *vp_offset;
-                       if (int_part < taps) {
-                               int int_adj = *vp_offset >= (taps - int_part) ?
-                                                       (taps - int_part) : *vp_offset;
-                               *vp_offset -= int_adj;
-                               *vp_size += int_adj;
-                               int_part += int_adj;
-                       } else if (int_part > taps) {
-                               *vp_offset += int_part - taps;
-                               *vp_size -= int_part - taps;
-                               int_part = taps;
-                       }
-                       init->value &= 0xffffffff;
-                       *init = dc_fixpt_add_int(*init, int_part);
-               }
-       } else {
-               /* Adjust for non-0 viewport offset */
-               if (*vp_offset) {
-                       int int_part = dc_fixpt_floor(dc_fixpt_sub(*init, ratio));
-
-                       int_part = int_part > 0 ? int_part : 0;
-                       *vp_size += int_part < *vp_offset ? int_part : *vp_offset;
-                       *vp_offset -= int_part < *vp_offset ? int_part : *vp_offset;
-               }
+       struct fixed31_32 temp;
+       int int_part;
 
-               /* Adjust for viewport end clip-off */
-               if ((*vp_offset + *vp_size) < src_size) {
-                       int int_part;
-                       int end_offset = src_size - *vp_offset - *vp_size;
-
-                       /*
-                        * this is init if vp had no offset, keep in mind this is from the
-                        * right side of vp due to scan direction
-                        */
-                       *init = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_skip));
-                       /*
-                        * this is the difference between first pixel of viewport available to read
-                        * and init position, takning into account scan direction
-                        */
-                       int_part = dc_fixpt_floor(*init) - end_offset;
-                       if (int_part < taps) {
-                               int int_adj = end_offset >= (taps - int_part) ?
-                                                       (taps - int_part) : end_offset;
-                               *vp_size += int_adj;
-                               int_part += int_adj;
-                       } else if (int_part > taps) {
-                               *vp_size += int_part - taps;
-                               int_part = taps;
-                       }
-                       init->value &= 0xffffffff;
-                       *init = dc_fixpt_add_int(*init, int_part);
-               }
+       /*
+        * The first tap starts sampling at pixel number <init_int_part>, corresponding
+        * to recout pixel 1. The next recout pixel samples the int part of
+        * <init + scaling ratio>, and so on.
+        * All following calculations are based on this logic.
+        *
+        * Init calculated according to formula:
+        *      init = (scaling_ratio + number_of_taps + 1) / 2
+        *      init_bot = init + scaling_ratio
+        *      to get pixel perfect combine add the fraction from calculating vp offset
+        */
+       temp = dc_fixpt_mul_int(ratio, recout_offset_within_recout_full);
+       *vp_offset = dc_fixpt_floor(temp);
+       temp.value &= 0xffffffff;
+       *init = dc_fixpt_truncate(dc_fixpt_add(dc_fixpt_div_int(
+                       dc_fixpt_add_int(ratio, taps + 1), 2), temp), 19);
+       /*
+        * If viewport has non 0 offset and there are more taps than covered by init then
+        * we should decrease the offset and increase init so we are never sampling
+        * outside of viewport.
+        */
+       int_part = dc_fixpt_floor(*init);
+       if (int_part < taps) {
+               int_part = taps - int_part;
+               if (int_part > *vp_offset)
+                       int_part = *vp_offset;
+               *vp_offset -= int_part;
+               *init = dc_fixpt_add_int(*init, int_part);
        }
+       /*
+        * If taps are sampling outside of the viewport at the end of recout and there
+        * are more pixels available in the surface, we should increase the viewport
+        * size; regardless, set vp to only what is used.
+        */
+       temp = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_size - 1));
+       *vp_size = dc_fixpt_floor(temp);
+       if (*vp_size + *vp_offset > src_size)
+               *vp_size = src_size - *vp_offset;
+
+       /* We did all the math assuming we scan in the same direction the display does;
+        * mirror/rotation changes how the vp scans vs how it is offset. If the scan
+        * direction is flipped we simply calculate the offset from the other side of the plane.
+        * Note that outside of viewport all scaling hardware works in recout space.
+        */
+       if (flip_scan_dir)
+               *vp_offset = src_size - *vp_offset - *vp_size;
 }
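A runnable double-precision model of the offset/init math above, with
illustrative numbers: 4 taps, a 1.5x ratio, and a pipe whose recout starts 10
recout pixels into recout_full (the vp_size step is analogous and omitted).

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            double ratio = 1.5;
            int taps = 4, recout_offset = 10;

            double t = ratio * recout_offset;          /* 15.0 */
            int vp_offset = (int)floor(t);             /* 15 */
            double init = (ratio + taps + 1) / 2 + (t - vp_offset);  /* 3.25 */

            /* floor(init) = 3 < taps = 4: move one pixel from the offset
             * into init so the filter never samples left of the viewport
             */
            int int_part = (int)floor(init);
            if (int_part < taps) {
                    int adj = taps - int_part;
                    if (adj > vp_offset)
                            adj = vp_offset;
                    vp_offset -= adj;                  /* 14 */
                    init += adj;                       /* 4.25 */
            }
            printf("vp_offset=%d init=%.2f\n", vp_offset, init);
            return 0;
    }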
 
-static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
+static void calculate_inits_and_viewports(struct pipe_ctx *pipe_ctx)
 {
        const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
        const struct dc_stream_state *stream = pipe_ctx->stream;
-       struct pipe_ctx *odm_pipe = pipe_ctx;
        struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
-       struct rect src = pipe_ctx->plane_state->src_rect;
-       int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
+       struct rect src = plane_state->src_rect;
        int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
-                       || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+                               || data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
+       int split_count, split_idx, ro_lb, ro_tb, recout_full_x, recout_full_y;
        bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
-       int odm_idx = 0;
 
+       calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
        /*
-        * Need to calculate the scan direction for viewport to make adjustments
+        * recout_full is what the recout would have been if we didn't clip
+        * the source plane at all. We only care about the left (ro_lb) and top (ro_tb)
+        * offsets of recout within recout full because those are the directions
+        * we scan from and therefore the only ones that affect inits.
+        */
+       recout_full_x = stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
+                       * stream->dst.width / stream->src.width;
+       recout_full_y = stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
+                       * stream->dst.height / stream->src.height;
+       if (pipe_ctx->prev_odm_pipe && split_idx)
+               ro_lb = data->h_active * split_idx - recout_full_x;
+       else
+               ro_lb = data->recout.x - recout_full_x;
+       ro_tb = data->recout.y - recout_full_y;
+       ASSERT(ro_lb >= 0 && ro_tb >= 0);
+
+       /*
+        * Work in recout rotation since that requires less transformations
         */
        get_vp_scan_direction(
                        plane_state->rotation,
@@ -1043,145 +945,62 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
                        &flip_vert_scan_dir,
                        &flip_horz_scan_dir);
 
-       /* Calculate src rect rotation adjusted to recout space */
-       surf_size_h = src.x + src.width;
-       surf_size_v = src.y + src.height;
-       if (flip_horz_scan_dir)
-               src.x = 0;
-       if (flip_vert_scan_dir)
-               src.y = 0;
        if (orthogonal_rotation) {
-               swap(src.x, src.y);
                swap(src.width, src.height);
+               swap(flip_vert_scan_dir, flip_horz_scan_dir);
        }
 
-       /*modified recout_skip_h calculation due to odm having no recout offset*/
-       while (odm_pipe->prev_odm_pipe) {
-               odm_idx++;
-               odm_pipe = odm_pipe->prev_odm_pipe;
-       }
-       /*odm_pipe is the leftmost pipe in the ODM group*/
-       recout_skip_h = odm_idx * data->recout.width;
-
-       /* Recout matching initial vp offset = recout_offset - (stream dst offset +
-        *                      ((surf dst offset - stream src offset) * 1/ stream scaling ratio)
-        *                      - (surf surf_src offset * 1/ full scl ratio))
-        */
-       recout_skip_h += odm_pipe->plane_res.scl_data.recout.x
-                               - (stream->dst.x + (plane_state->dst_rect.x - stream->src.x)
-                                       * stream->dst.width / stream->src.width -
-                                       src.x * plane_state->dst_rect.width / src.width
-                                       * stream->dst.width / stream->src.width);
-
-
-       recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
-                                       * stream->dst.height / stream->src.height -
-                                       src.y * plane_state->dst_rect.height / src.height
-                                       * stream->dst.height / stream->src.height);
-       if (orthogonal_rotation)
-               swap(recout_skip_h, recout_skip_v);
-       /*
-        * Init calculated according to formula:
-        *      init = (scaling_ratio + number_of_taps + 1) / 2
-        *      init_bot = init + scaling_ratio
-        *      init_c = init + truncated_vp_c_offset(from calculate viewport)
-        */
-       data->inits.h = dc_fixpt_truncate(dc_fixpt_div_int(
-                       dc_fixpt_add_int(data->ratios.horz, data->taps.h_taps + 1), 2), 19);
-
-       data->inits.h_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.h_c, dc_fixpt_div_int(
-                       dc_fixpt_add_int(data->ratios.horz_c, data->taps.h_taps_c + 1), 2)), 19);
-
-       data->inits.v = dc_fixpt_truncate(dc_fixpt_div_int(
-                       dc_fixpt_add_int(data->ratios.vert, data->taps.v_taps + 1), 2), 19);
-
-       data->inits.v_c = dc_fixpt_truncate(dc_fixpt_add(data->inits.v_c, dc_fixpt_div_int(
-                       dc_fixpt_add_int(data->ratios.vert_c, data->taps.v_taps_c + 1), 2)), 19);
-
-       /*
-        * Taps, inits and scaling ratios are in recout space need to rotate
-        * to viewport rotation before adjustment
-        */
-       adjust_vp_and_init_for_seamless_clip(
+       calculate_init_and_vp(
                        flip_horz_scan_dir,
-                       recout_skip_h,
-                       surf_size_h,
-                       orthogonal_rotation ? data->taps.v_taps : data->taps.h_taps,
-                       orthogonal_rotation ? data->ratios.vert : data->ratios.horz,
-                       orthogonal_rotation ? &data->inits.v : &data->inits.h,
+                       ro_lb,
+                       data->recout.width,
+                       src.width,
+                       data->taps.h_taps,
+                       data->ratios.horz,
+                       &data->inits.h,
                        &data->viewport.x,
                        &data->viewport.width);
-       adjust_vp_and_init_for_seamless_clip(
+       calculate_init_and_vp(
                        flip_horz_scan_dir,
-                       recout_skip_h,
-                       surf_size_h / vpc_div,
-                       orthogonal_rotation ? data->taps.v_taps_c : data->taps.h_taps_c,
-                       orthogonal_rotation ? data->ratios.vert_c : data->ratios.horz_c,
-                       orthogonal_rotation ? &data->inits.v_c : &data->inits.h_c,
+                       ro_lb,
+                       data->recout.width,
+                       src.width / vpc_div,
+                       data->taps.h_taps_c,
+                       data->ratios.horz_c,
+                       &data->inits.h_c,
                        &data->viewport_c.x,
                        &data->viewport_c.width);
-       adjust_vp_and_init_for_seamless_clip(
+       calculate_init_and_vp(
                        flip_vert_scan_dir,
-                       recout_skip_v,
-                       surf_size_v,
-                       orthogonal_rotation ? data->taps.h_taps : data->taps.v_taps,
-                       orthogonal_rotation ? data->ratios.horz : data->ratios.vert,
-                       orthogonal_rotation ? &data->inits.h : &data->inits.v,
+                       ro_tb,
+                       data->recout.height,
+                       src.height,
+                       data->taps.v_taps,
+                       data->ratios.vert,
+                       &data->inits.v,
                        &data->viewport.y,
                        &data->viewport.height);
-       adjust_vp_and_init_for_seamless_clip(
+       calculate_init_and_vp(
                        flip_vert_scan_dir,
-                       recout_skip_v,
-                       surf_size_v / vpc_div,
-                       orthogonal_rotation ? data->taps.h_taps_c : data->taps.v_taps_c,
-                       orthogonal_rotation ? data->ratios.horz_c : data->ratios.vert_c,
-                       orthogonal_rotation ? &data->inits.h_c : &data->inits.v_c,
+                       ro_tb,
+                       data->recout.height,
+                       src.height / vpc_div,
+                       data->taps.v_taps_c,
+                       data->ratios.vert_c,
+                       &data->inits.v_c,
                        &data->viewport_c.y,
                        &data->viewport_c.height);
-
-       /* Interlaced inits based on final vert inits */
-       data->inits.v_bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
-       data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
-
-}
-
-/*
- * When handling 270 rotation in mixed SLS mode, we have
- * stream->timing.h_border_left that is non zero.  If we are doing
- * pipe-splitting, this h_border_left value gets added to recout.x and when it
- * calls calculate_inits_and_adj_vp() and
- * adjust_vp_and_init_for_seamless_clip(), it can cause viewport.height for a
- * pipe to be incorrect.
- *
- * To fix this, instead of using stream->timing.h_border_left, we can use
- * stream->dst.x to represent the border instead.  So we will set h_border_left
- * to 0 and shift the appropriate amount in stream->dst.x.  We will then
- * perform all calculations in resource_build_scaling_params() based on this
- * and then restore the h_border_left and stream->dst.x to their original
- * values.
- *
- * shift_border_left_to_dst() will shift the amount of h_border_left to
- * stream->dst.x and set h_border_left to 0.  restore_border_left_from_dst()
- * will restore h_border_left and stream->dst.x back to their original values
- * We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the
- * original h_border_left value in its calculation.
- */
-static int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx)
-{
-       int store_h_border_left = pipe_ctx->stream->timing.h_border_left;
-
-       if (store_h_border_left) {
-               pipe_ctx->stream->timing.h_border_left = 0;
-               pipe_ctx->stream->dst.x += store_h_border_left;
+       if (orthogonal_rotation) {
+               swap(data->viewport.x, data->viewport.y);
+               swap(data->viewport.width, data->viewport.height);
+               swap(data->viewport_c.x, data->viewport_c.y);
+               swap(data->viewport_c.width, data->viewport_c.height);
        }
-       return store_h_border_left;
-}
-
-static void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx,
-                                        int store_h_border_left)
-{
-       pipe_ctx->stream->dst.x -= store_h_border_left;
-       pipe_ctx->stream->timing.h_border_left = store_h_border_left;
+       data->viewport.x += src.x;
+       data->viewport.y += src.y;
+       ASSERT(src.x % vpc_div == 0 && src.y % vpc_div == 0);
+       data->viewport_c.x += src.x / vpc_div;
+       data->viewport_c.y += src.y / vpc_div;
 }
 
 bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
@@ -1189,48 +1008,45 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
        const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
        struct dc_crtc_timing *timing = &pipe_ctx->stream->timing;
        bool res = false;
-       int store_h_border_left = shift_border_left_to_dst(pipe_ctx);
        DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
-       /* Important: scaling ratio calculation requires pixel format,
-        * lb depth calculation requires recout and taps require scaling ratios.
-        * Inits require viewport, taps, ratios and recout of split pipe
-        */
+
        pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface(
                        pipe_ctx->plane_state->format);
 
-       calculate_scaling_ratios(pipe_ctx);
-
-       calculate_viewport(pipe_ctx);
-
-       if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
-               pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE) {
-               if (store_h_border_left) {
-                       restore_border_left_from_dst(pipe_ctx,
-                               store_h_border_left);
-               }
-               return false;
-       }
-
-       calculate_recout(pipe_ctx);
-
-       /**
-        * Setting line buffer pixel depth to 24bpp yields banding
-        * on certain displays, such as the Sharp 4k
+       /* Timing borders are part of vactive that we are also supposed to skip in addition
+        * to any stream dst offset. Since dm logic assumes dst is in addressable
+        * space we need to add the left and top borders to dst offsets temporarily.
+        * TODO: fix in DM, stream dst is supposed to be in vactive
         */
-       pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
-       pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
-
-       pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
-       pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
+       pipe_ctx->stream->dst.x += timing->h_border_left;
+       pipe_ctx->stream->dst.y += timing->v_border_top;
 
+       /* Calculate H and V active size */
        pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable +
-               store_h_border_left + timing->h_border_right;
+                       timing->h_border_left + timing->h_border_right;
        pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
                timing->v_border_top + timing->v_border_bottom;
        if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe)
                pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
 
-       /* Taps calculations */
+       /* depends on h_active */
+       calculate_recout(pipe_ctx);
+       /* depends on pixel format */
+       calculate_scaling_ratios(pipe_ctx);
+       /* depends on scaling ratios and recout, does not calculate offset yet */
+       calculate_viewport_size(pipe_ctx);
+
+       /*
+        * LB calculations depend on vp size, h/v_active and scaling ratios
+        * Setting line buffer pixel depth to 24bpp yields banding
+        * on certain displays, such as the Sharp 4k. 36bpp is needed
+        * to support SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 and
+        * SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 with actual > 10 bpc
+        * precision on at least DCN display engines.
+        */
+       pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
+       pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
+
        if (pipe_ctx->plane_res.xfm != NULL)
                res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
                                pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
@@ -1257,9 +1073,31 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
                                        &plane_state->scaling_quality);
        }
 
+       /*
+        * Depends on recout, scaling ratios, h_active and taps
+        * May need to re-check lb size after this in some obscure scenario
+        */
        if (res)
-               /* May need to re-check lb size after this in some obscure scenario */
-               calculate_inits_and_adj_vp(pipe_ctx);
+               calculate_inits_and_viewports(pipe_ctx);
+
+       /*
+        * Handle side-by-side and top-bottom 3d recout offsets after vp calculation,
+        * since 3d is special and needs to calculate vp as if there were no recout offset.
+        * This may break with rotation; fortunately we aren't mixing hw rotation and 3d.
+        */
+       if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state == plane_state) {
+               ASSERT(plane_state->rotation == ROTATION_ANGLE_0 ||
+                       (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_TOP_AND_BOTTOM &&
+                               pipe_ctx->stream->view_format != VIEW_3D_FORMAT_SIDE_BY_SIDE));
+               if (pipe_ctx->stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM)
+                       pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height;
+               else if (pipe_ctx->stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE)
+                       pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
+       }
+
+       if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
+                       pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
+               res = false;
 
        DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d  Recout: height:%d width:%d x:%d y:%d  HACTIVE:%d VACTIVE:%d\n"
                        "src_rect: height:%d width:%d x:%d y:%d  dst_rect: height:%d width:%d x:%d y:%d  clip_rect: height:%d width:%d x:%d y:%d\n",
@@ -1288,8 +1126,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
                        plane_state->clip_rect.x,
                        plane_state->clip_rect.y);
 
-       if (store_h_border_left)
-               restore_border_left_from_dst(pipe_ctx, store_h_border_left);
+       pipe_ctx->stream->dst.x -= timing->h_border_left;
+       pipe_ctx->stream->dst.y -= timing->v_border_top;
 
        return res;
 }
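For orientation, the calculation order this hunk establishes, with the
dependencies the inline comments call out (a summary, not new behavior):

    /*
     * resource_build_scaling_params() order after this change:
     *
     *   h/v_active        <- timing + borders (ODM divides h_active)
     *   recout            <- stream dst + surf clip     (needs h_active)
     *   scaling ratios    <- src/dst rects              (needs pixel format)
     *   viewport size     <- ratios * recout            (for taps check only)
     *   lb depth          <- 36bpp to cover >10bpc surface formats
     *   taps              <- ratios + scaling quality
     *   inits + viewports <- ratios, recout, taps, h_active
     *   3d recout offsets <- applied last so vp math ignores them
     *   min vp size check <- fails the build if below MIN_VIEWPORT_SIZE
     */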
@@ -3046,6 +2884,7 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format)
 #endif
                return 32;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
                return 64;
index 7da5e7a..c0fbcbd 100644 (file)
@@ -45,7 +45,7 @@
 /* forward declaration */
 struct aux_payload;
 
-#define DC_VER "3.2.136"
+#define DC_VER "3.2.137"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -1069,8 +1069,6 @@ bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
  */
 bool dc_commit_state(struct dc *dc, struct dc_state *context);
 
-void dc_power_down_on_boot(struct dc *dc);
-
 struct dc_state *dc_create_state(struct dc *dc);
 struct dc_state *dc_copy_state(struct dc_state *src_ctx);
 void dc_retain_state(struct dc_state *context);
index c5dc3a9..4b2854d 100644 (file)
@@ -110,6 +110,15 @@ void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv)
                DC_ERROR("Error waiting for DMUB idle: status=%d\n", status);
 }
 
+void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
+               union dmub_inbox0_data_register data)
+{
+       struct dmub_srv *dmub = dmub_srv->dmub;
+       if (dmub->hw_funcs.send_inbox0_cmd)
+               dmub->hw_funcs.send_inbox0_cmd(dmub, data);
+       // TODO: Add wait command -- poll register for ACK
+}
+
 bool dc_dmub_srv_cmd_with_reply_data(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd)
 {
        struct dmub_srv *dmub;
index 338f776..f615e3a 100644 (file)
@@ -66,4 +66,6 @@ bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_bu
 
 void dc_dmub_trace_event_control(struct dc *dc, bool enable);
 
+void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data);
+
 #endif /* _DMUB_DC_SRV_H_ */
index 04957a9..52355fe 100644 (file)
@@ -182,6 +182,8 @@ enum surface_pixel_format {
        SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS,
        /*64 bpp */
        SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616,
+       /*swapped*/
+       SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616,
        /*float*/
        SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F,
        /*swapped & float*/
index c871923..83845d0 100644 (file)
@@ -216,6 +216,23 @@ static inline void get_edp_links(const struct dc *dc,
        }
 }
 
+static inline bool dc_get_edp_link_panel_inst(const struct dc *dc,
+               const struct dc_link *link,
+               unsigned int *inst_out)
+{
+       struct dc_link *edp_links[MAX_NUM_EDP];
+       int edp_num;
+
+       if (link->connector_signal != SIGNAL_TYPE_EDP)
+               return false;
+       get_edp_links(dc, edp_links, &edp_num);
+       if ((edp_num > 1) && (link->link_index > edp_links[0]->link_index))
+               *inst_out = 1;
+       else
+               *inst_out = 0;
+       return true;
+}
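A hypothetical caller, e.g. when routing per-panel configuration;
configure_panel_backlight is an invented stand-in for any per-instance consumer
such as the ABM config path changed below.

    unsigned int panel_inst = 0;

    if (dc_get_edp_link_panel_inst(dc, link, &panel_inst))
            configure_panel_backlight(dc, panel_inst);  /* hypothetical */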
+
 /* Set backlight level of an embedded panel (eDP, LVDS).
  * backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer
  * and 16 bit fractional, where 1.0 is max backlight value.
@@ -316,7 +333,7 @@ bool dc_link_dp_perform_link_training_skip_aux(
 
 enum link_training_result dc_link_dp_perform_link_training(
        struct dc_link *link,
-       const struct dc_link_settings *link_setting,
+       const struct dc_link_settings *link_settings,
        bool skip_video_pattern);
 
 bool dc_link_dp_sync_lt_begin(struct dc_link *link);
index 13dae72..0ab1a33 100644 (file)
@@ -179,6 +179,9 @@ struct dc_stream_state {
 
        bool use_vsc_sdp_for_colorimetry;
        bool ignore_msa_timing_param;
+
+       bool freesync_on_desktop;
+
        bool converter_disable_audio;
        uint8_t qs_bit;
        uint8_t qy_bit;
index 535da8d..8016e22 100644 (file)
@@ -271,11 +271,6 @@ struct dc_edid_caps {
        struct dc_panel_patch panel_patch;
 };
 
-struct view {
-       uint32_t width;
-       uint32_t height;
-};
-
 struct dc_mode_flags {
        /* note: part of refresh rate flag*/
        uint32_t INTERLACE :1;
index 79a6f26..4cdd4da 100644 (file)
@@ -566,6 +566,7 @@ static void program_grph_pixel_format(
                         *  should problem swap endian*/
                format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010 ||
                format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS ||
+               format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616 ||
                format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
                /* ABGR formats */
                red_xbar = 2;
@@ -606,6 +607,7 @@ static void program_grph_pixel_format(
                fallthrough;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
                grph_depth = 3;
                grph_format = 0;
                break;
index 23db5c7..08a4c8d 100644 (file)
@@ -181,7 +181,6 @@ struct dce_mem_input_registers {
        SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\
        SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\
        SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\
-       SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
        SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\
        SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\
        SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\
@@ -207,7 +206,6 @@ struct dce_mem_input_registers {
        SFB(blk, GRPH_ENABLE, GRPH_ENABLE, mask_sh),\
        SFB(blk, GRPH_CONTROL, GRPH_DEPTH, mask_sh),\
        SFB(blk, GRPH_CONTROL, GRPH_FORMAT, mask_sh),\
-       SFB(blk, GRPH_CONTROL, GRPH_NUM_BANKS, mask_sh),\
        SFB(blk, GRPH_X_START, GRPH_X_START, mask_sh),\
        SFB(blk, GRPH_Y_START, GRPH_Y_START, mask_sh),\
        SFB(blk, GRPH_X_END, GRPH_X_END, mask_sh),\
index 151dc7b..d9fd4ec 100644 (file)
@@ -794,7 +794,7 @@ static void program_bit_depth_reduction(
        enum dcp_out_trunc_round_mode trunc_mode;
        bool spatial_dither_enable;
 
-       ASSERT(depth < COLOR_DEPTH_121212); /* Invalid clamp bit depth */
+       ASSERT(depth <= COLOR_DEPTH_121212); /* Invalid clamp bit depth */
 
        spatial_dither_enable = bit_depth_params->flags.SPATIAL_DITHER_ENABLED;
        /* Default to 12 bit truncation without rounding */
@@ -854,7 +854,7 @@ static void dce60_program_bit_depth_reduction(
        enum dcp_out_trunc_round_mode trunc_mode;
        bool spatial_dither_enable;
 
-       ASSERT(depth < COLOR_DEPTH_121212); /* Invalid clamp bit depth */
+       ASSERT(depth <= COLOR_DEPTH_121212); /* Invalid clamp bit depth */
 
        spatial_dither_enable = bit_depth_params->flags.SPATIAL_DITHER_ENABLED;
        /* Default to 12 bit truncation without rounding */
@@ -1647,7 +1647,8 @@ void dce_transform_construct(
        xfm_dce->lb_pixel_depth_supported =
                        LB_PIXEL_DEPTH_18BPP |
                        LB_PIXEL_DEPTH_24BPP |
-                       LB_PIXEL_DEPTH_30BPP;
+                       LB_PIXEL_DEPTH_30BPP |
+                       LB_PIXEL_DEPTH_36BPP;
 
        xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
        xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
@@ -1675,7 +1676,8 @@ void dce60_transform_construct(
        xfm_dce->lb_pixel_depth_supported =
                        LB_PIXEL_DEPTH_18BPP |
                        LB_PIXEL_DEPTH_24BPP |
-                       LB_PIXEL_DEPTH_30BPP;
+                       LB_PIXEL_DEPTH_30BPP |
+                       LB_PIXEL_DEPTH_36BPP;
 
        xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
        xfm_dce->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x6B0*/
index 6939ca2..54a1408 100644 (file)
@@ -172,16 +172,12 @@ static bool dmub_abm_set_level(struct abm *abm, uint32_t level)
 
 static bool dmub_abm_init_config(struct abm *abm,
        const char *src,
-       unsigned int bytes)
+       unsigned int bytes,
+       unsigned int inst)
 {
        union dmub_rb_cmd cmd;
        struct dc_context *dc = abm->ctx;
-       uint32_t edp_id_count = dc->dc_edp_id_count;
-       int i;
-       uint8_t panel_mask = 0;
-
-       for (i = 0; i < edp_id_count; i++)
-               panel_mask |= 0x01 << i;
+       uint8_t panel_mask = 0x01 << inst;
 
        // TODO: Optimize by only reading back final 4 bytes
        dmub_flush_buffer_mem(&dc->dmub_srv->dmub->scratch_mem_fb);
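
The hunk above narrows dmub_abm_init_config() from a mask covering every detected eDP to a mask addressing a single ABM instance. A minimal sketch of the two mask computations in plain C (the function names here are illustrative, not the driver's):

    #include <assert.h>
    #include <stdint.h>

    /* Old behaviour: OR one bit per detected eDP panel. */
    static uint8_t panel_mask_all(unsigned int edp_id_count)
    {
            uint8_t mask = 0;
            unsigned int i;

            for (i = 0; i < edp_id_count; i++)
                    mask |= 0x01 << i;
            return mask;
    }

    /* New behaviour: address exactly one ABM instance. */
    static uint8_t panel_mask_single(unsigned int inst)
    {
            return 0x01 << inst;
    }

    int main(void)
    {
            assert(panel_mask_all(3) == 0x07);    /* panels 0, 1, 2 */
            assert(panel_mask_single(2) == 0x04); /* panel 2 only */
            return 0;
    }
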
index c97ee5a..9baf8ca 100644 (file)
@@ -52,6 +52,14 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
        dc_dmub_srv_wait_idle(dmub_srv);
 }
 
+void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
+               union dmub_inbox0_cmd_lock_hw hw_lock_cmd)
+{
+       union dmub_inbox0_data_register data = { 0 };
+       data.inbox0_cmd_lock_hw = hw_lock_cmd;
+       dc_dmub_srv_send_inbox0_cmd(dmub_srv, data);
+}
+
 bool should_use_dmub_lock(struct dc_link *link)
 {
        return false;
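
dmub_hw_lock_mgr_inbox0_cmd() above is a thin wrapper: it places the lock command into the generic inbox0 data register and hands it to the service. A self-contained sketch of that wrapping pattern; the bitfield layout below is invented for illustration, the real unions live in the dmub headers:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the real dmub unions. */
    union lock_hw_cmd {
            struct {
                    uint32_t lock : 1;
                    uint32_t pipe : 5;
            } bits;
            uint32_t all;
    };

    union inbox0_data_register {
            union lock_hw_cmd lock_hw;
            uint32_t all;
    };

    static void send_inbox0(union inbox0_data_register data)
    {
            /* the driver writes this word to the DMUB inbox0 register */
            printf("inbox0 word: 0x%08x\n", data.all);
    }

    int main(void)
    {
            union lock_hw_cmd cmd = { .bits = { .lock = 1, .pipe = 3 } };
            union inbox0_data_register data = { .all = 0 };

            data.lock_hw = cmd; /* same move dmub_hw_lock_mgr_inbox0_cmd() makes */
            send_inbox0(data);
            return 0;
    }
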
index bc59063..5a72b16 100644 (file)
@@ -34,6 +34,9 @@ void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
                                union dmub_hw_lock_flags *hw_locks,
                                struct dmub_hw_lock_inst_flags *inst_flags);
 
+void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
+               union dmub_inbox0_cmd_lock_hw hw_lock_cmd);
+
 bool should_use_dmub_lock(struct dc_link *link);
 
 #endif /*_DMUB_HW_LOCK_MGR_H_ */
index 295596d..faad855 100644 (file)
 #include "dmub/inc/dmub_cmd.h"
 
 /**
- *****************************************************************************
- *  Function: dmub_enable_outbox_notification
- *
- *  @brief
- *             Sends inbox cmd to dmub to enable outbox1 messages with interrupt.
- *             Dmub sends outbox1 message and triggers outbox1 interrupt.
- *
- *  @param
- *             [in] dc: dc structure
- *
- *  @return
- *     None
- *****************************************************************************
+ *  dmub_enable_outbox_notification - Sends an inbox command to dmub to enable
+ *                                    outbox1 messages with interrupt; dmub then
+ *                                    sends an outbox1 message and triggers the
+ *                                    outbox1 interrupt.
+ * @dc: dc structure
  */
 void dmub_enable_outbox_notification(struct dc *dc)
 {
index 5ddeee9..e731987 100644 (file)
@@ -63,6 +63,9 @@
 
 #include "atomfirmware.h"
 
+#include "dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+
 #define GAMMA_HW_POINTS_NUM 256
 
 /*
@@ -264,6 +267,7 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params,
                prescale_params->scale = 0x2008;
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
                prescale_params->scale = 0x2000;
                break;
index 8bbb499..db7557a 100644 (file)
@@ -393,6 +393,7 @@ static void program_pixel_format(
                        grph_format = 1;
                        break;
                case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+               case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
                case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
                case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
                        grph_depth = 3;
index 29438c6..45bca0d 100644 (file)
@@ -708,7 +708,8 @@ bool dce110_transform_v_construct(
        xfm_dce->lb_pixel_depth_supported =
                        LB_PIXEL_DEPTH_18BPP |
                        LB_PIXEL_DEPTH_24BPP |
-                       LB_PIXEL_DEPTH_30BPP;
+                       LB_PIXEL_DEPTH_30BPP |
+                       LB_PIXEL_DEPTH_36BPP;
 
        xfm_dce->prescaler_on = true;
        xfm_dce->lb_bits_per_entry = LB_BITS_PER_ENTRY;
index 7f8456b..91fdfcd 100644 (file)
@@ -257,7 +257,8 @@ static void dpp1_setup_format_flags(enum surface_pixel_format input_format,\
        if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F ||
                input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F)
                *fmt = PIXEL_FORMAT_FLOAT;
-       else if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616)
+       else if (input_format == SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ||
+               input_format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616)
                *fmt = PIXEL_FORMAT_FIXED16;
        else
                *fmt = PIXEL_FORMAT_FIXED;
@@ -368,7 +369,8 @@ void dpp1_cnv_setup (
                select = INPUT_CSC_SELECT_ICSC;
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
-               pixel_format = 22;
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
+               pixel_format = 26; /* ARGB16161616_UNORM */
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
                pixel_format = 24;
@@ -566,7 +568,8 @@ void dpp1_construct(
        dpp->lb_pixel_depth_supported =
                LB_PIXEL_DEPTH_18BPP |
                LB_PIXEL_DEPTH_24BPP |
-               LB_PIXEL_DEPTH_30BPP;
+               LB_PIXEL_DEPTH_30BPP |
+               LB_PIXEL_DEPTH_36BPP;
 
        dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
        dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
index 0bd8de4..673b93f 100644 (file)
@@ -631,8 +631,10 @@ static void dpp1_dscl_set_manual_ratio_init(
                SCL_V_INIT_INT, init_int);
 
        if (REG(SCL_VERT_FILTER_INIT_BOT)) {
-               init_frac = dc_fixpt_u0d19(data->inits.v_bot) << 5;
-               init_int = dc_fixpt_floor(data->inits.v_bot);
+               struct fixed31_32 bot = dc_fixpt_add(data->inits.v, data->ratios.vert);
+
+               init_frac = dc_fixpt_u0d19(bot) << 5;
+               init_int = dc_fixpt_floor(bot);
                REG_SET_2(SCL_VERT_FILTER_INIT_BOT, 0,
                        SCL_V_INIT_FRAC_BOT, init_frac,
                        SCL_V_INIT_INT_BOT, init_int);
@@ -645,8 +647,10 @@ static void dpp1_dscl_set_manual_ratio_init(
                SCL_V_INIT_INT_C, init_int);
 
        if (REG(SCL_VERT_FILTER_INIT_BOT_C)) {
-               init_frac = dc_fixpt_u0d19(data->inits.v_c_bot) << 5;
-               init_int = dc_fixpt_floor(data->inits.v_c_bot);
+               struct fixed31_32 bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c);
+
+               init_frac = dc_fixpt_u0d19(bot) << 5;
+               init_int = dc_fixpt_floor(bot);
                REG_SET_2(SCL_VERT_FILTER_INIT_BOT_C, 0,
                        SCL_V_INIT_FRAC_BOT_C, init_frac,
                        SCL_V_INIT_INT_BOT_C, init_int);
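
Instead of carrying precomputed v_bot/v_c_bot values in scl_inits, the hunk above derives the bottom-field init as init + ratio when the registers are programmed. A minimal sketch of the same arithmetic on a bare s31.32 value; the helpers mirror, but do not reuse, the dc_fixpt API:

    #include <assert.h>
    #include <stdint.h>

    #define FRAC_BITS 32

    typedef int64_t fixed31_32;             /* s31.32, as in dc fixed point */

    static fixed31_32 from_parts(int64_t i, uint64_t frac) /* frac in 2^-32 units */
    {
            return (i << FRAC_BITS) | frac;
    }

    static int64_t fixpt_floor(fixed31_32 v)
    {
            return v >> FRAC_BITS;
    }

    static uint32_t fixpt_u0d19(fixed31_32 v) /* top 19 fractional bits */
    {
            return (uint32_t)((v >> (FRAC_BITS - 19)) & ((1u << 19) - 1));
    }

    int main(void)
    {
            fixed31_32 v_init = from_parts(1, 0x80000000ull);  /* 1.5  */
            fixed31_32 v_ratio = from_parts(0, 0x40000000ull); /* 0.25 */

            /* bottom-field init = init + ratio, computed on the fly */
            fixed31_32 bot = v_init + v_ratio;                 /* 1.75 */

            uint32_t init_frac = fixpt_u0d19(bot) << 5; /* register wants u0.19 << 5 */
            int64_t init_int = fixpt_floor(bot);

            assert(init_int == 1);
            assert(init_frac == (0x60000u << 5)); /* 0.75 in u0.19 is 0x60000 */
            return 0;
    }
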
index 6f42d10..f4f423d 100644 (file)
@@ -785,6 +785,7 @@ static bool hubbub1_dcc_support_pixel_format(
                *bytes_per_element = 4;
                return true;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
                *bytes_per_element = 8;
index e39e8a2..04303fe 100644 (file)
@@ -245,6 +245,7 @@ void hubp1_program_pixel_format(
        if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
                        || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
                        || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
+                       || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616
                        || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
                red_bar = 2;
                blue_bar = 3;
@@ -277,8 +278,9 @@ void hubp1_program_pixel_format(
                                SURFACE_PIXEL_FORMAT, 10);
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
                REG_UPDATE(DCSURF_SURFACE_CONFIG,
-                               SURFACE_PIXEL_FORMAT, 22);
+                               SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
index 8180346..ef37d3a 100644 (file)
@@ -2502,25 +2502,9 @@ static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state
                dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
 }
 
-void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
-{
-       struct dce_hwseq *hws = dc->hwseq;
-       struct mpc *mpc = dc->res_pool->mpc;
-
-       if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
-               hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, color);
-       else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
-               hws->funcs.get_surface_visual_confirm_color(pipe_ctx, color);
-       else
-               color_space_to_black_color(
-                               dc, pipe_ctx->stream->output_color_space, color);
-
-       if (mpc->funcs->set_bg_color)
-               mpc->funcs->set_bg_color(mpc, color, mpcc_id);
-}
-
 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
+       struct dce_hwseq *hws = dc->hwseq;
        struct hubp *hubp = pipe_ctx->plane_res.hubp;
        struct mpcc_blnd_cfg blnd_cfg = {{0}};
        bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
@@ -2529,6 +2513,18 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        struct mpc *mpc = dc->res_pool->mpc;
        struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
 
+       if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
+               hws->funcs.get_hdr_visual_confirm_color(
+                               pipe_ctx, &blnd_cfg.black_color);
+       } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
+               hws->funcs.get_surface_visual_confirm_color(
+                               pipe_ctx, &blnd_cfg.black_color);
+       } else {
+               color_space_to_black_color(
+                               dc, pipe_ctx->stream->output_color_space,
+                               &blnd_cfg.black_color);
+       }
+
        if (per_pixel_alpha)
                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
        else
@@ -2560,8 +2556,6 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
         */
        mpcc_id = hubp->inst;
 
-       dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
-
        /* If there is no full update, don't need to touch MPC tree*/
        if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
                mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
@@ -2599,7 +2593,7 @@ static void update_scaler(struct pipe_ctx *pipe_ctx)
                        pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
 
        pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
-       pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+       pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_36BPP;
        /* scaler configuration */
        pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
                        pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
index 478180b..c9bdffe 100644 (file)
@@ -206,10 +206,4 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc);
 
 void dcn10_get_dcc_en_bits(struct dc *dc, int *dcc_en_bits);
 
-void dcn10_update_visual_confirm_color(
-               struct dc *dc,
-               struct pipe_ctx *pipe_ctx,
-               struct tg_color *color,
-               int mpcc_id);
-
 #endif /* __DC_HWSS_DCN10_H__ */
index 4ff3ebc..680ca53 100644 (file)
@@ -82,7 +82,6 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
        .set_pipe = dce110_set_pipe,
        .get_dcc_en_bits = dcn10_get_dcc_en_bits,
-       .update_visual_confirm_color = dcn10_update_visual_confirm_color,
 };
 
 static const struct hwseq_private_funcs dcn10_private_funcs = {
index da74269..b096011 100644 (file)
@@ -64,8 +64,6 @@ void mpc1_set_bg_color(struct mpc *mpc,
                        MPCC_BG_G_Y, bg_g_y);
        REG_SET(MPCC_BG_B_CB[bottommost_mpcc->mpcc_id], 0,
                        MPCC_BG_B_CB, bg_b_cb);
-
-       bottommost_mpcc->blnd_cfg.black_color = *bg_color;
 }
 
 static void mpc1_update_blending(
@@ -248,8 +246,6 @@ struct mpcc *mpc1_insert_plane(
                }
        }
 
-       mpc->funcs->set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id);
-
        /* update the blending configuration */
        mpc->funcs->update_blending(mpc, blnd_cfg, mpcc_id);
 
@@ -499,7 +495,6 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
        .set_output_csc = NULL,
        .set_output_gamma = NULL,
        .get_mpc_out_mux = mpc1_get_mpc_out_mux,
-       .set_bg_color = mpc1_set_bg_color,
 };
 
 void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
index 4af96cc..a9e420c 100644 (file)
@@ -166,7 +166,8 @@ static void dpp2_cnv_setup (
                select = DCN2_ICSC_SELECT_ICSC_A;
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
-               pixel_format = 22;
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
+               pixel_format = 26; /* ARGB16161616_UNORM */
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
                pixel_format = 24;
@@ -431,7 +432,8 @@ bool dpp2_construct(
        dpp->lb_pixel_depth_supported =
                LB_PIXEL_DEPTH_18BPP |
                LB_PIXEL_DEPTH_24BPP |
-               LB_PIXEL_DEPTH_30BPP;
+               LB_PIXEL_DEPTH_30BPP |
+               LB_PIXEL_DEPTH_36BPP;
 
        dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
        dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
index 6d03d98..91a9305 100644 (file)
@@ -158,6 +158,7 @@ bool hubbub2_dcc_support_pixel_format(
                *bytes_per_element = 4;
                return true;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
                *bytes_per_element = 8;
index a1318c3..7e54058 100644 (file)
@@ -431,6 +431,7 @@ void hubp2_program_pixel_format(
        if (format == SURFACE_PIXEL_FORMAT_GRPH_ABGR8888
                        || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010
                        || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS
+                       || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616
                        || format == SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F) {
                red_bar = 2;
                blue_bar = 3;
@@ -463,8 +464,9 @@ void hubp2_program_pixel_format(
                                SURFACE_PIXEL_FORMAT, 10);
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: /*we use crossbar already*/
                REG_UPDATE(DCSURF_SURFACE_CONFIG,
-                               SURFACE_PIXEL_FORMAT, 22);
+                               SURFACE_PIXEL_FORMAT, 26); /* ARGB16161616_UNORM */
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:/*we use crossbar already*/
index 25de151..14e3227 100644 (file)
@@ -1473,7 +1473,7 @@ static void dcn20_update_dchubp_dpp(
                        plane_state->update_flags.bits.per_pixel_alpha_change ||
                        pipe_ctx->stream->update_flags.bits.scaling) {
                pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha;
-               ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP);
+               ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_36BPP);
                /* scaler configuration */
                pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
                                pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
@@ -2267,25 +2267,9 @@ void dcn20_get_mpctree_visual_confirm_color(
        *color = pipe_colors[top_pipe->pipe_idx];
 }
 
-void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx, struct tg_color *color, int mpcc_id)
-{
-       struct dce_hwseq *hws = dc->hwseq;
-       struct mpc *mpc = dc->res_pool->mpc;
-
-       /* input to MPCC is always RGB, by default leave black_color at 0 */
-       if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR)
-               hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, color);
-       else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE)
-               hws->funcs.get_surface_visual_confirm_color(pipe_ctx, color);
-       else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
-               dcn20_get_mpctree_visual_confirm_color(pipe_ctx, color);
-
-       if (mpc->funcs->set_bg_color)
-               mpc->funcs->set_bg_color(mpc, color, mpcc_id);
-}
-
 void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
 {
+       struct dce_hwseq *hws = dc->hwseq;
        struct hubp *hubp = pipe_ctx->plane_res.hubp;
        struct mpcc_blnd_cfg blnd_cfg = { {0} };
        bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
@@ -2294,6 +2278,15 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
        struct mpc *mpc = dc->res_pool->mpc;
        struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
 
+       // input to MPCC is always RGB, by default leave black_color at 0
+       if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
+               hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
+       } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
+               hws->funcs.get_surface_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
+       } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
+               dcn20_get_mpctree_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
+       }
+
        if (per_pixel_alpha)
                blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
        else
@@ -2327,8 +2320,6 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
         */
        mpcc_id = hubp->inst;
 
-       dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
-
        /* If there is no full update, don't need to touch MPC tree*/
        if (!pipe_ctx->plane_state->update_flags.bits.full_update &&
                !pipe_ctx->update_flags.bits.mpcc) {
index 6bba191..c69f766 100644 (file)
@@ -146,10 +146,5 @@ void dcn20_set_disp_pattern_generator(const struct dc *dc,
                const struct tg_color *solid_color,
                int width, int height, int offset);
 
-void dcn20_update_visual_confirm_color(struct dc *dc,
-               struct pipe_ctx *pipe_ctx,
-               struct tg_color *color,
-               int mpcc_id);
-
 #endif /* __DC_HWSS_DCN20_H__ */
 
index 2f59f10..b5bb613 100644 (file)
@@ -96,7 +96,6 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
 #endif
        .set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
        .get_dcc_en_bits = dcn10_get_dcc_en_bits,
-       .update_visual_confirm_color = dcn20_update_visual_confirm_color,
 };
 
 static const struct hwseq_private_funcs dcn20_private_funcs = {
index 947eb0d..6a99fdd 100644 (file)
@@ -67,6 +67,7 @@ void mpc2_update_blending(
        REG_SET(MPCC_BOT_GAIN_INSIDE[mpcc_id], 0, MPCC_BOT_GAIN_INSIDE, blnd_cfg->bottom_inside_gain);
        REG_SET(MPCC_BOT_GAIN_OUTSIDE[mpcc_id], 0, MPCC_BOT_GAIN_OUTSIDE, blnd_cfg->bottom_outside_gain);
 
+       mpc1_set_bg_color(mpc, &blnd_cfg->black_color, mpcc_id);
        mpcc->blnd_cfg = *blnd_cfg;
 }
 
@@ -556,7 +557,6 @@ const struct mpc_funcs dcn20_mpc_funcs = {
        .set_output_gamma = mpc2_set_output_gamma,
        .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
        .get_mpc_out_mux = mpc1_get_mpc_out_mux,
-       .set_bg_color = mpc1_set_bg_color,
 };
 
 void dcn20_mpc_construct(struct dcn20_mpc *mpc20,
index 6a56a03..0d06307 100644 (file)
@@ -2289,12 +2289,14 @@ int dcn20_populate_dml_pipes_from_context(
 
                        pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
                                        || pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
-                       pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport_unadjusted.y;
-                       pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c_unadjusted.y;
-                       pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport_unadjusted.width;
-                       pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c_unadjusted.width;
-                       pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport_unadjusted.height;
-                       pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c_unadjusted.height;
+                       pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
+                       pipes[pipe_cnt].pipe.src.viewport_y_c = scl->viewport_c.y;
+                       pipes[pipe_cnt].pipe.src.viewport_width = scl->viewport.width;
+                       pipes[pipe_cnt].pipe.src.viewport_width_c = scl->viewport_c.width;
+                       pipes[pipe_cnt].pipe.src.viewport_height = scl->viewport.height;
+                       pipes[pipe_cnt].pipe.src.viewport_height_c = scl->viewport_c.height;
+                       pipes[pipe_cnt].pipe.src.viewport_width_max = pln->src_rect.width;
+                       pipes[pipe_cnt].pipe.src.viewport_height_max = pln->src_rect.height;
                        pipes[pipe_cnt].pipe.src.surface_width_y = pln->plane_size.surface_size.width;
                        pipes[pipe_cnt].pipe.src.surface_height_y = pln->plane_size.surface_size.height;
                        pipes[pipe_cnt].pipe.src.surface_width_c = pln->plane_size.chroma_size.width;
@@ -2363,6 +2365,7 @@ int dcn20_populate_dml_pipes_from_context(
                                pipes[pipe_cnt].pipe.src.source_format = dm_420_10;
                                break;
                        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+                       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
                        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
                        case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
                                pipes[pipe_cnt].pipe.src.source_format = dm_444_64;
@@ -3236,7 +3239,7 @@ static noinline bool dcn20_validate_bandwidth_fp(struct dc *dc,
        voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false);
        dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support;
 
-       if (voltage_supported && dummy_pstate_supported) {
+       if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) {
                context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
                goto restore_dml_state;
        }
index 523e25f..4f20a85 100644 (file)
@@ -100,7 +100,6 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
        .is_abm_supported = dcn21_is_abm_supported,
        .set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
        .get_dcc_en_bits = dcn10_get_dcc_en_bits,
-       .update_visual_confirm_color = dcn20_update_visual_confirm_color,
 };
 
 static const struct hwseq_private_funcs dcn21_private_funcs = {
index 434d3c4..2140b75 100644 (file)
@@ -245,7 +245,8 @@ static void dpp3_cnv_setup (
                select = INPUT_CSC_SELECT_ICSC;
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
-               pixel_format = 22;
+       case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616:
+               pixel_format = 26; /* ARGB16161616_UNORM */
                break;
        case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
                pixel_format = 24;
@@ -1442,7 +1443,8 @@ bool dpp3_construct(
        dpp->lb_pixel_depth_supported =
                LB_PIXEL_DEPTH_18BPP |
                LB_PIXEL_DEPTH_24BPP |
-               LB_PIXEL_DEPTH_30BPP;
+               LB_PIXEL_DEPTH_30BPP |
+               LB_PIXEL_DEPTH_36BPP;
 
        dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
        dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES; /*0x1404*/
index a978d84..bf7fa98 100644 (file)
@@ -99,7 +99,6 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
        .set_pipe = dcn21_set_pipe,
        .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
        .get_dcc_en_bits = dcn10_get_dcc_en_bits,
-       .update_visual_confirm_color = dcn20_update_visual_confirm_color,
 };
 
 static const struct hwseq_private_funcs dcn30_private_funcs = {
index a82319f..950c9bf 100644 (file)
@@ -1431,7 +1431,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
        .release_rmu = mpcc3_release_rmu,
        .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut,
        .get_mpc_out_mux = mpc1_get_mpc_out_mux,
-       .set_bg_color = mpc1_set_bg_color,
+
 };
 
 void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
index 181f217..70b053d 100644 (file)
@@ -101,7 +101,6 @@ static const struct hw_sequencer_funcs dcn301_funcs = {
        .get_dcc_en_bits = dcn10_get_dcc_en_bits,
        .optimize_pwr_state = dcn21_optimize_pwr_state,
        .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
-       .update_visual_confirm_color = dcn20_update_visual_confirm_color,
 };
 
 static const struct hwseq_private_funcs dcn301_private_funcs = {
index fb41140..4440d08 100644 (file)
@@ -245,6 +245,8 @@ struct pp_smu_funcs_nv {
 #define PP_SMU_NUM_DCFCLK_DPM_LEVELS  8
 #define PP_SMU_NUM_FCLK_DPM_LEVELS    4
 #define PP_SMU_NUM_MEMCLK_DPM_LEVELS  4
+#define PP_SMU_NUM_DCLK_DPM_LEVELS    8
+#define PP_SMU_NUM_VCLK_DPM_LEVELS    8
 
 struct dpm_clock {
   uint32_t  Freq;    // In MHz
@@ -258,6 +260,8 @@ struct dpm_clocks {
        struct dpm_clock SocClocks[PP_SMU_NUM_SOCCLK_DPM_LEVELS];
        struct dpm_clock FClocks[PP_SMU_NUM_FCLK_DPM_LEVELS];
        struct dpm_clock MemClocks[PP_SMU_NUM_MEMCLK_DPM_LEVELS];
+       struct dpm_clock VClocks[PP_SMU_NUM_VCLK_DPM_LEVELS];
+       struct dpm_clock DClocks[PP_SMU_NUM_DCLK_DPM_LEVELS];
 };
 
 
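
With the two arrays added above, a dpm_clocks table now reports eight VCN video-clock (VCLK) and decode-clock (DCLK) levels alongside the existing domains. A small sketch of walking those levels; the struct is trimmed to the Freq member shown in the hunk and the sample frequencies are made up:

    #include <stdint.h>
    #include <stdio.h>

    #define PP_SMU_NUM_VCLK_DPM_LEVELS 8
    #define PP_SMU_NUM_DCLK_DPM_LEVELS 8

    /* Trimmed to the member shown in the hunk above. */
    struct dpm_clock {
            uint32_t Freq; /* in MHz */
    };

    struct vcn_dpm_clocks {
            struct dpm_clock VClocks[PP_SMU_NUM_VCLK_DPM_LEVELS];
            struct dpm_clock DClocks[PP_SMU_NUM_DCLK_DPM_LEVELS];
    };

    int main(void)
    {
            struct vcn_dpm_clocks c = {
                    .VClocks = { { 400 }, { 600 }, { 800 } }, /* sample levels */
                    .DClocks = { { 500 }, { 700 } },
            };
            int i;

            for (i = 0; i < PP_SMU_NUM_VCLK_DPM_LEVELS && c.VClocks[i].Freq; i++)
                    printf("VCLK level %d: %u MHz\n", i, c.VClocks[i].Freq);
            for (i = 0; i < PP_SMU_NUM_DCLK_DPM_LEVELS && c.DClocks[i].Freq; i++)
                    printf("DCLK level %d: %u MHz\n", i, c.DClocks[i].Freq);
            return 0;
    }
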
index 2ece369..a0f0c54 100644 (file)
@@ -253,6 +253,8 @@ struct _vcs_dpi_display_pipe_source_params_st {
        unsigned int viewport_y_c;
        unsigned int viewport_width_c;
        unsigned int viewport_height_c;
+       unsigned int viewport_width_max;
+       unsigned int viewport_height_max;
        unsigned int data_pitch;
        unsigned int data_pitch_c;
        unsigned int meta_pitch;
index d764d78..a4a1b96 100644 (file)
@@ -630,6 +630,19 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
                                }
                        }
                }
+               if (src->viewport_width_max) {
+                       int hdiv_c = src->source_format >= dm_420_8 && src->source_format <= dm_422_10 ? 2 : 1;
+                       int vdiv_c = src->source_format >= dm_420_8 && src->source_format <= dm_420_12 ? 2 : 1;
+
+                       if (mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] > src->viewport_width_max)
+                               mode_lib->vba.ViewportWidth[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_max;
+                       if (mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] > src->viewport_height_max)
+                               mode_lib->vba.ViewportHeight[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_max;
+                       if (mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] > src->viewport_width_max / hdiv_c)
+                               mode_lib->vba.ViewportWidthChroma[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_max / hdiv_c;
+                       if (mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] > src->viewport_height_max / vdiv_c)
+                               mode_lib->vba.ViewportHeightChroma[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_max / vdiv_c;
+               }
 
                if (pipes[k].pipe.src.immediate_flip) {
                        mode_lib->vba.ImmediateFlipSupport = true;
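
The clamp added above caps the DML viewport at the plane's source rectangle, with the chroma limit divided by the subsampling factor: 4:2:0 halves both axes, 4:2:2 only the horizontal one. A worked sketch of the divisor selection, with the dm_* format codes reduced to an illustrative enum in the same order:

    #include <assert.h>

    /* Illustrative subset of the dm_* source formats, in the same order. */
    enum src_format { dm_444_32, dm_420_8, dm_420_10, dm_420_12,
                      dm_422_8, dm_422_10 };

    static void clamp_chroma_viewport(enum src_format fmt,
                                      unsigned int max_w, unsigned int max_h,
                                      unsigned int *w_c, unsigned int *h_c)
    {
            /* 4:2:0 and 4:2:2 halve chroma horizontally;
             * only 4:2:0 halves it vertically. */
            int hdiv_c = (fmt >= dm_420_8 && fmt <= dm_422_10) ? 2 : 1;
            int vdiv_c = (fmt >= dm_420_8 && fmt <= dm_420_12) ? 2 : 1;

            if (*w_c > max_w / hdiv_c)
                    *w_c = max_w / hdiv_c;
            if (*h_c > max_h / vdiv_c)
                    *h_c = max_h / vdiv_c;
    }

    int main(void)
    {
            unsigned int w_c = 4096, h_c = 4096;

            clamp_chroma_viewport(dm_420_8, 3840, 2160, &w_c, &h_c);
            assert(w_c == 1920 && h_c == 1080); /* 4:2:0 chroma is quarter size */
            return 0;
    }
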
index 92280cc..dae8e48 100644 (file)
@@ -53,8 +53,8 @@
  */
 
 struct gpio_service *dal_gpio_service_create(
-       enum dce_version dce_version_major,
-       enum dce_version dce_version_minor,
+       enum dce_version dce_version,
+       enum dce_environment dce_environment,
        struct dc_context *ctx)
 {
        struct gpio_service *service;
@@ -67,14 +67,14 @@ struct gpio_service *dal_gpio_service_create(
                return NULL;
        }
 
-       if (!dal_hw_translate_init(&service->translate, dce_version_major,
-                       dce_version_minor)) {
+       if (!dal_hw_translate_init(&service->translate, dce_version,
+                       dce_environment)) {
                BREAK_TO_DEBUGGER();
                goto failure_1;
        }
 
-       if (!dal_hw_factory_init(&service->factory, dce_version_major,
-                       dce_version_minor)) {
+       if (!dal_hw_factory_init(&service->factory, dce_version,
+                       dce_environment)) {
                BREAK_TO_DEBUGGER();
                goto failure_1;
        }
index ffc3f2c..bbb054f 100644 (file)
 #define LINK_TRAINING_RETRY_DELAY 50 /* ms */
 #define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/
 #define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
+#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
+#define TRAINING_AUX_RD_INTERVAL 100 //us
 
 struct dc_link;
 struct dc_stream_state;
 struct dc_link_settings;
 
+enum {
+       LINK_TRAINING_MAX_RETRY_COUNT = 5,
+       /* to avoid an infinite loop wherein the receiver
+        * switches between different VS
+        */
+       LINK_TRAINING_MAX_CR_RETRY = 100
+};
+
 bool dp_verify_link_cap(
        struct dc_link *link,
        struct dc_link_settings *known_limit_link_setting,
@@ -68,6 +78,10 @@ bool perform_link_training_with_retries(
        enum signal_type signal,
        bool do_fallback);
 
+bool hpd_rx_irq_check_link_loss_status(
+       struct dc_link *link,
+       union hpd_irq_data *hpd_irq_dpcd_data);
+
 bool is_mst_supported(struct dc_link *link);
 
 bool detect_dp_sink_caps(struct dc_link *link);
@@ -88,8 +102,51 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
 bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
 
 void dpcd_set_source_specific_data(struct dc_link *link);
+/* Write DPCD link configuration data. */
+enum dc_status dpcd_set_link_settings(
+       struct dc_link *link,
+       const struct link_training_settings *lt_settings);
+/* Write DPCD drive settings. */
+enum dc_status dpcd_set_lane_settings(
+       struct dc_link *link,
+       const struct link_training_settings *link_training_setting,
+       uint32_t offset);
+/* Read training status and adjustment requests from DPCD. */
+enum dc_status dp_get_lane_status_and_drive_settings(
+       struct dc_link *link,
+       const struct link_training_settings *link_training_setting,
+       union lane_status *ln_status,
+       union lane_align_status_updated *ln_status_updated,
+       struct link_training_settings *req_settings,
+       uint32_t offset);
+
+void dp_wait_for_training_aux_rd_interval(
+       struct dc_link *link,
+       uint32_t wait_in_micro_secs);
+
+bool dp_is_cr_done(enum dc_lane_count ln_count,
+       union lane_status *dpcd_lane_status);
+
+enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count,
+       union lane_status *dpcd_lane_status);
+
+bool dp_is_max_vs_reached(
+       const struct link_training_settings *lt_settings);
 
-void dp_set_fec_ready(struct dc_link *link, bool ready);
+void dp_update_drive_settings(
+       struct link_training_settings *dest,
+       struct link_training_settings src);
+
+enum dpcd_training_patterns
+       dc_dp_training_pattern_to_dpcd_training_pattern(
+       struct dc_link *link,
+       enum dc_dp_training_pattern pattern);
+
+uint8_t dc_dp_initialize_scrambling_data_symbols(
+       struct dc_link *link,
+       enum dc_dp_training_pattern pattern);
+
+enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready);
 void dp_set_fec_enable(struct dc_link *link, bool enable);
 bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
 bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable);
@@ -97,6 +154,13 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
 bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);
 bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable);
 
+/* Initialize output parameter lt_settings. */
+void dp_decide_training_settings(
+       struct dc_link *link,
+       const struct dc_link_settings *link_setting,
+       const struct dc_link_training_overrides *overrides,
+       struct link_training_settings *lt_settings);
+
 /* Convert PHY repeater count read from DPCD uint8_t. */
 uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count);
 
@@ -105,5 +169,9 @@ enum link_training_result dp_check_link_loss_status(
                struct dc_link *link,
                const struct link_training_settings *link_training_setting);
 
+enum dc_status dpcd_configure_lttpr_mode(
+               struct dc_link *link,
+               struct link_training_settings *lt_settings);
+
 enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings);
 #endif /* __DC_LINK_DP_H__ */
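
The constants introduced above bound the training loops: LINK_TRAINING_MAX_RETRY_COUNT caps whole-training retries, and LINK_TRAINING_MAX_CR_RETRY keeps clock recovery from ping-ponging forever between voltage-swing requests. A minimal sketch of that bounding pattern; train_cr_once() is a stand-in, not driver code:

    #include <stdbool.h>
    #include <stdio.h>

    enum {
            LINK_TRAINING_MAX_RETRY_COUNT = 5,
            LINK_TRAINING_MAX_CR_RETRY = 100, /* receiver may keep requesting new VS */
    };

    /* Stand-in: pretend clock recovery locks on the 4th attempt. */
    static bool train_cr_once(int attempt)
    {
            return attempt >= 3;
    }

    int main(void)
    {
            bool done = false;
            int retry;

            for (retry = 0; !done && retry < LINK_TRAINING_MAX_CR_RETRY; retry++)
                    done = train_cr_once(retry);

            printf("clock recovery %s after %d attempts\n",
                   done ? "locked" : "gave up", retry);
            return 0;
    }
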
index e8ce8c8..1427536 100644 (file)
@@ -52,7 +52,8 @@ struct abm_funcs {
        unsigned int (*get_target_backlight)(struct abm *abm);
        bool (*init_abm_config)(struct abm *abm,
                        const char *src,
-                       unsigned int bytes);
+                       unsigned int bytes,
+                       unsigned int inst);
 };
 
 #endif
index 640bb43..75c77ad 100644 (file)
@@ -363,9 +363,6 @@ struct mpc_funcs {
                        struct mpc *mpc,
                        int opp_id);
 
-       void (*set_bg_color)(struct mpc *mpc,
-                       struct tg_color *bg_color,
-                       int mpcc_id);
 };
 
 #endif
index 2947d1b..2a0db2b 100644 (file)
@@ -162,9 +162,7 @@ struct scl_inits {
        struct fixed31_32 h;
        struct fixed31_32 h_c;
        struct fixed31_32 v;
-       struct fixed31_32 v_bot;
        struct fixed31_32 v_c;
-       struct fixed31_32 v_c_bot;
 };
 
 struct scaler_data {
@@ -173,8 +171,6 @@ struct scaler_data {
        struct scaling_taps taps;
        struct rect viewport;
        struct rect viewport_c;
-       struct rect viewport_unadjusted;
-       struct rect viewport_c_unadjusted;
        struct rect recout;
        struct scaling_ratios ratios;
        struct scl_inits inits;
index 43284d4..1d5853c 100644 (file)
@@ -235,10 +235,6 @@ struct hw_sequencer_funcs {
                        enum dc_color_depth color_depth,
                        const struct tg_color *solid_color,
                        int width, int height, int offset);
-       void (*update_visual_confirm_color)(struct dc *dc,
-                       struct pipe_ctx *pipe_ctx,
-                       struct tg_color *color,
-                       int mpcc_id);
 };
 
 void color_space_to_black_color(
index 79ff68c..ed58abc 100644 (file)
@@ -324,6 +324,7 @@ struct dmub_srv_hw_funcs {
 
        uint32_t (*get_gpint_response)(struct dmub_srv *dmub);
 
+       void (*send_inbox0_cmd)(struct dmub_srv *dmub, union dmub_inbox0_data_register data);
        uint32_t (*get_current_time)(struct dmub_srv *dmub);
 };
 
index e6f3bfa..70766d5 100644 (file)
  */
 
 /**
- *****************************************************************************
- *  Function: dmub_srv_stat_get_notification
+ * dmub_srv_stat_get_notification - Retrieves a dmub outbox notification and sets
+ *                                  up the dmub notification structure with message
+ *                                  information. Also sets a pending bit if the
+ *                                  queue holds more notifications.
+ *  @dmub: dmub srv structure
+ *  @notify: dmub notification structure to be filled up
  *
- *  @brief
- *             Retrieves a dmub outbox notification, set up dmub notification
- *             structure with message information. Also a pending bit if queue
- *             is having more notifications
- *
- *  @param [in] dmub: dmub srv structure
- *  @param [out] pnotify: dmub notification structure to be filled up
- *
- *  @return
- *     dmub_status
- *****************************************************************************
+ *  Returns: dmub_status
  */
 enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
                                                struct dmub_notification *notify)
index 9c55d24..7e3240e 100644 (file)
@@ -42,8 +42,8 @@ void dal_gpio_destroy(
        struct gpio **ptr);
 
 struct gpio_service *dal_gpio_service_create(
-       enum dce_version dce_version_major,
-       enum dce_version dce_version_minor,
+       enum dce_version dce_version,
+       enum dce_environment dce_environment,
        struct dc_context *ctx);
 
 struct gpio *dal_gpio_service_create_irq(
index 7a30ca0..32f5274 100644 (file)
@@ -85,6 +85,7 @@ struct link_training_settings {
        enum dc_voltage_swing *voltage_swing;
        enum dc_pre_emphasis *pre_emphasis;
        enum dc_post_cursor2 *post_cursor2;
+       bool should_set_fec_ready;
 
        uint16_t cr_pattern_time;
        uint16_t eq_pattern_time;
index 3f4f44b..b99aa23 100644 (file)
@@ -516,7 +516,8 @@ bool mod_freesync_get_v_position(struct mod_freesync *mod_freesync,
 }
 
 static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
-               struct dc_info_packet *infopacket)
+               struct dc_info_packet *infopacket,
+               bool freesync_on_desktop)
 {
        /* PB1 = 0x1A (24bit AMD IEEE OUI (0x00001A) - Byte 0) */
        infopacket->sb[1] = 0x1A;
@@ -542,10 +543,16 @@ static void build_vrr_infopacket_data_v1(const struct mod_vrr_params *vrr,
                        vrr->state != VRR_STATE_UNSUPPORTED)
                infopacket->sb[6] |= 0x02;
 
-       /* PB6 = [Bit 2 = FreeSync Active] */
-       if (vrr->state != VRR_STATE_DISABLED &&
+       if (freesync_on_desktop) {
+               /* PB6 = [Bit 2 = FreeSync Active] */
+               if (vrr->state != VRR_STATE_DISABLED &&
                        vrr->state != VRR_STATE_UNSUPPORTED)
-               infopacket->sb[6] |= 0x04;
+                       infopacket->sb[6] |= 0x04;
+       } else {
+               if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
+                       vrr->state == VRR_STATE_ACTIVE_FIXED)
+                       infopacket->sb[6] |= 0x04;
+       }
 
        // For v1 & 2 infoframes program nominal if non-fs mode, otherwise full range
        /* PB7 = FreeSync Minimum refresh rate (Hz) */
@@ -824,13 +831,14 @@ static void build_vrr_infopacket_checksum(unsigned int *payload_size,
 
 static void build_vrr_infopacket_v1(enum signal_type signal,
                const struct mod_vrr_params *vrr,
-               struct dc_info_packet *infopacket)
+               struct dc_info_packet *infopacket,
+               bool freesync_on_desktop)
 {
        /* SPD info packet for FreeSync */
        unsigned int payload_size = 0;
 
        build_vrr_infopacket_header_v1(signal, infopacket, &payload_size);
-       build_vrr_infopacket_data_v1(vrr, infopacket);
+       build_vrr_infopacket_data_v1(vrr, infopacket, freesync_on_desktop);
        build_vrr_infopacket_checksum(&payload_size, infopacket);
 
        infopacket->valid = true;
@@ -839,12 +847,13 @@ static void build_vrr_infopacket_v1(enum signal_type signal,
 static void build_vrr_infopacket_v2(enum signal_type signal,
                const struct mod_vrr_params *vrr,
                enum color_transfer_func app_tf,
-               struct dc_info_packet *infopacket)
+               struct dc_info_packet *infopacket,
+               bool freesync_on_desktop)
 {
        unsigned int payload_size = 0;
 
        build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
-       build_vrr_infopacket_data_v1(vrr, infopacket);
+       build_vrr_infopacket_data_v1(vrr, infopacket, freesync_on_desktop);
 
        build_vrr_infopacket_fs2_data(app_tf, infopacket);
 
@@ -953,12 +962,12 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
 #endif
                break;
        case PACKET_TYPE_FS_V2:
-               build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket);
+               build_vrr_infopacket_v2(stream->signal, vrr, app_tf, infopacket, stream->freesync_on_desktop);
                break;
        case PACKET_TYPE_VRR:
        case PACKET_TYPE_FS_V1:
        default:
-               build_vrr_infopacket_v1(stream->signal, vrr, infopacket);
+               build_vrr_infopacket_v1(stream->signal, vrr, infopacket, stream->freesync_on_desktop);
        }
 
        if (true == pack_sdp_v1_3 &&
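
The change above alters when the FreeSync Active bit (PB6 bit 2) is reported: with freesync_on_desktop it follows "not disabled and not unsupported", otherwise it is only set while VRR is actively variable or fixed. A compact sketch of the two policies; the state names mirror the mod_vrr enum:

    #include <stdint.h>
    #include <stdio.h>

    enum vrr_state { VRR_STATE_UNSUPPORTED, VRR_STATE_DISABLED,
                     VRR_STATE_INACTIVE, VRR_STATE_ACTIVE_VARIABLE,
                     VRR_STATE_ACTIVE_FIXED };

    static uint8_t pb6_active_bit(enum vrr_state s, int freesync_on_desktop)
    {
            if (freesync_on_desktop)
                    return (s != VRR_STATE_DISABLED &&
                            s != VRR_STATE_UNSUPPORTED) ? 0x04 : 0;
            return (s == VRR_STATE_ACTIVE_VARIABLE ||
                    s == VRR_STATE_ACTIVE_FIXED) ? 0x04 : 0;
    }

    int main(void)
    {
            /* INACTIVE only counts as "active" on the desktop path. */
            printf("inactive, desktop: 0x%02x\n",
                   pb6_active_bit(VRR_STATE_INACTIVE, 1)); /* 0x04 */
            printf("inactive, normal : 0x%02x\n",
                   pb6_active_bit(VRR_STATE_INACTIVE, 0)); /* 0x00 */
            return 0;
    }
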
index 43e6f8b..de872e7 100644 (file)
@@ -29,8 +29,10 @@ static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp)
 {
        uint64_t n = 0;
        uint8_t count = 0;
+       u8 bksv[sizeof(n)] = { };
 
-       memcpy(&n, hdcp->auth.msg.hdcp1.bksv, sizeof(uint64_t));
+       memcpy(bksv, hdcp->auth.msg.hdcp1.bksv, sizeof(hdcp->auth.msg.hdcp1.bksv));
+       n = *(uint64_t *)bksv;
 
        while (n) {
                count++;
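
The fix above stops validate_bksv() from reading eight bytes out of the five-byte bksv array by bouncing through a zero-padded local buffer before the set-bit count (a valid HDCP 1.x BKSV carries exactly twenty ones). A self-contained sketch of the hardened copy plus the check; the memcpy into n replaces the diff's pointer cast for strict-aliasing cleanliness:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    static int bksv_is_valid(const uint8_t bksv_in[5])
    {
            uint64_t n = 0;
            uint8_t bksv[sizeof(n)] = { 0 }; /* zero-padded, so no overread */
            int count = 0;

            memcpy(bksv, bksv_in, 5);    /* copy only the 5 real bytes */
            memcpy(&n, bksv, sizeof(n)); /* safe: both sides are 8 bytes */

            while (n) {
                    count += n & 1;
                    n >>= 1;
            }
            return count == 20; /* HDCP 1.x BKSV: exactly 20 set bits */
    }

    int main(void)
    {
            const uint8_t good[5] = { 0xff, 0xff, 0x0f, 0x00, 0x00 }; /* 20 ones */
            const uint8_t bad[5]  = { 0xff, 0x00, 0x00, 0x00, 0x00 };

            assert(bksv_is_valid(good));
            assert(!bksv_is_valid(bad));
            return 0;
    }
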
index 26f96c0..06910d2 100644 (file)
@@ -371,19 +371,6 @@ enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp)
        return status;
 }
 
-enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp,
-                                                              enum mod_hdcp_encryption_status *encryption_status)
-{
-       *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF;
-
-       if (mod_hdcp_hdcp1_link_maintenance(hdcp) != MOD_HDCP_STATUS_SUCCESS)
-               return MOD_HDCP_STATUS_FAILURE;
-
-       *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON;
-
-       return MOD_HDCP_STATUS_SUCCESS;
-}
-
 enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
 {
        struct psp_context *psp = hdcp->config.psp.handle;
index 6270ecb..5e7331b 100644 (file)
@@ -660,7 +660,8 @@ static void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram
 }
 
 bool dmub_init_abm_config(struct resource_pool *res_pool,
-       struct dmcu_iram_parameters params)
+       struct dmcu_iram_parameters params,
+       unsigned int inst)
 {
        struct iram_table_v_2_2 ram_table;
        struct abm_config_table config;
@@ -669,7 +670,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
        uint32_t i, j = 0;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-       if (res_pool->abm == NULL && res_pool->multiple_abms[0] == NULL)
+       if (res_pool->abm == NULL && res_pool->multiple_abms[inst] == NULL)
                return false;
 #else
        if (res_pool->abm == NULL)
@@ -728,13 +729,13 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
        config.min_abm_backlight = ram_table.min_abm_backlight;
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-       if (res_pool->multiple_abms[0])
-               result = res_pool->multiple_abms[0]->funcs->init_abm_config(
-                       res_pool->multiple_abms[0], (char *)(&config), sizeof(struct abm_config_table));
-       else
+       if (res_pool->multiple_abms[inst]) {
+               result = res_pool->multiple_abms[inst]->funcs->init_abm_config(
+                       res_pool->multiple_abms[inst], (char *)(&config), sizeof(struct abm_config_table), inst);
+       } else
 #endif
                result = res_pool->abm->funcs->init_abm_config(
-                       res_pool->abm, (char *)(&config), sizeof(struct abm_config_table));
+                       res_pool->abm, (char *)(&config), sizeof(struct abm_config_table), 0);
 
        return result;
 }
index 6f2eecc..2a9f8e2 100644 (file)
@@ -49,6 +49,7 @@ struct dmcu_iram_parameters {
 bool dmcu_load_iram(struct dmcu *dmcu,
                struct dmcu_iram_parameters params);
 bool dmub_init_abm_config(struct resource_pool *res_pool,
-               struct dmcu_iram_parameters params);
+               struct dmcu_iram_parameters params,
+               unsigned int inst);
 
 #endif /* MODULES_POWER_POWER_HELPERS_H_ */
index 644ffec..cdd426b 100644 (file)
@@ -30,7 +30,7 @@ struct IP_BASE_INSTANCE {
 
 struct IP_BASE {
     struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
-};
+} __maybe_unused;
 
 static const struct IP_BASE ATHUB_BASE = { { { { 0x00000C20, 0x02408C00, 0, 0, 0, 0 } },
                                         { { 0, 0, 0, 0, 0, 0 } },
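
Tagging the IP_BASE type __maybe_unused above is a W=1 fix: it tells the compiler that variables of this type, such as the large const base-address tables defined in headers, may legitimately go unused in some includers. A minimal illustration of the attribute, with the struct bodies reconstructed from the initializer shown above and the macro expanded as the kernel defines it:

    /* __maybe_unused expands to __attribute__((unused)) in the kernel. */
    #define __maybe_unused __attribute__((unused))

    #define MAX_INSTANCE 6

    struct IP_BASE_INSTANCE {
            unsigned int segment[6];
    };

    struct IP_BASE {
            struct IP_BASE_INSTANCE instance[MAX_INSTANCE];
    } __maybe_unused;

    /* Without the type attribute, -Wunused-const-variable would flag this
     * table in any translation unit that includes the header but never
     * reads it. */
    static const struct IP_BASE ATHUB_BASE =
            { { { { 0x00000C20, 0x02408C00 } } } };

    int main(void)
    {
            return 0;
    }
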
index c72cbfe..2d089d3 100644 (file)
@@ -103,6 +103,13 @@ struct atcs_pref_req_output {
        u8 ret_val;             /* return value */
 } __packed;
 
+struct atcs_pwr_shift_input {
+       u16 size;               /* structure size in bytes (includes size field) */
+       u16 dgpu_id;            /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
+       u8 dev_acpi_state;      /* D0 = 0, D3 hot = 3 */
+       u8 drv_state;   /* 0 = operational, 1 = not operational */
+} __packed;
+
 /* AMD hw uses four ACPI control methods:
  * 1. ATIF
  * ARG0: (ACPI_INTEGER) function code
@@ -418,6 +425,7 @@ struct atcs_pref_req_output {
 #       define ATCS_PCIE_PERFORMANCE_REQUEST_SUPPORTED             (1 << 1)
 #       define ATCS_PCIE_DEVICE_READY_NOTIFICATION_SUPPORTED       (1 << 2)
 #       define ATCS_SET_PCIE_BUS_WIDTH_SUPPORTED                   (1 << 3)
+#       define ATCS_SET_POWER_SHIFT_CONTROL_SUPPORTED             (1 << 7)
 #define ATCS_FUNCTION_GET_EXTERNAL_STATE                           0x1
 /* ARG0: ATCS_FUNCTION_GET_EXTERNAL_STATE
  * ARG1: none
@@ -472,4 +480,14 @@ struct atcs_pref_req_output {
  * BYTE  - number of active lanes
  */
 
+#define ATCS_FUNCTION_POWER_SHIFT_CONTROL                          0x8
+/* ARG0: ATCS_FUNCTION_POWER_SHIFT_CONTROL
+ * ARG1:
+ * WORD  - structure size in bytes (includes size field)
+ * WORD  - dGPU id (bit 2-0: func num, 7-3: dev num, 15-8: bus num)
+ * BYTE  - Device ACPI state
+ * BYTE  - Driver state
+ * OUTPUT: none
+ */
+
 #endif
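
The power-shift method documented above takes its arguments in exactly the order atcs_pwr_shift_input lays out: size word, dGPU id word, ACPI-state byte, driver-state byte. A sketch of filling the request for a hypothetical dGPU at bus 3, device 0, function 0 entering D3hot; __attribute__((packed)) stands in for the kernel's __packed:

    #include <stdint.h>
    #include <stdio.h>

    struct atcs_pwr_shift_input {
            uint16_t size;          /* structure size in bytes (includes size field) */
            uint16_t dgpu_id;       /* bit 2-0: func num, 7-3: dev num, 15-8: bus num */
            uint8_t dev_acpi_state; /* D0 = 0, D3 hot = 3 */
            uint8_t drv_state;      /* 0 = operational, 1 = not operational */
    } __attribute__((packed));

    int main(void)
    {
            struct atcs_pwr_shift_input in = {
                    .size = sizeof(in),                 /* 6 bytes when packed */
                    .dgpu_id = (3 << 8) | (0 << 3) | 0, /* bus 3, dev 0, func 0 */
                    .dev_acpi_state = 3,                /* D3 hot */
                    .drv_state = 1,                     /* not operational */
            };

            printf("size=%u dgpu_id=0x%04x\n", in.size, in.dgpu_id);
            return 0;
    }
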
index 2aa6d27..28deecc 100644 (file)
@@ -197,6 +197,9 @@ enum atom_dp_vs_preemph_def{
   DP_VS_LEVEL0_PREEMPH_LEVEL3 = 0x18,
 };
 
+#define BIOS_ATOM_PREFIX   "ATOMBIOS"
+#define BIOS_VERSION_PREFIX  "ATOMBIOSBK-AMD"
+#define BIOS_STRING_LENGTH 43
 
 /*
 enum atom_string_def{
@@ -209,12 +212,14 @@ atom_bios_string          = "ATOM"
 #pragma pack(1)                          /* BIOS data must use byte aligment*/
 
 enum atombios_image_offset{
-OFFSET_TO_ATOM_ROM_HEADER_POINTER          =0x00000048,
-OFFSET_TO_ATOM_ROM_IMAGE_SIZE              =0x00000002,
-OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE       =0x94,
-MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE      =20,  /*including the terminator 0x0!*/
-OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS   =0x2f,
-OFFSET_TO_GET_ATOMBIOS_STRING_START        =0x6e,
+  OFFSET_TO_ATOM_ROM_HEADER_POINTER          = 0x00000048,
+  OFFSET_TO_ATOM_ROM_IMAGE_SIZE              = 0x00000002,
+  OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE       = 0x94,
+  MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE      = 20,  /*including the terminator 0x0!*/
+  OFFSET_TO_GET_ATOMBIOS_NUMBER_OF_STRINGS   = 0x2f,
+  OFFSET_TO_GET_ATOMBIOS_STRING_START        = 0x6e,
+  OFFSET_TO_VBIOS_PART_NUMBER                = 0x80,
+  OFFSET_TO_VBIOS_DATE                       = 0x50,
 };
 
 /****************************************************************************   
index e2d1313..b1cd52a 100644 (file)
@@ -536,6 +536,75 @@ struct gpu_metrics_v1_2 {
        uint64_t                        firmware_timestamp;
 };
 
+struct gpu_metrics_v1_3 {
+       struct metrics_table_header     common_header;
+
+       /* Temperature */
+       uint16_t                        temperature_edge;
+       uint16_t                        temperature_hotspot;
+       uint16_t                        temperature_mem;
+       uint16_t                        temperature_vrgfx;
+       uint16_t                        temperature_vrsoc;
+       uint16_t                        temperature_vrmem;
+
+       /* Utilization */
+       uint16_t                        average_gfx_activity;
+       uint16_t                        average_umc_activity; // memory controller
+       uint16_t                        average_mm_activity; // UVD or VCN
+
+       /* Power/Energy */
+       uint16_t                        average_socket_power;
+       uint64_t                        energy_accumulator;
+
+       /* Driver attached timestamp (in ns) */
+       uint64_t                        system_clock_counter;
+
+       /* Average clocks */
+       uint16_t                        average_gfxclk_frequency;
+       uint16_t                        average_socclk_frequency;
+       uint16_t                        average_uclk_frequency;
+       uint16_t                        average_vclk0_frequency;
+       uint16_t                        average_dclk0_frequency;
+       uint16_t                        average_vclk1_frequency;
+       uint16_t                        average_dclk1_frequency;
+
+       /* Current clocks */
+       uint16_t                        current_gfxclk;
+       uint16_t                        current_socclk;
+       uint16_t                        current_uclk;
+       uint16_t                        current_vclk0;
+       uint16_t                        current_dclk0;
+       uint16_t                        current_vclk1;
+       uint16_t                        current_dclk1;
+
+       /* Throttle status */
+       uint32_t                        throttle_status;
+
+       /* Fans */
+       uint16_t                        current_fan_speed;
+
+       /* Link width/speed */
+       uint16_t                        pcie_link_width;
+       uint16_t                        pcie_link_speed; // in 0.1 GT/s
+
+       uint16_t                        padding;
+
+       uint32_t                        gfx_activity_acc;
+       uint32_t                        mem_activity_acc;
+
+       uint16_t                        temperature_hbm[NUM_HBM_INSTANCES];
+
+       /* PMFW attached timestamp (10ns resolution) */
+       uint64_t                        firmware_timestamp;
+
+       /* Voltage (mV) */
+       uint16_t                        voltage_soc;
+       uint16_t                        voltage_gfx;
+       uint16_t                        voltage_mem;
+
+       uint16_t                        padding1;
+};
+
 /*
  * gpu_metrics_v2_0 is not recommended as it's not naturally aligned.
  * Use gpu_metrics_v2_1 or later instead.
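
Two fields above use scaled units that are easy to trip over: pcie_link_speed is reported in tenths of a GT/s and firmware_timestamp counts 10 ns ticks. A small conversion sketch with made-up sample values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Sample values as a tool reading gpu_metrics_v1_3 might see them. */
            uint16_t pcie_link_speed = 160;      /* 0.1 GT/s units -> 16.0 GT/s */
            uint64_t firmware_timestamp = 12345; /* 10 ns ticks */

            printf("PCIe speed: %u.%u GT/s\n",
                   pcie_link_speed / 10, pcie_link_speed % 10);
            printf("PMFW time : %llu ns\n",
                   (unsigned long long)firmware_timestamp * 10);
            return 0;
    }
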
index 13da377..f48132b 100644 (file)
@@ -1942,7 +1942,7 @@ static int amdgpu_device_attr_create(struct amdgpu_device *adev,
 
        BUG_ON(!attr);
 
-       attr_update = attr->attr_update ? attr_update : default_attr_update;
+       attr_update = attr->attr_update ? attr->attr_update : default_attr_update;
 
        ret = attr_update(adev, attr, mask, &attr_states);
        if (ret) {
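
The one-word fix above is a classic ternary slip: when the attribute supplied its own updater, the expression read the local variable being assigned (attr_update, still uninitialized) instead of the member attr->attr_update. A distilled reproduction of the pattern with the corrected form:

    #include <assert.h>
    #include <stddef.h>

    typedef int (*update_fn)(void);

    static int default_update(void) { return 42; }

    struct attr { update_fn attr_update; };

    static update_fn pick_update(const struct attr *attr)
    {
            update_fn attr_update;

            /* Buggy form: attr->attr_update ? attr_update : default_update
             * reads the uninitialized local on the non-NULL path.
             * Fixed form: */
            attr_update = attr->attr_update ? attr->attr_update : default_update;
            return attr_update;
    }

    int main(void)
    {
            struct attr a = { .attr_update = NULL };

            assert(pick_update(&a) == default_update);
            assert(pick_update(&a)() == 42);
            return 0;
    }
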
index 7a6d049..61c87c3 100644 (file)
@@ -1176,7 +1176,7 @@ typedef struct {
   uint16_t         LedGpio;            //GeneriA GPIO flag used to control the radeon LEDs
   uint16_t         GfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages 
 
-  uint32_t         SkuReserved[16];
+  uint32_t         SkuReserved[63];
 
 
 
index 1687709..6119a36 100644 (file)
 #define CTF_OFFSET_HOTSPOT             5
 #define CTF_OFFSET_MEM                 5
 
-static const struct smu_temperature_range smu13_thermal_policy[] =
-{
-       {-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
-       { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
-};
-
 struct smu_13_0_max_sustainable_clocks {
        uint32_t display_clock;
        uint32_t phy_clock;
index 25b5831..981dc8c 100644 (file)
@@ -82,7 +82,8 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
 
        /* Skip for suspend/resume case */
        if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr)
-           && !amdgpu_passthrough(adev) && adev->in_suspend) {
+           && !amdgpu_passthrough(adev) && adev->in_suspend
+               && adev->asic_type != CHIP_RAVEN) {
                pr_info("dpm has been enabled\n");
                return 0;
        }
index f5fe540..8f71f6a 100644 (file)
@@ -377,6 +377,27 @@ static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
 
 static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 {
+       struct amdgpu_device *adev = hwmgr->adev;
+       struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+       int ret = -EINVAL;
+
+       if (adev->in_suspend) {
+               pr_info("restore the fine grain parameters\n");
+
+               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetHardMinGfxClk,
+                                       smu10_data->gfx_actual_soft_min_freq,
+                                       NULL);
+               if (ret)
+                       return ret;
+               ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetSoftMaxGfxClk,
+                                       smu10_data->gfx_actual_soft_max_freq,
+                                       NULL);
+               if (ret)
+                       return ret;
+       }
+
        return 0;
 }
 
index 0d38d42..6cfe148 100644 (file)
@@ -129,10 +129,10 @@ int smu7_fan_ctrl_set_static_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
 }
 
 /**
-* Reset Fan Speed Control to default mode.
-* @hwmgr:  the address of the powerplay hardware manager.
-* Exception: Should always succeed.
-*/
+ * smu7_fan_ctrl_set_default_mode - Reset Fan Speed Control to default mode.
+ * @hwmgr:  the address of the powerplay hardware manager.
+ * Exception: Should always succeed.
+ */
 int smu7_fan_ctrl_set_default_mode(struct pp_hwmgr *hwmgr)
 {
        if (!hwmgr->fan_ctrl_is_in_default_mode) {
index 31c61ac..2597910 100644 (file)
@@ -544,7 +544,7 @@ static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
 
 #define ATOM_VIRTUAL_VOLTAGE_ID0             0xff01
 /**
- * Get Leakage VDDC based on leakage ID.
+ * vega10_get_evv_voltages - Get Leakage VDDC based on leakage ID.
  *
  * @hwmgr:  the address of the powerplay hardware manager.
  * return:  always 0.
@@ -600,7 +600,7 @@ static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
 }
 
 /**
- * Change virtual leakage voltage to actual value.
+ * vega10_patch_with_vdd_leakage - Change virtual leakage voltage to actual value.
  *
  * @hwmgr:         the address of the powerplay hardware manager.
  * @voltage:       pointer to changing voltage
@@ -626,7 +626,7 @@ static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
 }
 
 /**
- * Patch voltage lookup table by EVV leakages.
+ * vega10_patch_lookup_table_with_leakage - Patch voltage lookup table by EVV leakages.
  *
  * @hwmgr:         the address of the powerplay hardware manager.
  * @lookup_table:  pointer to voltage lookup table
@@ -1003,7 +1003,7 @@ static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
 }
 
 /**
- * Remove repeated voltage values and create table with unique values.
+ * vega10_trim_voltage_table - Remove repeated voltage values and create table with unique values.
  *
  * @hwmgr:      the address of the powerplay hardware manager.
  * @vol_table:  the pointer to changing voltage table
@@ -1152,7 +1152,7 @@ static void vega10_trim_voltage_table_to_fit_state_table(
 }
 
 /**
- * Create Voltage Tables.
+ * vega10_construct_voltage_tables - Create Voltage Tables.
  *
  * @hwmgr:  the address of the powerplay hardware manager.
  * return:  always 0
@@ -1595,7 +1595,8 @@ static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
 }
 
 /**
- * Populates single SMC GFXSCLK structure using the provided engine clock
+ * vega10_populate_single_gfx_level - Populates single SMC GFXSCLK structure
+ *                                    using the provided engine clock
  *
  * @hwmgr:      the address of the hardware manager
  * @gfx_clock:  the GFX clock to use to populate the structure.
@@ -1660,7 +1661,8 @@ static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
 }
 
 /**
- * Populates single SMC SOCCLK structure using the provided clock.
+ * vega10_populate_single_soc_level - Populates single SMC SOCCLK structure
+ *                                    using the provided clock.
  *
  * @hwmgr:     the address of the hardware manager.
  * @soc_clock: the SOC clock to use to populate the structure.
@@ -1710,7 +1712,8 @@ static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
 }
 
 /**
- * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
+ * vega10_populate_all_graphic_levels - Populates all SMC SCLK levels' structure
+ *                                      based on the trimmed allowed dpm engine clock states
  *
  * @hwmgr:      the address of the hardware manager
  */
@@ -1859,7 +1862,8 @@ static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
 }
 
 /**
- * Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
+ * vega10_populate_all_memory_levels - Populates all SMC MCLK levels' structure
+ *                                     based on the trimmed allowed dpm memory clock states.
  *
  * @hwmgr:  the address of the hardware manager.
  * return:   PP_Result_OK on success.
@@ -2537,7 +2541,7 @@ static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
 }
 
 /**
- * Initializes the SMC table and uploads it
+ * vega10_init_smc_table - Initializes the SMC table and uploads it
  *
  * @hwmgr:  the address of the powerplay hardware manager.
  * return:  always 0
@@ -2919,7 +2923,7 @@ static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
 }
 
 /**
- * Tell SMC to enabled the supported DPMs.
+ * vega10_start_dpm - Tell SMC to enable the supported DPMs.
  *
  * @hwmgr:   the address of the powerplay hardware manager.
 * @bitmap:  bitmap for the features to enable.
index 1a097e6..29e0d1d 100644 (file)
@@ -803,7 +803,7 @@ static int vega12_save_default_power_profile(struct pp_hwmgr *hwmgr)
 #endif
 
 /**
- * Initializes the SMC table and uploads it
+ * vega12_init_smc_table - Initializes the SMC table and uploads it
  *
  * @hwmgr:  the address of the powerplay hardware manager.
  * return:  always 0
index 0dc16f2..ed3dff0 100644 (file)
@@ -159,7 +159,8 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
 }
 
 /**
- * Set the requested temperature range for high and low alert signals
+ * vega12_thermal_set_temperature_range - Set the requested temperature range
+ *                                        for high and low alert signals
  *
  * @hwmgr: The address of the hardware manager.
  * @range: Temperature range to be programmed for
index d3177a5..0791309 100644 (file)
@@ -772,7 +772,7 @@ static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
 }
 
 /**
- * Initializes the SMC table and uploads it
+ * vega20_init_smc_table - Initializes the SMC table and uploads it
  *
  * @hwmgr:  the address of the powerplay hardware manager.
  * return:  always 0
index 77693bf..1735a96 100644 (file)
@@ -822,6 +822,52 @@ static int arcturus_print_clk_levels(struct smu_context *smu,
                                now) ? "*" : ""));
                break;
 
+       case SMU_VCLK:
+               ret = arcturus_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Attempt to get current vclk failed!");
+                       return ret;
+               }
+
+               single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+               ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Attempt to get vclk levels failed!");
+                       return ret;
+               }
+
+               for (i = 0; i < single_dpm_table->count; i++)
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                               i, single_dpm_table->dpm_levels[i].value,
+                               (clocks.num_levels == 1) ? "*" :
+                               (arcturus_freqs_in_same_level(
+                               clocks.data[i].clocks_in_khz / 1000,
+                               now) ? "*" : ""));
+               break;
+
+       case SMU_DCLK:
+               ret = arcturus_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Attempt to get current dclk failed!");
+                       return ret;
+               }
+
+               single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+               ret = arcturus_get_clk_table(smu, &clocks, single_dpm_table);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Attempt to get dclk levels failed!");
+                       return ret;
+               }
+
+               for (i = 0; i < single_dpm_table->count; i++)
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                               i, single_dpm_table->dpm_levels[i].value,
+                               (clocks.num_levels == 1) ? "*" :
+                               (arcturus_freqs_in_same_level(
+                               clocks.data[i].clocks_in_khz / 1000,
+                               now) ? "*" : ""));
+               break;
+
        case SMU_PCIE:
                gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
                lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
index ac13042..78fe131 100644 (file)
@@ -505,7 +505,7 @@ static int navi10_tables_init(struct smu_context *smu)
                goto err0_out;
        smu_table->metrics_time = 0;
 
-       smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_1);
+       smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
        smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
        if (!smu_table->gpu_metrics_table)
                goto err1_out;
@@ -1273,6 +1273,8 @@ static int navi10_print_clk_levels(struct smu_context *smu,
        case SMU_MCLK:
        case SMU_UCLK:
        case SMU_FCLK:
+       case SMU_VCLK:
+       case SMU_DCLK:
        case SMU_DCEFCLK:
                ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
                if (ret)
@@ -2627,8 +2629,8 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
                                             void **table)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
-       struct gpu_metrics_v1_1 *gpu_metrics =
-               (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+       struct gpu_metrics_v1_3 *gpu_metrics =
+               (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
        SmuMetrics_legacy_t metrics;
        int ret = 0;
 
@@ -2646,7 +2648,7 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 
        mutex_unlock(&smu->metrics_lock);
 
-       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
        gpu_metrics->temperature_edge = metrics.TemperatureEdge;
        gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2681,17 +2683,27 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
 
        gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+       if (metrics.CurrGfxVoltageOffset)
+               gpu_metrics->voltage_gfx =
+                       (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+       if (metrics.CurrMemVidOffset)
+               gpu_metrics->voltage_mem =
+                       (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+       if (metrics.CurrSocVoltageOffset)
+               gpu_metrics->voltage_soc =
+                       (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
        *table = (void *)gpu_metrics;
 
-       return sizeof(struct gpu_metrics_v1_1);
+       return sizeof(struct gpu_metrics_v1_3);
 }
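
The voltage decode added above (and repeated in the hunks below) matches the
SVI2-style encoding: a 1.55 V ceiling stepped down in 6.25 mV increments, with
the result reported in millivolts. A minimal sketch of the same arithmetic;
the helper name is ours, and the raw offset width is assumed to be 8-bit:

#include <stdint.h>

/* Sketch: decode a raw SVI2 voltage offset into millivolts, i.e.
 * (155000 - 625 * offset) / 100 as in the hunks above and below. */
static inline uint32_t svi2_offset_to_mv(uint8_t offset)
{
	return (155000 - 625 * (uint32_t)offset) / 100;
}

/* Worked example: offset 40 -> (155000 - 25000) / 100 = 1300 mV. */
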
 
 static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
                                      void **table)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
-       struct gpu_metrics_v1_1 *gpu_metrics =
-               (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+       struct gpu_metrics_v1_3 *gpu_metrics =
+               (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
        SmuMetrics_t metrics;
        int ret = 0;
 
@@ -2709,7 +2721,7 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
        mutex_unlock(&smu->metrics_lock);
 
-       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
        gpu_metrics->temperature_edge = metrics.TemperatureEdge;
        gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2746,17 +2758,27 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
 
        gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+       if (metrics.CurrGfxVoltageOffset)
+               gpu_metrics->voltage_gfx =
+                       (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+       if (metrics.CurrMemVidOffset)
+               gpu_metrics->voltage_mem =
+                       (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+       if (metrics.CurrSocVoltageOffset)
+               gpu_metrics->voltage_soc =
+                       (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
        *table = (void *)gpu_metrics;
 
-       return sizeof(struct gpu_metrics_v1_1);
+       return sizeof(struct gpu_metrics_v1_3);
 }
 
 static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
                                             void **table)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
-       struct gpu_metrics_v1_1 *gpu_metrics =
-               (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+       struct gpu_metrics_v1_3 *gpu_metrics =
+               (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
        SmuMetrics_NV12_legacy_t metrics;
        int ret = 0;
 
@@ -2774,7 +2796,7 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 
        mutex_unlock(&smu->metrics_lock);
 
-       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
        gpu_metrics->temperature_edge = metrics.TemperatureEdge;
        gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2814,17 +2836,27 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
 
        gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+       if (metrics.CurrGfxVoltageOffset)
+               gpu_metrics->voltage_gfx =
+                       (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+       if (metrics.CurrMemVidOffset)
+               gpu_metrics->voltage_mem =
+                       (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+       if (metrics.CurrSocVoltageOffset)
+               gpu_metrics->voltage_soc =
+                       (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
        *table = (void *)gpu_metrics;
 
-       return sizeof(struct gpu_metrics_v1_1);
+       return sizeof(struct gpu_metrics_v1_3);
 }
 
 static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
                                      void **table)
 {
        struct smu_table_context *smu_table = &smu->smu_table;
-       struct gpu_metrics_v1_1 *gpu_metrics =
-               (struct gpu_metrics_v1_1 *)smu_table->gpu_metrics_table;
+       struct gpu_metrics_v1_3 *gpu_metrics =
+               (struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
        SmuMetrics_NV12_t metrics;
        int ret = 0;
 
@@ -2842,7 +2874,7 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 
        mutex_unlock(&smu->metrics_lock);
 
-       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 1);
+       smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
 
        gpu_metrics->temperature_edge = metrics.TemperatureEdge;
        gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
@@ -2884,9 +2916,19 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
 
        gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
 
+       if (metrics.CurrGfxVoltageOffset)
+               gpu_metrics->voltage_gfx =
+                       (155000 - 625 * metrics.CurrGfxVoltageOffset) / 100;
+       if (metrics.CurrMemVidOffset)
+               gpu_metrics->voltage_mem =
+                       (155000 - 625 * metrics.CurrMemVidOffset) / 100;
+       if (metrics.CurrSocVoltageOffset)
+               gpu_metrics->voltage_soc =
+                       (155000 - 625 * metrics.CurrSocVoltageOffset) / 100;
+
        *table = (void *)gpu_metrics;
 
-       return sizeof(struct gpu_metrics_v1_1);
+       return sizeof(struct gpu_metrics_v1_3);
 }
 
 static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
@@ -2925,6 +2967,8 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
 
 static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
 {
+       struct smu_table_context *table_context = &smu->smu_table;
+       PPTable_t *smc_pptable = table_context->driver_pptable;
        struct amdgpu_device *adev = smu->adev;
        uint32_t param = 0;
 
@@ -2932,6 +2976,13 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
        if (adev->asic_type == CHIP_NAVI12)
                return 0;
 
+       /*
+        * Skip the MGpuFanBoost setting for those ASICs
+        * which do not support it
+        */
+       if (!smc_pptable->MGpuFanBoostLimitRpm)
+               return 0;
+
        /* Workaround for WS SKU */
        if (adev->pdev->device == 0x7312 &&
            adev->pdev->revision == 0)
index 0c40a54..75acdb8 100644 (file)
@@ -987,6 +987,10 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
        case SMU_MCLK:
        case SMU_UCLK:
        case SMU_FCLK:
+       case SMU_VCLK:
+       case SMU_VCLK1:
+       case SMU_DCLK:
+       case SMU_DCLK1:
        case SMU_DCEFCLK:
                ret = sienna_cichlid_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
                if (ret)
@@ -3690,6 +3694,16 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
 
 static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
 {
+       struct smu_table_context *table_context = &smu->smu_table;
+       PPTable_t *smc_pptable = table_context->driver_pptable;
+
+       /*
+        * Skip the MGpuFanBoost setting for those ASICs
+        * which do not support it
+        */
+       if (!smc_pptable->MGpuFanBoostLimitRpm)
+               return 0;
+
        return smu_cmn_send_smc_msg_with_param(smu,
                                               SMU_MSG_SetMGpuFanBoostLimitRpm,
                                               0,
index f43b4c6..1c399c4 100644 (file)
@@ -109,6 +109,8 @@ static struct cmn2asic_mapping renoir_clk_map[SMU_CLK_COUNT] = {
        CLK_MAP(SOCCLK, CLOCK_SOCCLK),
        CLK_MAP(UCLK, CLOCK_FCLK),
        CLK_MAP(MCLK, CLOCK_FCLK),
+       CLK_MAP(VCLK, CLOCK_VCLK),
+       CLK_MAP(DCLK, CLOCK_DCLK),
 };
 
 static struct cmn2asic_mapping renoir_table_map[SMU_TABLE_COUNT] = {
@@ -202,6 +204,17 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
                        return -EINVAL;
                *freq = clk_table->FClocks[dpm_level].Freq;
                break;
+       case SMU_VCLK:
+               if (dpm_level >= NUM_VCN_DPM_LEVELS)
+                       return -EINVAL;
+               *freq = clk_table->VClocks[dpm_level].Freq;
+               break;
+       case SMU_DCLK:
+               if (dpm_level >= NUM_VCN_DPM_LEVELS)
+                       return -EINVAL;
+               *freq = clk_table->DClocks[dpm_level].Freq;
+               break;
+
        default:
                return -EINVAL;
        }
@@ -532,6 +545,14 @@ static int renoir_print_clk_levels(struct smu_context *smu,
                count = NUM_FCLK_DPM_LEVELS;
                cur_value = metrics.ClockFrequency[CLOCK_FCLK];
                break;
+       case SMU_VCLK:
+               count = NUM_VCN_DPM_LEVELS;
+               cur_value = metrics.ClockFrequency[CLOCK_VCLK];
+               break;
+       case SMU_DCLK:
+               count = NUM_VCN_DPM_LEVELS;
+               cur_value = metrics.ClockFrequency[CLOCK_DCLK];
+               break;
        default:
                break;
        }
@@ -543,6 +564,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
        case SMU_MCLK:
        case SMU_DCEFCLK:
        case SMU_FCLK:
+       case SMU_VCLK:
+       case SMU_DCLK:
                for (i = 0; i < count; i++) {
                        ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
                        if (ret)
@@ -730,6 +753,16 @@ static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks
                clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol;
        }
 
+       for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
+               clock_table->VClocks[i].Freq = table->VClocks[i].Freq;
+               clock_table->VClocks[i].Vol = table->VClocks[i].Vol;
+       }
+
+       for (i = 0; i < NUM_VCN_DPM_LEVELS; i++) {
+               clock_table->DClocks[i].Freq = table->DClocks[i].Freq;
+               clock_table->DClocks[i].Vol = table->DClocks[i].Vol;
+       }
+
        return 0;
 }
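
With VCLK and DCLK wired into renoir_print_clk_levels, the new VCN clock
levels surface through amdgpu's per-clock sysfs files. A rough userspace
sketch, assuming card0 and a pp_dpm_vclk node named like the existing
pp_dpm_* clock files:

#include <stdio.h>

/* Sketch: dump the VCN clock DPM levels exposed by the hunks above. */
int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/class/drm/card0/device/pp_dpm_vclk", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "0: 400Mhz *" */
	fclose(f);
	return 0;
}
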
 
index 7c191a5..7a1abb3 100644 (file)
 
 #define smnPCIE_ESM_CTRL                       0x111003D0
 
+static const struct smu_temperature_range smu13_thermal_policy[] =
+{
+       {-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
+       { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
+};
+
 static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT] = {
        MSG_MAP(TestMessage,                         PPSMC_MSG_TestMessage,                     0),
        MSG_MAP(GetSmuVersion,                       PPSMC_MSG_GetSmuVersion,                   1),
@@ -816,6 +822,52 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
                                                                       now) ? "*" : ""));
                break;
 
+       case SMU_VCLK:
+               ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &now);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Attempt to get current vclk failed!");
+                       return ret;
+               }
+
+               single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
+               ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Attempt to get vclk levels failed!");
+                       return ret;
+               }
+
+               for (i = 0; i < single_dpm_table->count; i++)
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                                       i, single_dpm_table->dpm_levels[i].value,
+                                       (clocks.num_levels == 1) ? "*" :
+                                       (aldebaran_freqs_in_same_level(
+                                                                      clocks.data[i].clocks_in_khz / 1000,
+                                                                      now) ? "*" : ""));
+               break;
+
+       case SMU_DCLK:
+               ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &now);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Attempt to get current dclk failed!");
+                       return ret;
+               }
+
+               single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
+               ret = aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Attempt to get dclk levels failed!");
+                       return ret;
+               }
+
+               for (i = 0; i < single_dpm_table->count; i++)
+                       size += sprintf(buf + size, "%d: %uMhz %s\n",
+                                       i, single_dpm_table->dpm_levels[i].value,
+                                       (clocks.num_levels == 1) ? "*" :
+                                       (aldebaran_freqs_in_same_level(
+                                                                      clocks.data[i].clocks_in_khz / 1000,
+                                                                      now) ? "*" : ""));
+               break;
+
        default:
                break;
        }
@@ -1316,10 +1368,13 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
 
 static bool aldebaran_is_dpm_running(struct smu_context *smu)
 {
-       int ret = 0;
+       int ret;
        uint32_t feature_mask[2];
        unsigned long feature_enabled;
+
        ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
+       if (ret)
+               return false;
        feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
                                          ((uint64_t)feature_mask[1] << 32));
        return !!(feature_enabled & SMC_DPM_FEATURE);
@@ -1810,10 +1865,8 @@ static int aldebaran_set_mp1_state(struct smu_context *smu,
        case PP_MP1_STATE_UNLOAD:
                return smu_cmn_set_mp1_state(smu, mp1_state);
        default:
-               return -EINVAL;
+               return 0;
        }
-
-       return 0;
 }
 
 static const struct pptable_funcs aldebaran_ppt_funcs = {
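
The smu13_thermal_policy table relocated into this file (see the top of this
diff) is expressed in millidegrees Celsius, with -273150 standing in for
absolute zero. A trivial sketch of the unit conversion:

/* Sketch: SMU temperature ranges are millidegrees Celsius. */
static inline int smu_temp_mdeg_to_deg(int mdeg)
{
	return mdeg / 1000;	/* 99000 -> 99; -273150 -> -273 (absolute zero) */
}
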
index 62df8b2..0882a1d 100644 (file)
@@ -764,6 +764,9 @@ void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
        case METRICS_VERSION(1, 2):
                structure_size = sizeof(struct gpu_metrics_v1_2);
                break;
+       case METRICS_VERSION(1, 3):
+               structure_size = sizeof(struct gpu_metrics_v1_3);
+               break;
        case METRICS_VERSION(2, 0):
                structure_size = sizeof(struct gpu_metrics_v2_0);
                break;
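
The switch above keys on a packed (format revision, content revision) pair, so
METRICS_VERSION(1, 3) selects the new gpu_metrics_v1_3 size. A sketch of the
pairing macro; treat the exact shift as an assumption about smu_cmn.c:

/* Sketch: combine frev/crev into a single switch key. */
#define METRICS_VERSION(a, b)	((a) << 16 | (b))
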
index 5cf45aa..eda832f 100644 (file)
@@ -178,6 +178,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
                { .format = DRM_FORMAT_ARGB16161616F,   .depth = 0,  .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
                { .format = DRM_FORMAT_ABGR16161616F,   .depth = 0,  .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
                { .format = DRM_FORMAT_AXBXGXRX106106106106, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+               { .format = DRM_FORMAT_XRGB16161616,    .depth = 0,  .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
+               { .format = DRM_FORMAT_XBGR16161616,    .depth = 0,  .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
+               { .format = DRM_FORMAT_ARGB16161616,    .depth = 0,  .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+               { .format = DRM_FORMAT_ABGR16161616,    .depth = 0,  .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
                { .format = DRM_FORMAT_RGB888_A8,       .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
                { .format = DRM_FORMAT_BGR888_A8,       .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
                { .format = DRM_FORMAT_XRGB8888_A8,     .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
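
All four new 16 bpc entries are single-plane with cpp = 8 (8 bytes per pixel),
so pitch math works the same way as for the existing 64 bpp half-float rows.
A quick sketch:

#include <stdint.h>

/* Sketch: minimum pitch for a single-plane 64 bpp format such as
 * DRM_FORMAT_XRGB16161616, where cpp[0] == 8 per the table above. */
static inline uint32_t fb_min_pitch(uint32_t width, uint32_t cpp)
{
	return width * cpp;	/* e.g. 1920 * 8 = 15360 bytes per row */
}
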
index 42a8afa..73ea518 100644 (file)
@@ -7439,7 +7439,7 @@ static void cik_irq_disable(struct radeon_device *rdev)
 }
 
 /**
- * cik_irq_disable - disable interrupts for suspend
+ * cik_irq_suspend - disable interrupts for suspend
  *
  * @rdev: radeon_device pointer
  *
index 4025a4e..b07befe 100644 (file)
@@ -45,7 +45,6 @@ void sumo_rlc_fini(struct radeon_device *rdev);
 int sumo_rlc_init(struct radeon_device *rdev);
 void evergreen_gpu_pci_config_reset(struct radeon_device *rdev);
 u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
-void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
 u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);
 int evergreen_rlc_resume(struct radeon_device *rdev);
 struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev);
index fcfcaec..3c4e7c1 100644 (file)
@@ -1406,7 +1406,7 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
 }
 
 /**
- * r100_cs_packet_next_vline() - parse userspace VLINE packet
+ * r100_cs_packet_parse_vline() - parse userspace VLINE packet
  * @p:         parser structure holding parsing context.
  *
  * Userspace sends a special sequence for VLINE waits.
index 1cf2a5e..1e00f6b 100644 (file)
@@ -187,7 +187,6 @@ extern int rv370_pcie_gart_init(struct radeon_device *rdev);
 extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
 extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
 extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
-extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
 
 /*
  * r420,r423,rv410
@@ -404,7 +403,6 @@ void r600_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock);
 void r600_hdmi_audio_workaround(struct drm_encoder *encoder);
 int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
 void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
-int r600_mc_wait_for_idle(struct radeon_device *rdev);
 u32 r600_get_xclk(struct radeon_device *rdev);
 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
 int rv6xx_get_temp(struct radeon_device *rdev);
index 4816250..80a3bee 100644 (file)
@@ -405,7 +405,7 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
 }
 
 /**
- * cs_parser_fini() - clean parser states
+ * radeon_cs_parser_fini() - clean parser states
  * @parser:    parser structure holding parsing context.
  * @error:     error number
  * @backoff:   indicator to backoff the reservation
index 2dc9c9f..36a38ad 100644 (file)
@@ -51,7 +51,7 @@
  */
 
 /**
- * radeon_vm_num_pde - return the number of page directory entries
+ * radeon_vm_num_pdes - return the number of page directory entries
  *
  * @rdev: radeon_device pointer
  *
@@ -626,7 +626,7 @@ static uint32_t radeon_vm_page_flags(uint32_t flags)
 }
 
 /**
- * radeon_vm_update_pdes - make sure that page directory is valid
+ * radeon_vm_update_page_directory - make sure that page directory is valid
  *
  * @rdev: radeon_device pointer
  * @vm: requested vm
index ac5a28e..8f1350e 100644 (file)
@@ -895,6 +895,18 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
                              (old_plane_state) = (__state)->planes[__i].old_state,\
                              (new_plane_state) = (__state)->planes[__i].new_state, 1))
 
+/**
+ * for_each_new_plane_in_state_reverse - the same as
+ * for_each_oldnew_plane_in_state_reverse, except that only new state is tracked
+ */
+#define for_each_new_plane_in_state_reverse(__state, plane, new_plane_state, __i) \
+       for ((__i) = ((__state)->dev->mode_config.num_total_plane - 1); \
+            (__i) >= 0;                                                \
+            (__i)--)                                                   \
+               for_each_if ((__state)->planes[__i].ptr &&              \
+                            ((plane) = (__state)->planes[__i].ptr,     \
+                             (new_plane_state) = (__state)->planes[__i].new_state, 1))
+
 /**
  * for_each_old_plane_in_state - iterate over all planes in an atomic update
  * @__state: &struct drm_atomic_state pointer
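
A usage sketch of the new reverse iterator; the surrounding function is
illustrative only, mirroring how the forward plane iterators are driven from
commit code:

/* Sketch: walk planes top-down during commit, tracking new state only. */
static void example_commit_planes_reverse(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int i;

	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
		/* program hardware from new_plane_state ... */
	}
}
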
index 2063a1c..91a2318 100644 (file)
@@ -136,6 +136,10 @@ extern "C" {
  * accessing it with various hw blocks
  */
 #define AMDGPU_GEM_CREATE_ENCRYPTED            (1 << 10)
+/* Flag that BO will be used only in preemptible context, which does
+ * not require GTT memory accounting
+ */
+#define AMDGPU_GEM_CREATE_PREEMPTIBLE          (1 << 11)
 
 struct drm_amdgpu_gem_create_in  {
        /** the requested memory size */
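
Since userspace may not set AMDGPU_GEM_CREATE_PREEMPTIBLE, its only legitimate
use is in-kernel. A rough driver-side allocation sketch; the amdgpu_bo_param
field names are assumed from the 5.14-era driver, with size/resv standing in
for caller context:

/* Sketch: kernel-internal SG BO flagged preemptible, exempting it from
 * GTT memory accounting per the flag's comment above. */
struct amdgpu_bo_param bp = {
	.size		= size,
	.byte_align	= PAGE_SIZE,
	.domain		= AMDGPU_GEM_DOMAIN_GTT,
	.flags		= AMDGPU_GEM_CREATE_PREEMPTIBLE,
	.type		= ttm_bo_type_sg,
	.resv		= resv,
};
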
@@ -753,6 +757,8 @@ struct drm_amdgpu_cs_chunk_data {
        #define AMDGPU_INFO_VBIOS_SIZE          0x1
        /* Subquery id: Query vbios image */
        #define AMDGPU_INFO_VBIOS_IMAGE         0x2
+       /* Subquery id: Query vbios info */
+       #define AMDGPU_INFO_VBIOS_INFO          0x3
 /* Query UVD handles */
 #define AMDGPU_INFO_NUM_HANDLES                        0x1C
 /* Query sensor related information */
@@ -946,6 +952,15 @@ struct drm_amdgpu_info_firmware {
        __u32 feature;
 };
 
+struct drm_amdgpu_info_vbios {
+       __u8 name[64];
+       __u8 vbios_pn[64];
+       __u32 version;
+       __u32 pad;
+       __u8 vbios_ver_str[32];
+       __u8 date[32];
+};
+
 #define AMDGPU_VRAM_TYPE_UNKNOWN 0
 #define AMDGPU_VRAM_TYPE_GDDR1 1
 #define AMDGPU_VRAM_TYPE_DDR2  2
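
A hedged userspace sketch of the new subquery, reusing the existing
AMDGPU_INFO_VBIOS plumbing; the vbios_info request member of struct
drm_amdgpu_info is assumed from the surrounding UAPI:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "amdgpu_drm.h"

/* Sketch: fetch the new vbios info block via DRM_AMDGPU_INFO. */
static int query_vbios_info(int fd, struct drm_amdgpu_info_vbios *out)
{
	struct drm_amdgpu_info request;

	memset(&request, 0, sizeof(request));
	request.return_pointer = (uintptr_t)out;
	request.return_size = sizeof(*out);
	request.query = AMDGPU_INFO_VBIOS;
	request.vbios_info.type = AMDGPU_INFO_VBIOS_INFO;

	return drmCommandWrite(fd, DRM_AMDGPU_INFO, &request, sizeof(request));
}
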
index f76de49..f715632 100644 (file)
@@ -168,6 +168,13 @@ extern "C" {
 #define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
 #define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
 
+/* 64 bpp RGB */
+#define DRM_FORMAT_XRGB16161616        fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_XBGR16161616        fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
+
+#define DRM_FORMAT_ARGB16161616        fourcc_code('A', 'R', '4', '8') /* [63:0] A:R:G:B 16:16:16:16 little endian */
+#define DRM_FORMAT_ABGR16161616        fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
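
A small sketch of how one pixel of the new fixed-point formats packs, per the
[63:0] x:R:G:B 16:16:16:16 little-endian layout documented above:

#include <stdint.h>

/* Sketch: pack 16-bit fixed-point channels into a DRM_FORMAT_XRGB16161616
 * pixel: [63:48] unused, [47:32] R, [31:16] G, [15:0] B. */
static inline uint64_t pack_xrgb16161616(uint16_t r, uint16_t g, uint16_t b)
{
	return (uint64_t)r << 32 | (uint64_t)g << 16 | b;
}
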
+
 /*
  * Floating point 64bpp RGB
  * IEEE 754-2008 binary16 half-precision float