Merge tag 'drm-misc-next-fixes-2021-09-09' of git://anongit.freedesktop.org/drm/drm...
authorDave Airlie <airlied@redhat.com>
Fri, 10 Sep 2021 04:18:33 +0000 (14:18 +1000)
committerDave Airlie <airlied@redhat.com>
Fri, 10 Sep 2021 04:18:49 +0000 (14:18 +1000)
drm-misc-next-fixes for v5.15:
- Make some dma-buf config options depend on DMA_SHARED_BUFFER.
- Handle multiplication overflow of fbdev xres/yres in the core.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/37c5fe2e-5be8-45c3-286b-d8d536a5cef2@linux.intel.com
277 files changed:
Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml [new file with mode: 0644]
drivers/gpu/drm/Kconfig
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_pll.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pll.h
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
drivers/gpu/drm/amd/amdgpu/amdgpu_securedisplay.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/mca_v3_0.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/mca_v3_0.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/ta_xgmi_if.h
drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_panel_cntl.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_offset.h
drivers/gpu/drm/amd/include/asic_reg/thm/thm_11_0_2_sh_mask.h
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
drivers/gpu/drm/amd/pm/inc/hwmgr.h
drivers/gpu/drm/amd/pm/inc/smu_types.h
drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_thermal.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_thermal.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega12_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega20_thermal.h
drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
drivers/gpu/drm/etnaviv/etnaviv_drv.c
drivers/gpu/drm/etnaviv/etnaviv_drv.h
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/etnaviv/etnaviv_hwdb.c
drivers/gpu/drm/exynos/exynos_drm_dma.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/gt/intel_gt_requests.h
drivers/gpu/drm/i915/gt/intel_migrate.c
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
drivers/gpu/drm/i915/i915_module.c
drivers/gpu/drm/mediatek/Makefile
drivers/gpu/drm/mediatek/mtk_disp_aal.c [new file with mode: 0644]
drivers/gpu/drm/mediatek/mtk_disp_drv.h
drivers/gpu/drm/mediatek/mtk_disp_rdma.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_drm_drv.h
drivers/gpu/drm/mediatek/mtk_drm_gem.c
drivers/gpu/drm/mediatek/mtk_drm_gem.h
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.h
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_hfi.c
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/adreno/adreno_gpu.h
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/disp/msm_disp_snapshot.h
drivers/gpu/drm/msm/dp/dp_aux.c
drivers/gpu/drm/msm/dp/dp_catalog.c
drivers/gpu/drm/msm/dp/dp_ctrl.c
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/msm/dp/dp_link.c
drivers/gpu/drm/msm/dp/dp_panel.c
drivers/gpu/drm/msm/dp/dp_power.c
drivers/gpu/drm/msm/dsi/dsi.c
drivers/gpu/drm/msm/dsi/dsi.h
drivers/gpu/drm/msm/dsi/dsi.xml.h
drivers/gpu/drm/msm/dsi/dsi_cfg.c
drivers/gpu/drm/msm/dsi/dsi_cfg.h
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
drivers/gpu/drm/msm/dsi/phy/dsi_phy_10nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_prime.c
drivers/gpu/drm/msm/msm_kms.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/tegra/Kconfig
drivers/gpu/drm/tegra/Makefile
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/dc.h
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/drm.h
drivers/gpu/drm/tegra/firewall.c [new file with mode: 0644]
drivers/gpu/drm/tegra/gem.c
drivers/gpu/drm/tegra/gem.h
drivers/gpu/drm/tegra/plane.c
drivers/gpu/drm/tegra/plane.h
drivers/gpu/drm/tegra/submit.c [new file with mode: 0644]
drivers/gpu/drm/tegra/submit.h [new file with mode: 0644]
drivers/gpu/drm/tegra/uapi.c [new file with mode: 0644]
drivers/gpu/drm/tegra/uapi.h [new file with mode: 0644]
drivers/gpu/drm/tegra/vic.c
drivers/gpu/host1x/Makefile
drivers/gpu/host1x/cdma.c
drivers/gpu/host1x/fence.c [new file with mode: 0644]
drivers/gpu/host1x/fence.h [new file with mode: 0644]
drivers/gpu/host1x/hw/channel_hw.c
drivers/gpu/host1x/hw/debug_hw.c
drivers/gpu/host1x/hw/debug_hw_1x01.c
drivers/gpu/host1x/hw/debug_hw_1x06.c
drivers/gpu/host1x/hw/hw_host1x02_uclass.h
drivers/gpu/host1x/hw/hw_host1x04_uclass.h
drivers/gpu/host1x/hw/hw_host1x05_uclass.h
drivers/gpu/host1x/hw/hw_host1x06_uclass.h
drivers/gpu/host1x/hw/hw_host1x07_uclass.h
drivers/gpu/host1x/intr.c
drivers/gpu/host1x/intr.h
drivers/gpu/host1x/job.c
drivers/gpu/host1x/job.h
drivers/gpu/host1x/syncpt.c
drivers/gpu/host1x/syncpt.h
include/dt-bindings/phy/phy.h
include/linux/host1x.h
include/uapi/drm/tegra_drm.h

index 8238a86..d30428b 100644 (file)
@@ -7,7 +7,7 @@ channel output.
 
 Required properties:
 - compatible: "mediatek,<chip>-dsi"
-- the supported chips are mt2701, mt7623, mt8173 and mt8183.
+- the supported chips are mt2701, mt7623, mt8167, mt8173 and mt8183.
 - reg: Physical base address and length of the controller's registers
 - interrupts: The interrupt signal from the function block.
 - clocks: device clocks
index 76348b7..760eec6 100644 (file)
@@ -64,6 +64,18 @@ properties:
       Indicates if the DSI controller is driving a panel which needs
       2 DSI links.
 
+  assigned-clocks:
+    minItems: 2
+    maxItems: 2
+    description: |
+      Parents of "byte" and "pixel" for the given platform.
+
+  assigned-clock-parents:
+    minItems: 2
+    maxItems: 2
+    description: |
+      The Byte clock and Pixel clock PLL outputs provided by a DSI PHY block.
+
   power-domains:
     maxItems: 1
 
@@ -119,6 +131,8 @@ required:
   - clock-names
   - phys
   - phy-names
+  - assigned-clocks
+  - assigned-clock-parents
   - power-domains
   - operating-points-v2
   - ports
@@ -159,6 +173,9 @@ examples:
            phys = <&dsi0_phy>;
            phy-names = "dsi";
 
+           assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
+           assigned-clock-parents = <&dsi_phy 0>, <&dsi_phy 1>;
+
            power-domains = <&rpmhpd SC7180_CX>;
            operating-points-v2 = <&dsi_opp_table>;
 
diff --git a/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml b/Documentation/devicetree/bindings/display/msm/dsi-phy-7nm.yaml
new file mode 100644 (file)
index 0000000..4265399
--- /dev/null
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/msm/dsi-phy-7nm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Display DSI 7nm PHY
+
+maintainers:
+  - Jonathan Marek <jonathan@marek.ca>
+
+allOf:
+  - $ref: dsi-phy-common.yaml#
+
+properties:
+  compatible:
+    oneOf:
+      - const: qcom,dsi-phy-7nm
+      - const: qcom,dsi-phy-7nm-8150
+      - const: qcom,sc7280-dsi-phy-7nm
+
+  reg:
+    items:
+      - description: dsi phy register set
+      - description: dsi phy lane register set
+      - description: dsi pll register set
+
+  reg-names:
+    items:
+      - const: dsi_phy
+      - const: dsi_phy_lane
+      - const: dsi_pll
+
+  vdds-supply:
+    description: |
+      Connected to VDD_A_DSI_PLL_0P9 pin (or VDDA_DSI{0,1}_PLL_0P9 for sm8150)
+
+  phy-type:
+    description: D-PHY (default) or C-PHY mode
+    enum: [ 10, 11 ]
+    default: 10
+
+required:
+  - compatible
+  - reg
+  - reg-names
+  - vdds-supply
+
+unevaluatedProperties: false
+
+examples:
+  - |
+     #include <dt-bindings/clock/qcom,dispcc-sm8250.h>
+     #include <dt-bindings/clock/qcom,rpmh.h>
+
+     dsi-phy@ae94400 {
+         compatible = "qcom,dsi-phy-7nm";
+         reg = <0x0ae94400 0x200>,
+               <0x0ae94600 0x280>,
+               <0x0ae94900 0x260>;
+         reg-names = "dsi_phy",
+                     "dsi_phy_lane",
+                     "dsi_pll";
+
+         #clock-cells = <1>;
+         #phy-cells = <0>;
+
+         vdds-supply = <&vreg_l5a_0p88>;
+         clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
+                  <&rpmhcc RPMH_CXO_CLK>;
+         clock-names = "iface", "ref";
+     };
index 0d37235..cea777a 100644 (file)
@@ -256,7 +256,6 @@ config DRM_AMDGPU
        select HWMON
        select BACKLIGHT_CLASS_DEVICE
        select INTERVAL_TREE
-       select CHASH
        help
          Choose this option if you have a recent AMD Radeon graphics card.
 
index 0d814c9..8d07481 100644 (file)
@@ -58,7 +58,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
        amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
        amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
        amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o \
-       amdgpu_eeprom.o
+       amdgpu_eeprom.o amdgpu_mca.o
 
 amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
 
@@ -189,6 +189,10 @@ amdgpu-y += \
 amdgpu-y += \
        amdgpu_reset.o
 
+# add MCA block
+amdgpu-y += \
+       mca_v3_0.o
+
 # add amdkfd interfaces
 amdgpu-y += amdgpu_amdkfd.o
 
index 96e895d..dc3c6b3 100644 (file)
 #include "amdgpu_df.h"
 #include "amdgpu_smuio.h"
 #include "amdgpu_fdinfo.h"
+#include "amdgpu_mca.h"
 
 #define MAX_GPU_INSTANCE               16
 
@@ -1009,6 +1010,9 @@ struct amdgpu_device {
        /* df */
        struct amdgpu_df                df;
 
+       /* MCA */
+       struct amdgpu_mca               mca;
+
        struct amdgpu_ip_block          ip_blocks[AMDGPU_MAX_IP_NUM];
        uint32_t                        harvest_ip_mask;
        int                             num_ip_blocks;
@@ -1271,6 +1275,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 
 #define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
 
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+
 /* Common functions */
 bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
index 260ba01..4811b0f 100644 (file)
@@ -1040,7 +1040,7 @@ void amdgpu_acpi_detect(void)
  */
 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
 {
-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_PM_SLEEP)
+#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
        if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
                if (adev->flags & AMD_IS_APU)
                        return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
index 7b46ba5..3003ee1 100644 (file)
@@ -714,7 +714,6 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
        ret = dma_fence_wait(f, false);
 
 err_ib_sched:
-       dma_fence_put(f);
        amdgpu_job_free(job);
 err:
        return ret;
index a5434b7..46cd4ee 100644 (file)
@@ -44,4 +44,5 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
        .get_atc_vmid_pasid_mapping_info =
                                kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
        .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+       .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
 };
index 6409d6b..5a7f680 100644 (file)
@@ -305,5 +305,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
                                kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
        .set_vm_context_page_table_base =
                                kgd_gfx_v9_set_vm_context_page_table_base,
-       .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy
+       .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
+       .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
 };
index 491acdf..960acf6 100644 (file)
@@ -560,6 +560,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
        case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
                type = RESET_WAVES;
                break;
+       case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
+               type = SAVE_WAVES;
+               break;
        default:
                type = DRAIN_PIPE;
                break;
@@ -754,6 +757,33 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
        adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
+static void program_trap_handler_settings(struct kgd_dev *kgd,
+               uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       lock_srbm(kgd, 0, 0, 0, vmid);
+
+       /*
+        * Program TBA registers
+        */
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
+                       lower_32_bits(tba_addr >> 8));
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
+                       upper_32_bits(tba_addr >> 8) |
+                       (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));
+
+       /*
+        * Program TMA registers
+        */
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
+                       lower_32_bits(tma_addr >> 8));
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
+                       upper_32_bits(tma_addr >> 8));
+
+       unlock_srbm(kgd);
+}
+
 const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
        .program_sh_mem_settings = kgd_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -774,4 +804,5 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
        .get_atc_vmid_pasid_mapping_info =
                        get_atc_vmid_pasid_mapping_info,
        .set_vm_context_page_table_base = set_vm_context_page_table_base,
+       .program_trap_handler_settings = program_trap_handler_settings,
 };
index 1f5620c..dac0d75 100644 (file)
@@ -537,6 +537,9 @@ static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
        case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
                type = RESET_WAVES;
                break;
+       case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
+               type = SAVE_WAVES;
+               break;
        default:
                type = DRAIN_PIPE;
                break;
@@ -658,6 +661,33 @@ static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t v
        adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
+static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd,
+                       uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       lock_srbm(kgd, 0, 0, 0, vmid);
+
+       /*
+        * Program TBA registers
+        */
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
+                       lower_32_bits(tba_addr >> 8));
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
+                       upper_32_bits(tba_addr >> 8) |
+                       (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));
+
+       /*
+        * Program TMA registers
+        */
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
+                       lower_32_bits(tma_addr >> 8));
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
+                        upper_32_bits(tma_addr >> 8));
+
+       unlock_srbm(kgd);
+}
+
 #if 0
 uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
                                uint32_t trap_debug_wave_launch_mode,
@@ -820,6 +850,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
        .address_watch_get_offset = address_watch_get_offset_v10_3,
        .get_atc_vmid_pasid_mapping_info = NULL,
        .set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
+       .program_trap_handler_settings = program_trap_handler_settings_v10_3,
 #if 0
        .enable_debug_trap = enable_debug_trap_v10_3,
        .disable_debug_trap = disable_debug_trap_v10_3,
index ed3014f..bcc1cbe 100644 (file)
@@ -42,7 +42,8 @@
 enum hqd_dequeue_request_type {
        NO_ACTION = 0,
        DRAIN_PIPE,
-       RESET_WAVES
+       RESET_WAVES,
+       SAVE_WAVES
 };
 
 static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
@@ -566,6 +567,9 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
        case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
                type = RESET_WAVES;
                break;
+       case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
+               type = SAVE_WAVES;
+               break;
        default:
                type = DRAIN_PIPE;
                break;
@@ -878,6 +882,32 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
                                adev->gfx.cu_info.max_waves_per_simd;
 }
 
+void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
+                        uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
+{
+       struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+       lock_srbm(kgd, 0, 0, 0, vmid);
+
+       /*
+        * Program TBA registers
+        */
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
+                        lower_32_bits(tba_addr >> 8));
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
+                        upper_32_bits(tba_addr >> 8));
+
+       /*
+        * Program TMA registers
+        */
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
+                       lower_32_bits(tma_addr >> 8));
+       WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
+                       upper_32_bits(tma_addr >> 8));
+
+       unlock_srbm(kgd);
+}
+
 const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
        .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -899,4 +929,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
                        kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
        .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
        .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
+       .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
 };
index e64deba..c635911 100644 (file)
@@ -65,3 +65,5 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
                        uint32_t vmid, uint64_t page_table_base);
 void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
                int *pasid_wave_cnt, int *max_waves_per_cu);
+void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
+               uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr);
index 8f53837..97178b3 100644 (file)
@@ -468,14 +468,18 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
        return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
 }
 
-/*
- * Helper function to query RAS EEPROM address
- *
- * @adev: amdgpu_device pointer
+/**
+ * amdgpu_atomfirmware_ras_rom_addr -- Get the RAS EEPROM addr from VBIOS
+ * adev: amdgpu_device pointer
+ * i2c_address: pointer to u8; if not NULL, will contain
+ *    the RAS EEPROM address if the function returns true
  *
- * Return true if vbios supports ras rom address reporting
+ * Return true if VBIOS supports RAS EEPROM address reporting,
+ * else return false. If true and @i2c_address is not NULL,
+ * will contain the RAS ROM address.
  */
-bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address)
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev,
+                                     u8 *i2c_address)
 {
        struct amdgpu_mode_info *mode_info = &adev->mode_info;
        int index;
@@ -483,27 +487,39 @@ bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_a
        union firmware_info *firmware_info;
        u8 frev, crev;
 
-       if (i2c_address == NULL)
-               return false;
-
-       *i2c_address = 0;
-
        index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
-                       firmwareinfo);
+                                           firmwareinfo);
 
        if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
-                               index, &size, &frev, &crev, &data_offset)) {
+                                         index, &size, &frev, &crev,
+                                         &data_offset)) {
                /* support firmware_info 3.4 + */
                if ((frev == 3 && crev >=4) || (frev > 3)) {
                        firmware_info = (union firmware_info *)
                                (mode_info->atom_context->bios + data_offset);
-                       *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+                       /* The ras_rom_i2c_slave_addr should ideally
+                        * be a 19-bit EEPROM address, which would be
+                        * used as is by the driver; see top of
+                        * amdgpu_eeprom.c.
+                        *
+                        * When this is the case, 0 is of course a
+                        * valid RAS EEPROM address, in which case,
+                        * we'll drop the first "if (firm...)" and only
+                        * leave the check for the pointer.
+                        *
+                        * The reason this works right now is because
+                        * ras_rom_i2c_slave_addr contains the EEPROM
+                        * device type qualifier 1010b in the top 4
+                        * bits.
+                        */
+                       if (firmware_info->v34.ras_rom_i2c_slave_addr) {
+                               if (i2c_address)
+                                       *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+                               return true;
+                       }
                }
        }
 
-       if (*i2c_address != 0)
-               return true;
-
        return false;
 }
 
index 536005b..2771288 100644 (file)
@@ -1414,7 +1414,7 @@ no_preempt:
                        continue;
                }
                job = to_amdgpu_job(s_job);
-               if (preempted && job->fence == fence)
+               if (preempted && (&job->hw_fence) == fence)
                        /* mark the job as preempted */
                        job->preemption_status |= AMDGPU_IB_PREEMPTED;
        }
index d7cc45e..41c6b3a 100644 (file)
@@ -2829,12 +2829,11 @@ static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
        struct amdgpu_device *adev =
                container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);
 
-       mutex_lock(&adev->gfx.gfx_off_mutex);
-       if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-               if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
-                       adev->gfx.gfx_off_state = true;
-       }
-       mutex_unlock(&adev->gfx.gfx_off_mutex);
+       WARN_ON_ONCE(adev->gfx.gfx_off_state);
+       WARN_ON_ONCE(adev->gfx.gfx_off_req_count);
+
+       if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
+               adev->gfx.gfx_off_state = true;
 }
 
 /**
@@ -3826,7 +3825,10 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 {
        dev_info(adev->dev, "amdgpu: finishing device.\n");
        flush_delayed_work(&adev->delayed_init_work);
-       ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+       if (adev->mman.initialized) {
+               flush_delayed_work(&adev->mman.bdev.wq);
+               ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
+       }
        adev->shutdown = true;
 
        /* make sure IB test finished before entering exclusive mode
@@ -4448,7 +4450,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
 int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
                                 struct amdgpu_reset_context *reset_context)
 {
-       int i, r = 0;
+       int i, j, r = 0;
        struct amdgpu_job *job = NULL;
        bool need_full_reset =
                test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
@@ -4472,6 +4474,17 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
                if (!ring || !ring->sched.thread)
                        continue;
 
+               /*clear job fence from fence drv to avoid force_completion
+                *leave NULL and vm flush fence in fence drv */
+               for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
+                       struct dma_fence *old, **ptr;
+
+                       ptr = &ring->fence_drv.fences[j];
+                       old = rcu_dereference_protected(*ptr, 1);
+                       if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
+                               RCU_INIT_POINTER(*ptr, NULL);
+                       }
+               }
                /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
                amdgpu_fence_driver_force_completion(ring);
        }
index 43e7b61..ada7bc1 100644 (file)
@@ -299,6 +299,9 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
                                  ip->major, ip->minor,
                                  ip->revision);
 
+                       if (le16_to_cpu(ip->hw_id) == VCN_HWID)
+                               adev->vcn.num_vcn_inst++;
+
                        for (k = 0; k < num_base_address; k++) {
                                /*
                                 * convert the endianness of base addresses in place,
@@ -385,7 +388,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 {
        struct binary_header *bhdr;
        struct harvest_table *harvest_info;
-       int i;
+       int i, vcn_harvest_count = 0;
 
        bhdr = (struct binary_header *)adev->mman.discovery_bin;
        harvest_info = (struct harvest_table *)(adev->mman.discovery_bin +
@@ -397,8 +400,7 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
 
                switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
                case VCN_HWID:
-                       adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
-                       adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+                       vcn_harvest_count++;
                        break;
                case DMU_HWID:
                        adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
@@ -407,6 +409,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
                        break;
                }
        }
+       if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
+               adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+               adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+       }
 }
 
 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
index 8e5a7ac..7a73167 100644 (file)
@@ -522,6 +522,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev,
                        break;
                case CHIP_RENOIR:
                case CHIP_VANGOGH:
+               case CHIP_YELLOW_CARP:
                        domain |= AMDGPU_GEM_DOMAIN_GTT;
                        break;
 
index b664029..f18240f 100644 (file)
@@ -1181,7 +1181,12 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+       {0x1002, 0x73A5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+       {0x1002, 0x73A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+       {0x1002, 0x73A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+       {0x1002, 0x73AC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
+       {0x1002, 0x73AD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
        {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
@@ -1197,6 +1202,11 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
        {0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
        {0x1002, 0x73C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+       {0x1002, 0x73DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+       {0x1002, 0x73DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+       {0x1002, 0x73DC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+       {0x1002, 0x73DD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
+       {0x1002, 0x73DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
        {0x1002, 0x73DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
 
        /* DIMGREY_CAVEFISH */
@@ -1204,6 +1214,13 @@ static const struct pci_device_id pciidlist[] = {
        {0x1002, 0x73E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
        {0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
        {0x1002, 0x73E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+       {0x1002, 0x73E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+       {0x1002, 0x73E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+       {0x1002, 0x73EA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+       {0x1002, 0x73EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+       {0x1002, 0x73EC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+       {0x1002, 0x73ED, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
+       {0x1002, 0x73EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
        {0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
 
        /* Aldebaran */
index 5a143ca..cd0acbe 100644 (file)
@@ -273,9 +273,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
        return 0;
 
 out:
-       if (abo) {
-
-       }
        if (fb && ret) {
                drm_gem_object_put(gobj);
                drm_framebuffer_unregister_private(fb);
index d94c541..5a6857c 100644 (file)
@@ -59,6 +59,7 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
        uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
        struct drm_file *file = f->private_data;
        struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
+       struct amdgpu_bo *root;
        int ret;
 
        ret = amdgpu_file_to_fpriv(f, &fpriv);
@@ -69,13 +70,19 @@ void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
        dev = PCI_SLOT(adev->pdev->devfn);
        fn = PCI_FUNC(adev->pdev->devfn);
 
-       ret = amdgpu_bo_reserve(fpriv->vm.root.bo, false);
+       root = amdgpu_bo_ref(fpriv->vm.root.bo);
+       if (!root)
+               return;
+
+       ret = amdgpu_bo_reserve(root, false);
        if (ret) {
                DRM_ERROR("Fail to reserve bo\n");
                return;
        }
        amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem);
-       amdgpu_bo_unreserve(fpriv->vm.root.bo);
+       amdgpu_bo_unreserve(root);
+       amdgpu_bo_unref(&root);
+
        seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus,
                        dev, fn, fpriv->vm.pasid);
        seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
index 6ed5366..8d682be 100644 (file)
@@ -129,30 +129,50 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
  *
  * @ring: ring the fence is associated with
  * @f: resulting fence object
+ * @job: job the fence is embedded in
  * @flags: flags to pass into the subordinate .emit_fence() call
  *
  * Emits a fence command on the requested ring (all asics).
  * Returns 0 on success, -ENOMEM on failure.
  */
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
                      unsigned flags)
 {
        struct amdgpu_device *adev = ring->adev;
-       struct amdgpu_fence *fence;
+       struct dma_fence *fence;
+       struct amdgpu_fence *am_fence;
        struct dma_fence __rcu **ptr;
        uint32_t seq;
        int r;
 
-       fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
-       if (fence == NULL)
-               return -ENOMEM;
+       if (job == NULL) {
+               /* create a sperate hw fence */
+               am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
+               if (am_fence == NULL)
+                       return -ENOMEM;
+               fence = &am_fence->base;
+               am_fence->ring = ring;
+       } else {
+               /* take use of job-embedded fence */
+               fence = &job->hw_fence;
+       }
 
        seq = ++ring->fence_drv.sync_seq;
-       fence->ring = ring;
-       dma_fence_init(&fence->base, &amdgpu_fence_ops,
-                      &ring->fence_drv.lock,
-                      adev->fence_context + ring->idx,
-                      seq);
+       if (job != NULL && job->job_run_counter) {
+               /* reinit seq for resubmitted jobs */
+               fence->seqno = seq;
+       } else {
+               dma_fence_init(fence, &amdgpu_fence_ops,
+                               &ring->fence_drv.lock,
+                               adev->fence_context + ring->idx,
+                               seq);
+       }
+
+       if (job != NULL) {
+               /* mark this fence has a parent job */
+               set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
+       }
+
        amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
                               seq, flags | AMDGPU_FENCE_FLAG_INT);
        pm_runtime_get_noresume(adev_to_drm(adev)->dev);
@@ -175,9 +195,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
        /* This function can't be called concurrently anyway, otherwise
         * emitting the fence would mess up the hardware ring buffer.
         */
-       rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
+       rcu_assign_pointer(*ptr, dma_fence_get(fence));
 
-       *f = &fence->base;
+       *f = fence;
 
        return 0;
 }
@@ -532,6 +552,9 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
                if (!ring || !ring->fence_drv.initialized)
                        continue;
 
+               if (!ring->no_scheduler)
+                       drm_sched_stop(&ring->sched, NULL);
+
                /* You can't wait for HW to signal if it's gone */
                if (!drm_dev_is_unplugged(&adev->ddev))
                        r = amdgpu_fence_wait_empty(ring);
@@ -591,6 +614,11 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
                if (!ring || !ring->fence_drv.initialized)
                        continue;
 
+               if (!ring->no_scheduler) {
+                       drm_sched_resubmit_jobs(&ring->sched);
+                       drm_sched_start(&ring->sched, true);
+               }
+
                /* enable the interrupt */
                if (ring->fence_drv.irq_src)
                        amdgpu_irq_get(adev, ring->fence_drv.irq_src,
@@ -621,8 +649,16 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
 
 static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
 {
-       struct amdgpu_fence *fence = to_amdgpu_fence(f);
-       return (const char *)fence->ring->name;
+       struct amdgpu_ring *ring;
+
+       if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
+               struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+
+               ring = to_amdgpu_ring(job->base.sched);
+       } else {
+               ring = to_amdgpu_fence(f)->ring;
+       }
+       return (const char *)ring->name;
 }
 
 /**
@@ -635,13 +671,20 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
  */
 static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 {
-       struct amdgpu_fence *fence = to_amdgpu_fence(f);
-       struct amdgpu_ring *ring = fence->ring;
+       struct amdgpu_ring *ring;
+
+       if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
+               struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
+
+               ring = to_amdgpu_ring(job->base.sched);
+       } else {
+               ring = to_amdgpu_fence(f)->ring;
+       }
 
        if (!timer_pending(&ring->fence_drv.fallback_timer))
                amdgpu_fence_schedule_fallback(ring);
 
-       DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
+       DMA_FENCE_TRACE(f, "armed on ring %i!\n", ring->idx);
 
        return true;
 }
@@ -656,8 +699,20 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
 static void amdgpu_fence_free(struct rcu_head *rcu)
 {
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
-       struct amdgpu_fence *fence = to_amdgpu_fence(f);
-       kmem_cache_free(amdgpu_fence_slab, fence);
+
+       if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
+       /* free job if fence has a parent job */
+               struct amdgpu_job *job;
+
+               job = container_of(f, struct amdgpu_job, hw_fence);
+               kfree(job);
+       } else {
+       /* free fence_slab if it's separated fence*/
+               struct amdgpu_fence *fence;
+
+               fence = to_amdgpu_fence(f);
+               kmem_cache_free(amdgpu_fence_slab, fence);
+       }
 }
 
 /**
@@ -680,6 +735,7 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
        .release = amdgpu_fence_release,
 };
 
+
 /*
  * Fence debugfs
  */
index b364051..76efd5f 100644 (file)
@@ -76,7 +76,7 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
        if (adev->dummy_page_addr)
                return 0;
        adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0,
-                                            PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+                                            PAGE_SIZE, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) {
                dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
                adev->dummy_page_addr = 0;
@@ -96,8 +96,8 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
 {
        if (!adev->dummy_page_addr)
                return;
-       pci_unmap_page(adev->pdev, adev->dummy_page_addr,
-                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+       dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
+                      DMA_BIDIRECTIONAL);
        adev->dummy_page_addr = 0;
 }
 
index c6f2fb9..d6aa032 100644 (file)
@@ -341,21 +341,18 @@ retry:
        r = amdgpu_gem_object_create(adev, size, args->in.alignment,
                                     initial_domain,
                                     flags, ttm_bo_type_device, resv, &gobj);
-       if (r) {
-               if (r != -ERESTARTSYS) {
-                       if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
-                               flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-                               goto retry;
-                       }
+       if (r && r != -ERESTARTSYS) {
+               if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+                       flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+                       goto retry;
+               }
 
-                       if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
-                               initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
-                               goto retry;
-                       }
-                       DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
-                                 size, initial_domain, args->in.alignment, r);
+               if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+                       initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+                       goto retry;
                }
-               return r;
+               DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
+                               size, initial_domain, args->in.alignment, r);
        }
 
        if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
@@ -904,7 +901,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
                                         DIV_ROUND_UP(args->bpp, 8), 0);
        args->size = (u64)args->pitch * args->height;
        args->size = ALIGN(args->size, PAGE_SIZE);
-       domain = amdgpu_bo_get_preferred_pin_domain(adev,
+       domain = amdgpu_bo_get_preferred_domain(adev,
                                amdgpu_display_supported_domains(adev, flags));
        r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
                                     ttm_bo_type_device, NULL, &gobj);
index a0be077..e7f06bd 100644 (file)
@@ -563,24 +563,38 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
 
        mutex_lock(&adev->gfx.gfx_off_mutex);
 
-       if (!enable)
-               adev->gfx.gfx_off_req_count++;
-       else if (adev->gfx.gfx_off_req_count > 0)
+       if (enable) {
+               /* If the count is already 0, it means there's an imbalance bug somewhere.
+                * Note that the bug may be in a different caller than the one which triggers the
+                * WARN_ON_ONCE.
+                */
+               if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
+                       goto unlock;
+
                adev->gfx.gfx_off_req_count--;
 
-       if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
-               schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
-       } else if (!enable && adev->gfx.gfx_off_state) {
-               if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
-                       adev->gfx.gfx_off_state = false;
+               if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state)
+                       schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
+       } else {
+               if (adev->gfx.gfx_off_req_count == 0) {
+                       cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);
+
+                       if (adev->gfx.gfx_off_state &&
+                           !amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
+                               adev->gfx.gfx_off_state = false;
 
-                       if (adev->gfx.funcs->init_spm_golden) {
-                               dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
-                               amdgpu_gfx_init_spm_golden(adev);
+                               if (adev->gfx.funcs->init_spm_golden) {
+                                       dev_dbg(adev->dev,
+                                               "GFXOFF is disabled, re-init SPM golden settings\n");
+                                       amdgpu_gfx_init_spm_golden(adev);
+                               }
                        }
                }
+
+               adev->gfx.gfx_off_req_count++;
        }
 
+unlock:
        mutex_unlock(&adev->gfx.gfx_off_mutex);
 }
 
@@ -615,7 +629,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
                adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
                adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->gfx.ras_if->sub_block_index = 0;
-               strcpy(adev->gfx.ras_if->name, "gfx");
        }
        fs_info.head = ih_info.head = *adev->gfx.ras_if;
        r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
index d0b8d41..c7797ea 100644 (file)
@@ -471,6 +471,27 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
                        return r;
        }
 
+       if (adev->mca.mp0.ras_funcs &&
+           adev->mca.mp0.ras_funcs->ras_late_init) {
+               r = adev->mca.mp0.ras_funcs->ras_late_init(adev);
+               if (r)
+                       return r;
+       }
+
+       if (adev->mca.mp1.ras_funcs &&
+           adev->mca.mp1.ras_funcs->ras_late_init) {
+               r = adev->mca.mp1.ras_funcs->ras_late_init(adev);
+               if (r)
+                       return r;
+       }
+
+       if (adev->mca.mpio.ras_funcs &&
+           adev->mca.mpio.ras_funcs->ras_late_init) {
+               r = adev->mca.mpio.ras_funcs->ras_late_init(adev);
+               if (r)
+                       return r;
+       }
+
        return 0;
 }
 
index 5430003..675a72e 100644 (file)
@@ -118,7 +118,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *res)
  * @man: TTM memory type manager
  * @tbo: TTM BO we need this range for
  * @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: the resulting mem object
  *
  * Dummy, allocate the node but no space for it yet.
  */
@@ -182,7 +182,7 @@ err_out:
  * amdgpu_gtt_mgr_del - free ranges
  *
  * @man: TTM memory type manager
- * @mem: TTM memory object
+ * @res: TTM memory object
  *
  * Free the allocated GTT again.
  */
index 1d50d53..a766e1a 100644 (file)
@@ -41,7 +41,6 @@ int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev)
                adev->hdp.ras_if->block = AMDGPU_RAS_BLOCK__HDP;
                adev->hdp.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->hdp.ras_if->sub_block_index = 0;
-               strcpy(adev->hdp.ras_if->name, "hdp");
        }
        ih_info.head = fs_info.head = *adev->hdp.ras_if;
        r = amdgpu_ras_late_init(adev, adev->hdp.ras_if,
index bca4ddd..82608df 100644 (file)
@@ -339,7 +339,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
 void
 amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
 {
-       u8 val;
+       u8 val = 0;
 
        if (!amdgpu_connector->router.ddc_valid)
                return;
index ec65ab0..c076a6b 100644 (file)
@@ -262,7 +262,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                                       fence_flags | AMDGPU_FENCE_FLAG_64BIT);
        }
 
-       r = amdgpu_fence_emit(ring, f, fence_flags);
+       r = amdgpu_fence_emit(ring, f, job, fence_flags);
        if (r) {
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                if (job && job->vmid)
index d33e6d9..de29518 100644 (file)
@@ -127,11 +127,16 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
        struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
        struct dma_fence *f;
+       struct dma_fence *hw_fence;
        unsigned i;
 
-       /* use sched fence if available */
-       f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
+       if (job->hw_fence.ops == NULL)
+               hw_fence = job->external_hw_fence;
+       else
+               hw_fence = &job->hw_fence;
 
+       /* use sched fence if available */
+       f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
@@ -142,20 +147,27 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 
        drm_sched_job_cleanup(s_job);
 
-       dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
-       kfree(job);
+
+    /* only put the hw fence if has embedded fence */
+       if (job->hw_fence.ops != NULL)
+               dma_fence_put(&job->hw_fence);
+       else
+               kfree(job);
 }
 
 void amdgpu_job_free(struct amdgpu_job *job)
 {
        amdgpu_job_free_resources(job);
-
-       dma_fence_put(job->fence);
        amdgpu_sync_free(&job->sync);
        amdgpu_sync_free(&job->sched_sync);
-       kfree(job);
+
+       /* only put the hw fence if has embedded fence */
+       if (job->hw_fence.ops != NULL)
+               dma_fence_put(&job->hw_fence);
+       else
+               kfree(job);
 }
 
 int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
@@ -184,11 +196,14 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 
        job->base.sched = &ring->sched;
        r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
-       job->fence = dma_fence_get(*fence);
+       /* record external_hw_fence for direct submit */
+       job->external_hw_fence = dma_fence_get(*fence);
        if (r)
                return r;
 
        amdgpu_job_free(job);
+       dma_fence_put(*fence);
+
        return 0;
 }
 
@@ -246,10 +261,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
                if (r)
                        DRM_ERROR("Error scheduling IBs (%d)\n", r);
        }
-       /* if gpu reset, hw fence will be replaced here */
-       dma_fence_put(job->fence);
-       job->fence = dma_fence_get(fence);
 
+       if (!job->job_run_counter)
+               dma_fence_get(fence);
+       else if (finished->error < 0)
+               dma_fence_put(&job->hw_fence);
+       job->job_run_counter++;
        amdgpu_job_free_resources(job);
 
        fence = r ? ERR_PTR(r) : fence;
index 81caac9..9e65730 100644 (file)
@@ -46,7 +46,8 @@ struct amdgpu_job {
        struct amdgpu_sync      sync;
        struct amdgpu_sync      sched_sync;
        struct amdgpu_ib        *ibs;
-       struct dma_fence        *fence; /* the hw fence */
+       struct dma_fence        hw_fence;
+       struct dma_fence        *external_hw_fence;
        uint32_t                preamble_status;
        uint32_t                preemption_status;
        uint32_t                num_ibs;
@@ -62,6 +63,9 @@ struct amdgpu_job {
        /* user fence handling */
        uint64_t                uf_addr;
        uint64_t                uf_sequence;
+
+       /* job_run_counter >= 1 means a resubmit job */
+       uint32_t                job_run_counter;
 };
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
index 8996cb4..9342aa2 100644 (file)
@@ -47,8 +47,6 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
 {
        int i;
 
-       cancel_delayed_work_sync(&adev->jpeg.idle_work);
-
        for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
                if (adev->jpeg.harvest_config & (1 << i))
                        continue;
index 20b049a..7e45640 100644 (file)
@@ -341,27 +341,27 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
                switch (query_fw->index) {
                case TA_FW_TYPE_PSP_XGMI:
                        fw_info->ver = adev->psp.ta_fw_version;
-                       fw_info->feature = adev->psp.ta_xgmi_ucode_version;
+                       fw_info->feature = adev->psp.xgmi.feature_version;
                        break;
                case TA_FW_TYPE_PSP_RAS:
                        fw_info->ver = adev->psp.ta_fw_version;
-                       fw_info->feature = adev->psp.ta_ras_ucode_version;
+                       fw_info->feature = adev->psp.ras.feature_version;
                        break;
                case TA_FW_TYPE_PSP_HDCP:
                        fw_info->ver = adev->psp.ta_fw_version;
-                       fw_info->feature = adev->psp.ta_hdcp_ucode_version;
+                       fw_info->feature = adev->psp.hdcp.feature_version;
                        break;
                case TA_FW_TYPE_PSP_DTM:
                        fw_info->ver = adev->psp.ta_fw_version;
-                       fw_info->feature = adev->psp.ta_dtm_ucode_version;
+                       fw_info->feature = adev->psp.dtm.feature_version;
                        break;
                case TA_FW_TYPE_PSP_RAP:
                        fw_info->ver = adev->psp.ta_fw_version;
-                       fw_info->feature = adev->psp.ta_rap_ucode_version;
+                       fw_info->feature = adev->psp.rap.feature_version;
                        break;
                case TA_FW_TYPE_PSP_SECUREDISPLAY:
                        fw_info->ver = adev->psp.ta_fw_version;
-                       fw_info->feature = adev->psp.ta_securedisplay_ucode_version;
+                       fw_info->feature = adev->psp.securedisplay.feature_version;
                        break;
                default:
                        return -EINVAL;
@@ -378,8 +378,8 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
                fw_info->feature = adev->psp.sos.feature_version;
                break;
        case AMDGPU_INFO_FW_ASD:
-               fw_info->ver = adev->psp.asd_fw_version;
-               fw_info->feature = adev->psp.asd_feature_version;
+               fw_info->ver = adev->psp.asd.fw_version;
+               fw_info->feature = adev->psp.asd.feature_version;
                break;
        case AMDGPU_INFO_FW_DMCU:
                fw_info->ver = adev->dm.dmcu_fw_version;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
new file mode 100644 (file)
index 0000000..a2d3dbb
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_ras.h"
+#include "amdgpu.h"
+#include "amdgpu_mca.h"
+
+#include "umc/umc_6_7_0_offset.h"
+#include "umc/umc_6_7_0_sh_mask.h"
+
+void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
+                                             uint64_t mc_status_addr,
+                                             unsigned long *error_count)
+{
+       uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
+
+       if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
+           REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
+               *error_count += 1;
+}
+
+void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
+                                               uint64_t mc_status_addr,
+                                               unsigned long *error_count)
+{
+       uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
+
+       if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
+           (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
+           REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
+           REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
+           REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
+           REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
+               *error_count += 1;
+}
+
+void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
+                                 uint64_t mc_status_addr)
+{
+       WREG64_PCIE(mc_status_addr * 4, 0x0ULL);
+}
+
+void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
+                                     uint64_t mc_status_addr,
+                                     void *ras_error_status)
+{
+       struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+
+       amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count));
+       amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count));
+
+       amdgpu_mca_reset_error_count(adev, mc_status_addr);
+}
+
+int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
+                            struct amdgpu_mca_ras *mca_dev)
+{
+       int r;
+       struct ras_ih_if ih_info = {
+               .cb = NULL,
+       };
+       struct ras_fs_if fs_info = {
+               .sysfs_name = mca_dev->ras_funcs->sysfs_name,
+       };
+
+       if (!mca_dev->ras_if) {
+               mca_dev->ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
+               if (!mca_dev->ras_if)
+                       return -ENOMEM;
+               mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block;
+               mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+               mca_dev->ras_if->sub_block_index = 0;
+       }
+       ih_info.head = fs_info.head = *mca_dev->ras_if;
+       r = amdgpu_ras_late_init(adev, mca_dev->ras_if,
+                                &fs_info, &ih_info);
+       if (r || !amdgpu_ras_is_supported(adev, mca_dev->ras_if->block)) {
+               kfree(mca_dev->ras_if);
+               mca_dev->ras_if = NULL;
+       }
+
+       return r;
+}
+
+void amdgpu_mca_ras_fini(struct amdgpu_device *adev,
+                        struct amdgpu_mca_ras *mca_dev)
+{
+       struct ras_ih_if ih_info = {
+               .cb = NULL,
+       };
+
+       if (!mca_dev->ras_if)
+               return;
+
+       amdgpu_ras_late_fini(adev, mca_dev->ras_if, &ih_info);
+       kfree(mca_dev->ras_if);
+       mca_dev->ras_if = NULL;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
new file mode 100644 (file)
index 0000000..f860f2f
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2021  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_MCA_H__
+#define __AMDGPU_MCA_H__
+
+struct amdgpu_mca_ras_funcs {
+       int (*ras_late_init)(struct amdgpu_device *adev);
+       void (*ras_fini)(struct amdgpu_device *adev);
+       void (*query_ras_error_count)(struct amdgpu_device *adev,
+                                     void *ras_error_status);
+       void (*query_ras_error_address)(struct amdgpu_device *adev,
+                                       void *ras_error_status);
+       uint32_t ras_block;
+       const char *sysfs_name;
+};
+
+struct amdgpu_mca_ras {
+       struct ras_common_if *ras_if;
+       const struct amdgpu_mca_ras_funcs *ras_funcs;
+};
+
+struct amdgpu_mca_funcs {
+       void (*init)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_mca {
+       const struct amdgpu_mca_funcs *funcs;
+       struct amdgpu_mca_ras mp0;
+       struct amdgpu_mca_ras mp1;
+       struct amdgpu_mca_ras mpio;
+};
+
+void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
+                                             uint64_t mc_status_addr,
+                                             unsigned long *error_count);
+
+void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
+                                               uint64_t mc_status_addr,
+                                               unsigned long *error_count);
+
+void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
+                                 uint64_t mc_status_addr);
+
+void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
+                                     uint64_t mc_status_addr,
+                                     void *ras_error_status);
+
+int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
+                            struct amdgpu_mca_ras *mca_dev);
+
+void amdgpu_mca_ras_fini(struct amdgpu_device *adev,
+                        struct amdgpu_mca_ras *mca_dev);
+
+#endif
index ead3dc5..24297dc 100644 (file)
@@ -41,7 +41,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
                adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB;
                adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->mmhub.ras_if->sub_block_index = 0;
-               strcpy(adev->mmhub.ras_if->name, "mmhub");
        }
        ih_info.head = fs_info.head = *adev->mmhub.ras_if;
        r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if,
index 6201a5f..6afb02f 100644 (file)
@@ -39,7 +39,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
                adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF;
                adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->nbio.ras_if->sub_block_index = 0;
-               strcpy(adev->nbio.ras_if->name, "pcie_bif");
        }
        ih_info.head = fs_info.head = *adev->nbio.ras_if;
        r = amdgpu_ras_late_init(adev, adev->nbio.ras_if,
index d15eee9..01a78c7 100644 (file)
@@ -920,11 +920,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                        return -EINVAL;
        }
 
-       /* This assumes only APU display buffers are pinned with (VRAM|GTT).
-        * See function amdgpu_display_supported_domains()
-        */
-       domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
-
        if (bo->tbo.pin_count) {
                uint32_t mem_type = bo->tbo.resource->mem_type;
                uint32_t mem_flags = bo->tbo.resource->placement;
@@ -949,6 +944,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                return 0;
        }
 
+       /* This assumes only APU display buffers are pinned with (VRAM|GTT).
+        * See function amdgpu_display_supported_domains()
+        */
+       domain = amdgpu_bo_get_preferred_domain(adev, domain);
+
        if (bo->tbo.base.import_attach)
                dma_buf_pin(bo->tbo.base.import_attach);
 
@@ -1518,14 +1518,14 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 }
 
 /**
- * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
+ * amdgpu_bo_get_preferred_domain - get preferred domain
  * @adev: amdgpu device object
  * @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
  *
  * Returns:
- * Which of the allowed domains is preferred for pinning the BO for scanout.
+ * Which of the allowed domains is preferred for allocating the BO.
  */
-uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
+uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
                                            uint32_t domain)
 {
        if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
index e72f329..9d6c001 100644 (file)
@@ -333,7 +333,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
 void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
 int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
                             struct dma_fence **fence);
-uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
+uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
                                            uint32_t domain);
 
 /*
index f2e2066..4eaec44 100644 (file)
@@ -80,12 +80,17 @@ static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
  * Calculate feedback and reference divider for a given post divider. Makes
  * sure we stay within the limits.
  */
-static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
-                                     unsigned fb_div_max, unsigned ref_div_max,
-                                     unsigned *fb_div, unsigned *ref_div)
+static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int nom,
+                                     unsigned int den, unsigned int post_div,
+                                     unsigned int fb_div_max, unsigned int ref_div_max,
+                                     unsigned int *fb_div, unsigned int *ref_div)
 {
+
        /* limit reference * post divider to a maximum */
-       ref_div_max = min(128 / post_div, ref_div_max);
+       if (adev->family == AMDGPU_FAMILY_SI)
+               ref_div_max = min(100 / post_div, ref_div_max);
+       else
+               ref_div_max = min(128 / post_div, ref_div_max);
 
        /* get matching reference and feedback divider */
        *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
@@ -112,7 +117,8 @@ static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_
  * Try to calculate the PLL parameters to generate the given frequency:
  * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
  */
-void amdgpu_pll_compute(struct amdgpu_pll *pll,
+void amdgpu_pll_compute(struct amdgpu_device *adev,
+                       struct amdgpu_pll *pll,
                        u32 freq,
                        u32 *dot_clock_p,
                        u32 *fb_div_p,
@@ -199,7 +205,7 @@ void amdgpu_pll_compute(struct amdgpu_pll *pll,
 
        for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
                unsigned diff;
-               amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
+               amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max,
                                          ref_div_max, &fb_div, &ref_div);
                diff = abs(target_clock - (pll->reference_freq * fb_div) /
                        (ref_div * post_div));
@@ -214,7 +220,7 @@ void amdgpu_pll_compute(struct amdgpu_pll *pll,
        post_div = post_div_best;
 
        /* get the feedback and reference divider for the optimal value */
-       amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
+       amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max, ref_div_max,
                                  &fb_div, &ref_div);
 
        /* reduce the numbers to a simpler ratio once more */
index db6136f..44a583d 100644 (file)
@@ -24,7 +24,8 @@
 #ifndef __AMDGPU_PLL_H__
 #define __AMDGPU_PLL_H__
 
-void amdgpu_pll_compute(struct amdgpu_pll *pll,
+void amdgpu_pll_compute(struct amdgpu_device *adev,
+                        struct amdgpu_pll *pll,
                         u32 freq,
                         u32 *dot_clock_p,
                         u32 *fb_div_p,
index 9dc2d6d..9b41cb8 100644 (file)
@@ -29,6 +29,7 @@
 #include "amdgpu.h"
 #include "amdgpu_psp.h"
 #include "amdgpu_ucode.h"
+#include "amdgpu_xgmi.h"
 #include "soc15_common.h"
 #include "psp_v3_1.h"
 #include "psp_v10_0.h"
@@ -468,10 +469,10 @@ psp_cmd_submit_buf(struct psp_context *psp,
         */
        if (!skip_unsupport && (psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
                if (ucode)
-                       DRM_WARN("failed to load ucode (%s) ",
-                                 amdgpu_ucode_name(ucode->ucode_id));
-               DRM_WARN("psp gfx command (%s) failed and response status is (0x%X)\n",
-                        psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id),
+                       DRM_WARN("failed to load ucode %s(0x%X) ",
+                                 amdgpu_ucode_name(ucode->ucode_id), ucode->ucode_id);
+               DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n",
+                        psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id,
                         psp->cmd_buf_mem->resp.status);
                if (!timeout) {
                        ret = -EINVAL;
@@ -799,15 +800,15 @@ static int psp_asd_load(struct psp_context *psp)
         * add workaround to bypass it for sriov now.
         * TODO: add version check to make it common
         */
-       if (amdgpu_sriov_vf(psp->adev) || !psp->asd_ucode_size)
+       if (amdgpu_sriov_vf(psp->adev) || !psp->asd.size_bytes)
                return 0;
 
        cmd = acquire_psp_cmd_buf(psp);
 
-       psp_copy_fw(psp, psp->asd_start_addr, psp->asd_ucode_size);
+       psp_copy_fw(psp, psp->asd.start_addr, psp->asd.size_bytes);
 
        psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
-                                 psp->asd_ucode_size);
+                                 psp->asd.size_bytes);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);
@@ -898,23 +899,37 @@ static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
        cmd->cmd.cmd_load_ta.cmd_buf_len         = ta_shared_size;
 }
 
-static int psp_xgmi_init_shared_buf(struct psp_context *psp)
+static int psp_ta_init_shared_buf(struct psp_context *psp,
+                                 struct ta_mem_context *mem_ctx,
+                                 uint32_t shared_mem_size)
 {
        int ret;
 
        /*
-        * Allocate 16k memory aligned to 4k from Frame Buffer (local
-        * physical) for xgmi ta <-> Driver
-        */
-       ret = amdgpu_bo_create_kernel(psp->adev, PSP_XGMI_SHARED_MEM_SIZE,
-                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                     &psp->xgmi_context.xgmi_shared_bo,
-                                     &psp->xgmi_context.xgmi_shared_mc_addr,
-                                     &psp->xgmi_context.xgmi_shared_buf);
+        * Allocate 16k memory aligned to 4k from Frame Buffer (local
+        * physical) for ta to host memory
+        */
+       ret = amdgpu_bo_create_kernel(psp->adev, shared_mem_size, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &mem_ctx->shared_bo,
+                                     &mem_ctx->shared_mc_addr,
+                                     &mem_ctx->shared_buf);
 
        return ret;
 }
 
+static void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
+{
+       amdgpu_bo_free_kernel(&mem_ctx->shared_bo, &mem_ctx->shared_mc_addr,
+                             &mem_ctx->shared_buf);
+}
+
+static int psp_xgmi_init_shared_buf(struct psp_context *psp)
+{
+       return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context,
+                                     PSP_XGMI_SHARED_MEM_SIZE);
+}
+
 static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
                                       uint32_t ta_cmd_id,
                                       uint32_t session_id)
@@ -952,20 +967,20 @@ static int psp_xgmi_load(struct psp_context *psp)
 
        cmd = acquire_psp_cmd_buf(psp);
 
-       psp_copy_fw(psp, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
+       psp_copy_fw(psp, psp->xgmi.start_addr, psp->xgmi.size_bytes);
 
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
-                                psp->ta_xgmi_ucode_size,
-                                psp->xgmi_context.xgmi_shared_mc_addr,
+                                psp->xgmi.size_bytes,
+                                psp->xgmi_context.context.mem_context.shared_mc_addr,
                                 PSP_XGMI_SHARED_MEM_SIZE);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);
 
        if (!ret) {
-               psp->xgmi_context.initialized = 1;
-               psp->xgmi_context.session_id = cmd->resp.session_id;
+               psp->xgmi_context.context.initialized = true;
+               psp->xgmi_context.context.session_id = cmd->resp.session_id;
        }
 
        release_psp_cmd_buf(psp);
@@ -990,7 +1005,7 @@ static int psp_xgmi_unload(struct psp_context *psp)
 
        cmd = acquire_psp_cmd_buf(psp);
 
-       psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
+       psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);
@@ -1002,41 +1017,42 @@ static int psp_xgmi_unload(struct psp_context *psp)
 
 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
-       return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
+       return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.context.session_id);
 }
 
 int psp_xgmi_terminate(struct psp_context *psp)
 {
        int ret;
 
-       if (!psp->xgmi_context.initialized)
+       if (!psp->xgmi_context.context.initialized)
                return 0;
 
        ret = psp_xgmi_unload(psp);
        if (ret)
                return ret;
 
-       psp->xgmi_context.initialized = 0;
+       psp->xgmi_context.context.initialized = false;
 
        /* free xgmi shared memory */
-       amdgpu_bo_free_kernel(&psp->xgmi_context.xgmi_shared_bo,
-                       &psp->xgmi_context.xgmi_shared_mc_addr,
-                       &psp->xgmi_context.xgmi_shared_buf);
+       psp_ta_free_shared_buf(&psp->xgmi_context.context.mem_context);
 
        return 0;
 }
 
-int psp_xgmi_initialize(struct psp_context *psp)
+int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta)
 {
        struct ta_xgmi_shared_memory *xgmi_cmd;
        int ret;
 
-       if (!psp->adev->psp.ta_fw ||
-           !psp->adev->psp.ta_xgmi_ucode_size ||
-           !psp->adev->psp.ta_xgmi_start_addr)
+       if (!psp->ta_fw ||
+           !psp->xgmi.size_bytes ||
+           !psp->xgmi.start_addr)
                return -ENOENT;
 
-       if (!psp->xgmi_context.initialized) {
+       if (!load_ta)
+               goto invoke;
+
+       if (!psp->xgmi_context.context.initialized) {
                ret = psp_xgmi_init_shared_buf(psp);
                if (ret)
                        return ret;
@@ -1047,9 +1063,11 @@ int psp_xgmi_initialize(struct psp_context *psp)
        if (ret)
                return ret;
 
+invoke:
        /* Initialize XGMI session */
-       xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.xgmi_shared_buf);
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)(psp->xgmi_context.context.mem_context.shared_buf);
        memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+       xgmi_cmd->flag_extend_link_record = set_extended_data;
        xgmi_cmd->cmd_id = TA_COMMAND_XGMI__INITIALIZE;
 
        ret = psp_xgmi_invoke(psp, xgmi_cmd->cmd_id);
@@ -1062,7 +1080,7 @@ int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id)
        struct ta_xgmi_shared_memory *xgmi_cmd;
        int ret;
 
-       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
        memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
        xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_HIVE_ID;
@@ -1082,7 +1100,7 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
        struct ta_xgmi_shared_memory *xgmi_cmd;
        int ret;
 
-       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
        memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
        xgmi_cmd->cmd_id = TA_COMMAND_XGMI__GET_NODE_ID;
@@ -1100,12 +1118,59 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
 static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
 {
        return psp->adev->asic_type == CHIP_ALDEBARAN &&
-                               psp->ta_xgmi_ucode_version >= 0x2000000b;
+                               psp->xgmi.feature_version >= 0x2000000b;
+}
+
+/*
+ * Chips that support extended topology information require the driver to
+ * reflect topology information in the opposite direction.  This is
+ * because the TA has already exceeded its link record limit and if the
+ * TA holds bi-directional information, the driver would have to do
+ * multiple fetches instead of just two.
+ */
+static void psp_xgmi_reflect_topology_info(struct psp_context *psp,
+                                       struct psp_xgmi_node_info node_info)
+{
+       struct amdgpu_device *mirror_adev;
+       struct amdgpu_hive_info *hive;
+       uint64_t src_node_id = psp->adev->gmc.xgmi.node_id;
+       uint64_t dst_node_id = node_info.node_id;
+       uint8_t dst_num_hops = node_info.num_hops;
+       uint8_t dst_num_links = node_info.num_links;
+
+       hive = amdgpu_get_xgmi_hive(psp->adev);
+       list_for_each_entry(mirror_adev, &hive->device_list, gmc.xgmi.head) {
+               struct psp_xgmi_topology_info *mirror_top_info;
+               int j;
+
+               if (mirror_adev->gmc.xgmi.node_id != dst_node_id)
+                       continue;
+
+               mirror_top_info = &mirror_adev->psp.xgmi_context.top_info;
+               for (j = 0; j < mirror_top_info->num_nodes; j++) {
+                       if (mirror_top_info->nodes[j].node_id != src_node_id)
+                               continue;
+
+                       mirror_top_info->nodes[j].num_hops = dst_num_hops;
+                       /*
+                        * prevent 0 num_links value re-reflection since reflection
+                        * criteria is based on num_hops (direct or indirect).
+                        *
+                        */
+                       if (dst_num_links)
+                               mirror_top_info->nodes[j].num_links = dst_num_links;
+
+                       break;
+               }
+
+               break;
+       }
 }
 
 int psp_xgmi_get_topology_info(struct psp_context *psp,
                               int number_devices,
-                              struct psp_xgmi_topology_info *topology)
+                              struct psp_xgmi_topology_info *topology,
+                              bool get_extended_data)
 {
        struct ta_xgmi_shared_memory *xgmi_cmd;
        struct ta_xgmi_cmd_get_topology_info_input *topology_info_input;
@@ -1116,8 +1181,9 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
        if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
                return -EINVAL;
 
-       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
        memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
+       xgmi_cmd->flag_extend_link_record = get_extended_data;
 
        /* Fill in the shared memory with topology information as input */
        topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
@@ -1140,10 +1206,19 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
        topology_info_output = &xgmi_cmd->xgmi_out_message.get_topology_info;
        topology->num_nodes = xgmi_cmd->xgmi_out_message.get_topology_info.num_nodes;
        for (i = 0; i < topology->num_nodes; i++) {
-               topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
-               topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
-               topology->nodes[i].is_sharing_enabled = topology_info_output->nodes[i].is_sharing_enabled;
-               topology->nodes[i].sdma_engine = topology_info_output->nodes[i].sdma_engine;
+               /* extended data will either be 0 or equal to non-extended data */
+               if (topology_info_output->nodes[i].num_hops)
+                       topology->nodes[i].num_hops = topology_info_output->nodes[i].num_hops;
+
+               /* non-extended data gets everything here so no need to update */
+               if (!get_extended_data) {
+                       topology->nodes[i].node_id = topology_info_output->nodes[i].node_id;
+                       topology->nodes[i].is_sharing_enabled =
+                                       topology_info_output->nodes[i].is_sharing_enabled;
+                       topology->nodes[i].sdma_engine =
+                                       topology_info_output->nodes[i].sdma_engine;
+               }
+
        }
 
        /* Invoke xgmi ta again to get the link information */
@@ -1158,9 +1233,18 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
                        return ret;
 
                link_info_output = &xgmi_cmd->xgmi_out_message.get_link_info;
-               for (i = 0; i < topology->num_nodes; i++)
-                       topology->nodes[i].num_links =
+               for (i = 0; i < topology->num_nodes; i++) {
+                       /* accumulate num_links on extended data */
+                       topology->nodes[i].num_links = get_extended_data ?
+                                       topology->nodes[i].num_links +
+                                                       link_info_output->nodes[i].num_links :
                                        link_info_output->nodes[i].num_links;
+
+                       /* reflect the topology information for bi-directionality */
+                       if (psp->xgmi_context.supports_extended_data &&
+                                       get_extended_data && topology->nodes[i].num_hops)
+                               psp_xgmi_reflect_topology_info(psp, topology->nodes[i]);
+               }
        }
 
        return 0;
@@ -1177,7 +1261,7 @@ int psp_xgmi_set_topology_info(struct psp_context *psp,
        if (!topology || topology->num_nodes > TA_XGMI__MAX_CONNECTED_NODES)
                return -EINVAL;
 
-       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.xgmi_shared_buf;
+       xgmi_cmd = (struct ta_xgmi_shared_memory *)psp->xgmi_context.context.mem_context.shared_buf;
        memset(xgmi_cmd, 0, sizeof(struct ta_xgmi_shared_memory));
 
        topology_info_input = &xgmi_cmd->xgmi_in_message.get_topology_info;
@@ -1198,19 +1282,8 @@ int psp_xgmi_set_topology_info(struct psp_context *psp,
 // ras begin
 static int psp_ras_init_shared_buf(struct psp_context *psp)
 {
-       int ret;
-
-       /*
-        * Allocate 16k memory aligned to 4k from Frame Buffer (local
-        * physical) for ras ta <-> Driver
-        */
-       ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
-                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                       &psp->ras.ras_shared_bo,
-                       &psp->ras.ras_shared_mc_addr,
-                       &psp->ras.ras_shared_buf);
-
-       return ret;
+       return psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context,
+                                     PSP_RAS_SHARED_MEM_SIZE);
 }
 
 static int psp_ras_load(struct psp_context *psp)
@@ -1225,9 +1298,9 @@ static int psp_ras_load(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       psp_copy_fw(psp, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
+       psp_copy_fw(psp, psp->ras.start_addr, psp->ras.size_bytes);
 
-       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+       ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
 
        if (psp->adev->gmc.xgmi.connected_to_cpu)
                ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
@@ -1238,18 +1311,18 @@ static int psp_ras_load(struct psp_context *psp)
 
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
-                                psp->ta_ras_ucode_size,
-                                psp->ras.ras_shared_mc_addr,
+                                psp->ras.size_bytes,
+                                psp->ras_context.context.mem_context.shared_mc_addr,
                                 PSP_RAS_SHARED_MEM_SIZE);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                        psp->fence_buf_mc_addr);
 
        if (!ret) {
-               psp->ras.session_id = cmd->resp.session_id;
+               psp->ras_context.context.session_id = cmd->resp.session_id;
 
                if (!ras_cmd->ras_status)
-                       psp->ras.ras_initialized = true;
+                       psp->ras_context.context.initialized = true;
                else
                        dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
        }
@@ -1275,7 +1348,7 @@ static int psp_ras_unload(struct psp_context *psp)
 
        cmd = acquire_psp_cmd_buf(psp);
 
-       psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
+       psp_prep_ta_unload_cmd_buf(cmd, psp->ras_context.context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                        psp->fence_buf_mc_addr);
@@ -1290,7 +1363,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
        struct ta_ras_shared_memory *ras_cmd;
        int ret;
 
-       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+       ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
 
        /*
         * TODO: bypass the loading in sriov for now
@@ -1298,7 +1371,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id);
+       ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras_context.context.session_id);
 
        if (amdgpu_ras_intr_triggered())
                return ret;
@@ -1354,10 +1427,10 @@ int psp_ras_enable_features(struct psp_context *psp,
        struct ta_ras_shared_memory *ras_cmd;
        int ret;
 
-       if (!psp->ras.ras_initialized)
+       if (!psp->ras_context.context.initialized)
                return -EINVAL;
 
-       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+       ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
        memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
 
        if (enable)
@@ -1384,19 +1457,17 @@ static int psp_ras_terminate(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->ras.ras_initialized)
+       if (!psp->ras_context.context.initialized)
                return 0;
 
        ret = psp_ras_unload(psp);
        if (ret)
                return ret;
 
-       psp->ras.ras_initialized = false;
+       psp->ras_context.context.initialized = false;
 
        /* free ras shared memory */
-       amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
-                       &psp->ras.ras_shared_mc_addr,
-                       &psp->ras.ras_shared_buf);
+       psp_ta_free_shared_buf(&psp->ras_context.context.mem_context);
 
        return 0;
 }
@@ -1413,8 +1484,8 @@ static int psp_ras_initialize(struct psp_context *psp)
        if (amdgpu_sriov_vf(adev))
                return 0;
 
-       if (!adev->psp.ta_ras_ucode_size ||
-           !adev->psp.ta_ras_start_addr) {
+       if (!adev->psp.ras.size_bytes ||
+           !adev->psp.ras.start_addr) {
                dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
                return 0;
        }
@@ -1460,7 +1531,7 @@ static int psp_ras_initialize(struct psp_context *psp)
                }
        }
 
-       if (!psp->ras.ras_initialized) {
+       if (!psp->ras_context.context.initialized) {
                ret = psp_ras_init_shared_buf(psp);
                if (ret)
                        return ret;
@@ -1479,10 +1550,10 @@ int psp_ras_trigger_error(struct psp_context *psp,
        struct ta_ras_shared_memory *ras_cmd;
        int ret;
 
-       if (!psp->ras.ras_initialized)
+       if (!psp->ras_context.context.initialized)
                return -EINVAL;
 
-       ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+       ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
        memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
 
        ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
@@ -1504,19 +1575,8 @@ int psp_ras_trigger_error(struct psp_context *psp,
 // HDCP start
 static int psp_hdcp_init_shared_buf(struct psp_context *psp)
 {
-       int ret;
-
-       /*
-        * Allocate 16k memory aligned to 4k from Frame Buffer (local
-        * physical) for hdcp ta <-> Driver
-        */
-       ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE,
-                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                     &psp->hdcp_context.hdcp_shared_bo,
-                                     &psp->hdcp_context.hdcp_shared_mc_addr,
-                                     &psp->hdcp_context.hdcp_shared_buf);
-
-       return ret;
+       return psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context,
+                                     PSP_HDCP_SHARED_MEM_SIZE);
 }
 
 static int psp_hdcp_load(struct psp_context *psp)
@@ -1530,22 +1590,22 @@ static int psp_hdcp_load(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       psp_copy_fw(psp, psp->ta_hdcp_start_addr,
-                   psp->ta_hdcp_ucode_size);
+       psp_copy_fw(psp, psp->hdcp.start_addr,
+                   psp->hdcp.size_bytes);
 
        cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
-                                psp->ta_hdcp_ucode_size,
-                                psp->hdcp_context.hdcp_shared_mc_addr,
+                                psp->hdcp.size_bytes,
+                                psp->hdcp_context.context.mem_context.shared_mc_addr,
                                 PSP_HDCP_SHARED_MEM_SIZE);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
        if (!ret) {
-               psp->hdcp_context.hdcp_initialized = true;
-               psp->hdcp_context.session_id = cmd->resp.session_id;
+               psp->hdcp_context.context.initialized = true;
+               psp->hdcp_context.context.session_id = cmd->resp.session_id;
                mutex_init(&psp->hdcp_context.mutex);
        }
 
@@ -1563,13 +1623,13 @@ static int psp_hdcp_initialize(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->adev->psp.ta_hdcp_ucode_size ||
-           !psp->adev->psp.ta_hdcp_start_addr) {
+       if (!psp->hdcp.size_bytes ||
+           !psp->hdcp.start_addr) {
                dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
                return 0;
        }
 
-       if (!psp->hdcp_context.hdcp_initialized) {
+       if (!psp->hdcp_context.context.initialized) {
                ret = psp_hdcp_init_shared_buf(psp);
                if (ret)
                        return ret;
@@ -1595,7 +1655,7 @@ static int psp_hdcp_unload(struct psp_context *psp)
 
        cmd = acquire_psp_cmd_buf(psp);
 
-       psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
+       psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
@@ -1612,7 +1672,7 @@ int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id);
+       return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.context.session_id);
 }
 
 static int psp_hdcp_terminate(struct psp_context *psp)
@@ -1625,8 +1685,8 @@ static int psp_hdcp_terminate(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->hdcp_context.hdcp_initialized) {
-               if (psp->hdcp_context.hdcp_shared_buf)
+       if (!psp->hdcp_context.context.initialized) {
+               if (psp->hdcp_context.context.mem_context.shared_buf)
                        goto out;
                else
                        return 0;
@@ -1636,13 +1696,11 @@ static int psp_hdcp_terminate(struct psp_context *psp)
        if (ret)
                return ret;
 
-       psp->hdcp_context.hdcp_initialized = false;
+       psp->hdcp_context.context.initialized = false;
 
 out:
        /* free hdcp shared memory */
-       amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
-                             &psp->hdcp_context.hdcp_shared_mc_addr,
-                             &psp->hdcp_context.hdcp_shared_buf);
+       psp_ta_free_shared_buf(&psp->hdcp_context.context.mem_context);
 
        return 0;
 }
@@ -1651,19 +1709,8 @@ out:
 // DTM start
 static int psp_dtm_init_shared_buf(struct psp_context *psp)
 {
-       int ret;
-
-       /*
-        * Allocate 16k memory aligned to 4k from Frame Buffer (local
-        * physical) for dtm ta <-> Driver
-        */
-       ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE,
-                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                     &psp->dtm_context.dtm_shared_bo,
-                                     &psp->dtm_context.dtm_shared_mc_addr,
-                                     &psp->dtm_context.dtm_shared_buf);
-
-       return ret;
+       return psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context,
+                                     PSP_DTM_SHARED_MEM_SIZE);
 }
 
 static int psp_dtm_load(struct psp_context *psp)
@@ -1677,21 +1724,21 @@ static int psp_dtm_load(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       psp_copy_fw(psp, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
+       psp_copy_fw(psp, psp->dtm.start_addr, psp->dtm.size_bytes);
 
        cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
-                                psp->ta_dtm_ucode_size,
-                                psp->dtm_context.dtm_shared_mc_addr,
+                                psp->dtm.size_bytes,
+                                psp->dtm_context.context.mem_context.shared_mc_addr,
                                 PSP_DTM_SHARED_MEM_SIZE);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
        if (!ret) {
-               psp->dtm_context.dtm_initialized = true;
-               psp->dtm_context.session_id = cmd->resp.session_id;
+               psp->dtm_context.context.initialized = true;
+               psp->dtm_context.context.session_id = cmd->resp.session_id;
                mutex_init(&psp->dtm_context.mutex);
        }
 
@@ -1710,13 +1757,13 @@ static int psp_dtm_initialize(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->adev->psp.ta_dtm_ucode_size ||
-           !psp->adev->psp.ta_dtm_start_addr) {
+       if (!psp->dtm.size_bytes ||
+           !psp->dtm.start_addr) {
                dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
                return 0;
        }
 
-       if (!psp->dtm_context.dtm_initialized) {
+       if (!psp->dtm_context.context.initialized) {
                ret = psp_dtm_init_shared_buf(psp);
                if (ret)
                        return ret;
@@ -1742,7 +1789,7 @@ static int psp_dtm_unload(struct psp_context *psp)
 
        cmd = acquire_psp_cmd_buf(psp);
 
-       psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
+       psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
@@ -1759,7 +1806,7 @@ int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id);
+       return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.context.session_id);
 }
 
 static int psp_dtm_terminate(struct psp_context *psp)
@@ -1772,8 +1819,8 @@ static int psp_dtm_terminate(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->dtm_context.dtm_initialized) {
-               if (psp->dtm_context.dtm_shared_buf)
+       if (!psp->dtm_context.context.initialized) {
+               if (psp->dtm_context.context.mem_context.shared_buf)
                        goto out;
                else
                        return 0;
@@ -1783,13 +1830,11 @@ static int psp_dtm_terminate(struct psp_context *psp)
        if (ret)
                return ret;
 
-       psp->dtm_context.dtm_initialized = false;
+       psp->dtm_context.context.initialized = false;
 
 out:
-       /* free hdcp shared memory */
-       amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
-                             &psp->dtm_context.dtm_shared_mc_addr,
-                             &psp->dtm_context.dtm_shared_buf);
+       /* free dtm shared memory */
+       psp_ta_free_shared_buf(&psp->dtm_context.context.mem_context);
 
        return 0;
 }
@@ -1798,19 +1843,8 @@ out:
 // RAP start
 static int psp_rap_init_shared_buf(struct psp_context *psp)
 {
-       int ret;
-
-       /*
-        * Allocate 16k memory aligned to 4k from Frame Buffer (local
-        * physical) for rap ta <-> Driver
-        */
-       ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAP_SHARED_MEM_SIZE,
-                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                     &psp->rap_context.rap_shared_bo,
-                                     &psp->rap_context.rap_shared_mc_addr,
-                                     &psp->rap_context.rap_shared_buf);
-
-       return ret;
+       return psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context,
+                                     PSP_RAP_SHARED_MEM_SIZE);
 }
 
 static int psp_rap_load(struct psp_context *psp)
@@ -1818,21 +1852,21 @@ static int psp_rap_load(struct psp_context *psp)
        int ret;
        struct psp_gfx_cmd_resp *cmd;
 
-       psp_copy_fw(psp, psp->ta_rap_start_addr, psp->ta_rap_ucode_size);
+       psp_copy_fw(psp, psp->rap.start_addr, psp->rap.size_bytes);
 
        cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
-                                psp->ta_rap_ucode_size,
-                                psp->rap_context.rap_shared_mc_addr,
+                                psp->rap.size_bytes,
+                                psp->rap_context.context.mem_context.shared_mc_addr,
                                 PSP_RAP_SHARED_MEM_SIZE);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
        if (!ret) {
-               psp->rap_context.rap_initialized = true;
-               psp->rap_context.session_id = cmd->resp.session_id;
+               psp->rap_context.context.initialized = true;
+               psp->rap_context.context.session_id = cmd->resp.session_id;
                mutex_init(&psp->rap_context.mutex);
        }
 
@@ -1846,7 +1880,7 @@ static int psp_rap_unload(struct psp_context *psp)
        int ret;
        struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
-       psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.session_id);
+       psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
@@ -1866,13 +1900,13 @@ static int psp_rap_initialize(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->adev->psp.ta_rap_ucode_size ||
-           !psp->adev->psp.ta_rap_start_addr) {
+       if (!psp->rap.size_bytes ||
+           !psp->rap.start_addr) {
                dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
                return 0;
        }
 
-       if (!psp->rap_context.rap_initialized) {
+       if (!psp->rap_context.context.initialized) {
                ret = psp_rap_init_shared_buf(psp);
                if (ret)
                        return ret;
@@ -1886,11 +1920,9 @@ static int psp_rap_initialize(struct psp_context *psp)
        if (ret || status != TA_RAP_STATUS__SUCCESS) {
                psp_rap_unload(psp);
 
-               amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
-                             &psp->rap_context.rap_shared_mc_addr,
-                             &psp->rap_context.rap_shared_buf);
+               psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
 
-               psp->rap_context.rap_initialized = false;
+               psp->rap_context.context.initialized = false;
 
                dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
                         ret, status);
@@ -1905,17 +1937,15 @@ static int psp_rap_terminate(struct psp_context *psp)
 {
        int ret;
 
-       if (!psp->rap_context.rap_initialized)
+       if (!psp->rap_context.context.initialized)
                return 0;
 
        ret = psp_rap_unload(psp);
 
-       psp->rap_context.rap_initialized = false;
+       psp->rap_context.context.initialized = false;
 
        /* free rap shared memory */
-       amdgpu_bo_free_kernel(&psp->rap_context.rap_shared_bo,
-                             &psp->rap_context.rap_shared_mc_addr,
-                             &psp->rap_context.rap_shared_buf);
+       psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
 
        return ret;
 }
@@ -1925,7 +1955,7 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_stat
        struct ta_rap_shared_memory *rap_cmd;
        int ret = 0;
 
-       if (!psp->rap_context.rap_initialized)
+       if (!psp->rap_context.context.initialized)
                return 0;
 
        if (ta_cmd_id != TA_CMD_RAP__INITIALIZE &&
@@ -1935,13 +1965,13 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_stat
        mutex_lock(&psp->rap_context.mutex);
 
        rap_cmd = (struct ta_rap_shared_memory *)
-                 psp->rap_context.rap_shared_buf;
+                 psp->rap_context.context.mem_context.shared_buf;
        memset(rap_cmd, 0, sizeof(struct ta_rap_shared_memory));
 
        rap_cmd->cmd_id = ta_cmd_id;
        rap_cmd->validation_method_id = METHOD_A;
 
-       ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.session_id);
+       ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.context.session_id);
        if (ret)
                goto out_unlock;
 
@@ -1958,19 +1988,9 @@ out_unlock:
 /* securedisplay start */
 static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
 {
-       int ret;
-
-       /*
-        * Allocate 16k memory aligned to 4k from Frame Buffer (local
-        * physical) for sa ta <-> Driver
-        */
-       ret = amdgpu_bo_create_kernel(psp->adev, PSP_SECUREDISPLAY_SHARED_MEM_SIZE,
-                                     PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
-                                     &psp->securedisplay_context.securedisplay_shared_bo,
-                                     &psp->securedisplay_context.securedisplay_shared_mc_addr,
-                                     &psp->securedisplay_context.securedisplay_shared_buf);
-
-       return ret;
+       return psp_ta_init_shared_buf(
+               psp, &psp->securedisplay_context.context.mem_context,
+               PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
 }
 
 static int psp_securedisplay_load(struct psp_context *psp)
@@ -1979,19 +1999,19 @@ static int psp_securedisplay_load(struct psp_context *psp)
        struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
        memset(psp->fw_pri_buf, 0, PSP_1_MEG);
-       memcpy(psp->fw_pri_buf, psp->ta_securedisplay_start_addr, psp->ta_securedisplay_ucode_size);
+       memcpy(psp->fw_pri_buf, psp->securedisplay.start_addr, psp->securedisplay.size_bytes);
 
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
-                                psp->ta_securedisplay_ucode_size,
-                                psp->securedisplay_context.securedisplay_shared_mc_addr,
+                                psp->securedisplay.size_bytes,
+                                psp->securedisplay_context.context.mem_context.shared_mc_addr,
                                 PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
        if (!ret) {
-               psp->securedisplay_context.securedisplay_initialized = true;
-               psp->securedisplay_context.session_id = cmd->resp.session_id;
+               psp->securedisplay_context.context.initialized = true;
+               psp->securedisplay_context.context.session_id = cmd->resp.session_id;
                mutex_init(&psp->securedisplay_context.mutex);
        }
 
@@ -2005,7 +2025,7 @@ static int psp_securedisplay_unload(struct psp_context *psp)
        int ret;
        struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
-       psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.session_id);
+       psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
@@ -2025,13 +2045,13 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->adev->psp.ta_securedisplay_ucode_size ||
-           !psp->adev->psp.ta_securedisplay_start_addr) {
+       if (!psp->securedisplay.size_bytes ||
+           !psp->securedisplay.start_addr) {
                dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
                return 0;
        }
 
-       if (!psp->securedisplay_context.securedisplay_initialized) {
+       if (!psp->securedisplay_context.context.initialized) {
                ret = psp_securedisplay_init_shared_buf(psp);
                if (ret)
                        return ret;
@@ -2048,11 +2068,9 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
        if (ret) {
                psp_securedisplay_unload(psp);
 
-               amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
-                             &psp->securedisplay_context.securedisplay_shared_mc_addr,
-                             &psp->securedisplay_context.securedisplay_shared_buf);
+               psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
 
-               psp->securedisplay_context.securedisplay_initialized = false;
+               psp->securedisplay_context.context.initialized = false;
 
                dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
                return -EINVAL;
@@ -2077,19 +2095,17 @@ static int psp_securedisplay_terminate(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       if (!psp->securedisplay_context.securedisplay_initialized)
+       if (!psp->securedisplay_context.context.initialized)
                return 0;
 
        ret = psp_securedisplay_unload(psp);
        if (ret)
                return ret;
 
-       psp->securedisplay_context.securedisplay_initialized = false;
+       psp->securedisplay_context.context.initialized = false;
 
        /* free securedisplay shared memory */
-       amdgpu_bo_free_kernel(&psp->securedisplay_context.securedisplay_shared_bo,
-                             &psp->securedisplay_context.securedisplay_shared_mc_addr,
-                             &psp->securedisplay_context.securedisplay_shared_buf);
+       psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
 
        return ret;
 }
@@ -2098,7 +2114,7 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 {
        int ret;
 
-       if (!psp->securedisplay_context.securedisplay_initialized)
+       if (!psp->securedisplay_context.context.initialized)
                return -EINVAL;
 
        if (ta_cmd_id != TA_SECUREDISPLAY_COMMAND__QUERY_TA &&
@@ -2107,7 +2123,7 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
 
        mutex_lock(&psp->securedisplay_context.mutex);
 
-       ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.session_id);
+       ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.context.session_id);
 
        mutex_unlock(&psp->securedisplay_context.mutex);
 
@@ -2420,7 +2436,7 @@ static int psp_load_smu_fw(struct psp_context *psp)
        struct amdgpu_device *adev = psp->adev;
        struct amdgpu_firmware_info *ucode =
                        &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
-       struct amdgpu_ras *ras = psp->ras.ras;
+       struct amdgpu_ras *ras = psp->ras_context.ras;
 
        if (!ucode->fw || amdgpu_sriov_vf(psp->adev))
                return 0;
@@ -2625,7 +2641,7 @@ skip_memalloc:
                return ret;
        }
 
-       if (psp->adev->psp.ta_fw) {
+       if (psp->ta_fw) {
                ret = psp_ras_initialize(psp);
                if (ret)
                        dev_err(psp->adev->dev,
@@ -2697,7 +2713,7 @@ static int psp_hw_fini(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct psp_context *psp = &adev->psp;
 
-       if (psp->adev->psp.ta_fw) {
+       if (psp->ta_fw) {
                psp_ras_terminate(psp);
                psp_securedisplay_terminate(psp);
                psp_rap_terminate(psp);
@@ -2727,7 +2743,7 @@ static int psp_suspend(void *handle)
        struct psp_context *psp = &adev->psp;
 
        if (adev->gmc.xgmi.num_physical_nodes > 1 &&
-           psp->xgmi_context.initialized == 1) {
+           psp->xgmi_context.context.initialized) {
                ret = psp_xgmi_terminate(psp);
                if (ret) {
                        DRM_ERROR("Failed to terminate xgmi ta\n");
@@ -2735,7 +2751,7 @@ static int psp_suspend(void *handle)
                }
        }
 
-       if (psp->adev->psp.ta_fw) {
+       if (psp->ta_fw) {
                ret = psp_ras_terminate(psp);
                if (ret) {
                        DRM_ERROR("Failed to terminate ras ta\n");
@@ -2817,7 +2833,7 @@ static int psp_resume(void *handle)
        }
 
        if (adev->gmc.xgmi.num_physical_nodes > 1) {
-               ret = psp_xgmi_initialize(psp);
+               ret = psp_xgmi_initialize(psp, false, true);
                /* Warning the XGMI seesion initialize failure
                 * Instead of stop driver initialization
                 */
@@ -2826,7 +2842,7 @@ static int psp_resume(void *handle)
                                "XGMI: Failed to initialize XGMI session\n");
        }
 
-       if (psp->adev->psp.ta_fw) {
+       if (psp->ta_fw) {
                ret = psp_ras_initialize(psp);
                if (ret)
                        dev_err(psp->adev->dev,
@@ -2978,10 +2994,10 @@ int psp_init_asd_microcode(struct psp_context *psp,
                goto out;
 
        asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
-       adev->psp.asd_fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
-       adev->psp.asd_feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
-       adev->psp.asd_ucode_size = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
-       adev->psp.asd_start_addr = (uint8_t *)asd_hdr +
+       adev->psp.asd.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+       adev->psp.asd.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
+       adev->psp.asd.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+       adev->psp.asd.start_addr = (uint8_t *)asd_hdr +
                                le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
        return 0;
 out:
@@ -3123,6 +3139,7 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
                adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr->sos.size_bytes);
                adev->psp.sos.start_addr = ucode_array_start_addr +
                                le32_to_cpu(sos_hdr->sos.offset_bytes);
+               adev->psp.xgmi_context.supports_extended_data = false;
        } else {
                /* Load alternate PSP SOS FW */
                sos_hdr_v1_3 = (const struct psp_firmware_header_v1_3 *)adev->psp.sos_fw->data;
@@ -3137,6 +3154,7 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
                adev->psp.sos.size_bytes = le32_to_cpu(sos_hdr_v1_3->sos_aux.size_bytes);
                adev->psp.sos.start_addr = ucode_array_start_addr +
                        le32_to_cpu(sos_hdr_v1_3->sos_aux.offset_bytes);
+               adev->psp.xgmi_context.supports_extended_data = true;
        }
 
        if ((adev->psp.sys.size_bytes == 0) || (adev->psp.sos.size_bytes == 0)) {
@@ -3266,40 +3284,40 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
 
        switch (desc->fw_type) {
        case TA_FW_TYPE_PSP_ASD:
-               psp->asd_fw_version        = le32_to_cpu(desc->fw_version);
-               psp->asd_feature_version   = le32_to_cpu(desc->fw_version);
-               psp->asd_ucode_size        = le32_to_cpu(desc->size_bytes);
-               psp->asd_start_addr        = ucode_start_addr;
+               psp->asd.fw_version        = le32_to_cpu(desc->fw_version);
+               psp->asd.feature_version   = le32_to_cpu(desc->fw_version);
+               psp->asd.size_bytes        = le32_to_cpu(desc->size_bytes);
+               psp->asd.start_addr        = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_XGMI:
-               psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
-               psp->ta_xgmi_ucode_size    = le32_to_cpu(desc->size_bytes);
-               psp->ta_xgmi_start_addr    = ucode_start_addr;
+               psp->xgmi.feature_version  = le32_to_cpu(desc->fw_version);
+               psp->xgmi.size_bytes       = le32_to_cpu(desc->size_bytes);
+               psp->xgmi.start_addr       = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_RAS:
-               psp->ta_ras_ucode_version  = le32_to_cpu(desc->fw_version);
-               psp->ta_ras_ucode_size     = le32_to_cpu(desc->size_bytes);
-               psp->ta_ras_start_addr     = ucode_start_addr;
+               psp->ras.feature_version   = le32_to_cpu(desc->fw_version);
+               psp->ras.size_bytes        = le32_to_cpu(desc->size_bytes);
+               psp->ras.start_addr        = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_HDCP:
-               psp->ta_hdcp_ucode_version = le32_to_cpu(desc->fw_version);
-               psp->ta_hdcp_ucode_size    = le32_to_cpu(desc->size_bytes);
-               psp->ta_hdcp_start_addr    = ucode_start_addr;
+               psp->hdcp.feature_version  = le32_to_cpu(desc->fw_version);
+               psp->hdcp.size_bytes       = le32_to_cpu(desc->size_bytes);
+               psp->hdcp.start_addr       = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_DTM:
-               psp->ta_dtm_ucode_version  = le32_to_cpu(desc->fw_version);
-               psp->ta_dtm_ucode_size     = le32_to_cpu(desc->size_bytes);
-               psp->ta_dtm_start_addr     = ucode_start_addr;
+               psp->dtm.feature_version  = le32_to_cpu(desc->fw_version);
+               psp->dtm.size_bytes       = le32_to_cpu(desc->size_bytes);
+               psp->dtm.start_addr       = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_RAP:
-               psp->ta_rap_ucode_version  = le32_to_cpu(desc->fw_version);
-               psp->ta_rap_ucode_size     = le32_to_cpu(desc->size_bytes);
-               psp->ta_rap_start_addr     = ucode_start_addr;
+               psp->rap.feature_version  = le32_to_cpu(desc->fw_version);
+               psp->rap.size_bytes       = le32_to_cpu(desc->size_bytes);
+               psp->rap.start_addr       = ucode_start_addr;
                break;
        case TA_FW_TYPE_PSP_SECUREDISPLAY:
-               psp->ta_securedisplay_ucode_version  = le32_to_cpu(desc->fw_version);
-               psp->ta_securedisplay_ucode_size     = le32_to_cpu(desc->size_bytes);
-               psp->ta_securedisplay_start_addr     = ucode_start_addr;
+               psp->securedisplay.feature_version  = le32_to_cpu(desc->fw_version);
+               psp->securedisplay.size_bytes       = le32_to_cpu(desc->size_bytes);
+               psp->securedisplay.start_addr       = ucode_start_addr;
                break;
        default:
                dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
index 6b16455..8ef2d28 100644 (file)
@@ -136,59 +136,32 @@ struct psp_asd_context {
        uint32_t                session_id;
 };
 
-struct psp_xgmi_context {
-       uint8_t                         initialized;
-       uint32_t                        session_id;
-       struct amdgpu_bo                *xgmi_shared_bo;
-       uint64_t                        xgmi_shared_mc_addr;
-       void                            *xgmi_shared_buf;
-       struct psp_xgmi_topology_info   top_info;
+struct ta_mem_context {
+       struct amdgpu_bo                *shared_bo;
+       uint64_t                shared_mc_addr;
+       void                    *shared_buf;
 };
 
-struct psp_ras_context {
-       /*ras fw*/
-       bool                    ras_initialized;
+struct ta_context {
+       bool                    initialized;
        uint32_t                session_id;
-       struct amdgpu_bo        *ras_shared_bo;
-       uint64_t                ras_shared_mc_addr;
-       void                    *ras_shared_buf;
-       struct amdgpu_ras       *ras;
+       struct ta_mem_context   mem_context;
 };
 
-struct psp_hdcp_context {
-       bool                    hdcp_initialized;
-       uint32_t                session_id;
-       struct amdgpu_bo        *hdcp_shared_bo;
-       uint64_t                hdcp_shared_mc_addr;
-       void                    *hdcp_shared_buf;
-       struct mutex            mutex;
-};
-
-struct psp_dtm_context {
-       bool                    dtm_initialized;
-       uint32_t                session_id;
-       struct amdgpu_bo        *dtm_shared_bo;
-       uint64_t                dtm_shared_mc_addr;
-       void                    *dtm_shared_buf;
-       struct mutex            mutex;
+struct ta_cp_context {
+       struct ta_context               context;
+       struct mutex                    mutex;
 };
 
-struct psp_rap_context {
-       bool                    rap_initialized;
-       uint32_t                session_id;
-       struct amdgpu_bo        *rap_shared_bo;
-       uint64_t                rap_shared_mc_addr;
-       void                    *rap_shared_buf;
-       struct mutex            mutex;
+struct psp_xgmi_context {
+       struct ta_context               context;
+       struct psp_xgmi_topology_info   top_info;
+       bool                            supports_extended_data;
 };
 
-struct psp_securedisplay_context {
-       bool                    securedisplay_initialized;
-       uint32_t                session_id;
-       struct amdgpu_bo        *securedisplay_shared_bo;
-       uint64_t                securedisplay_shared_mc_addr;
-       void                    *securedisplay_shared_buf;
-       struct mutex            mutex;
+struct psp_ras_context {
+       struct ta_context               context;
+       struct amdgpu_ras               *ras;
 };
 
 #define MEM_TRAIN_SYSTEM_SIGNATURE             0x54534942
@@ -327,11 +300,8 @@ struct psp_context
        uint64_t                        tmr_mc_addr;
 
        /* asd firmware */
-       const struct firmware           *asd_fw;
-       uint32_t                        asd_fw_version;
-       uint32_t                        asd_feature_version;
-       uint32_t                        asd_ucode_size;
-       uint8_t                         *asd_start_addr;
+       const struct firmware   *asd_fw;
+       struct psp_bin_desc             asd;
 
        /* toc firmware */
        const struct firmware           *toc_fw;
@@ -356,36 +326,20 @@ struct psp_context
        /* xgmi ta firmware and buffer */
        const struct firmware           *ta_fw;
        uint32_t                        ta_fw_version;
-       uint32_t                        ta_xgmi_ucode_version;
-       uint32_t                        ta_xgmi_ucode_size;
-       uint8_t                         *ta_xgmi_start_addr;
-       uint32_t                        ta_ras_ucode_version;
-       uint32_t                        ta_ras_ucode_size;
-       uint8_t                         *ta_ras_start_addr;
-
-       uint32_t                        ta_hdcp_ucode_version;
-       uint32_t                        ta_hdcp_ucode_size;
-       uint8_t                         *ta_hdcp_start_addr;
-
-       uint32_t                        ta_dtm_ucode_version;
-       uint32_t                        ta_dtm_ucode_size;
-       uint8_t                         *ta_dtm_start_addr;
-
-       uint32_t                        ta_rap_ucode_version;
-       uint32_t                        ta_rap_ucode_size;
-       uint8_t                         *ta_rap_start_addr;
-
-       uint32_t                        ta_securedisplay_ucode_version;
-       uint32_t                        ta_securedisplay_ucode_size;
-       uint8_t                         *ta_securedisplay_start_addr;
+       struct psp_bin_desc             xgmi;
+       struct psp_bin_desc             ras;
+       struct psp_bin_desc             hdcp;
+       struct psp_bin_desc             dtm;
+       struct psp_bin_desc             rap;
+       struct psp_bin_desc             securedisplay;
 
        struct psp_asd_context          asd_context;
        struct psp_xgmi_context         xgmi_context;
-       struct psp_ras_context          ras;
-       struct psp_hdcp_context         hdcp_context;
-       struct psp_dtm_context          dtm_context;
-       struct psp_rap_context          rap_context;
-       struct psp_securedisplay_context        securedisplay_context;
+       struct psp_ras_context          ras_context;
+       struct ta_cp_context            hdcp_context;
+       struct ta_cp_context            dtm_context;
+       struct ta_cp_context            rap_context;
+       struct ta_cp_context            securedisplay_context;
        struct mutex                    mutex;
        struct psp_memory_training_context mem_train_ctx;
 
@@ -452,14 +406,15 @@ int psp_gpu_reset(struct amdgpu_device *adev);
 int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
                        uint64_t cmd_gpu_addr, int cmd_size);
 
-int psp_xgmi_initialize(struct psp_context *psp);
+int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta);
 int psp_xgmi_terminate(struct psp_context *psp);
 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id);
 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id);
 int psp_xgmi_get_topology_info(struct psp_context *psp,
                               int number_devices,
-                              struct psp_xgmi_topology_info *topology);
+                              struct psp_xgmi_topology_info *topology,
+                              bool get_extended_data);
 int psp_xgmi_set_topology_info(struct psp_context *psp,
                               int number_devices,
                               struct psp_xgmi_topology_info *topology);
index 51909bf..12010c9 100644 (file)
@@ -76,7 +76,7 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
                        dev_info(adev->dev, "RAP L0 validate test success.\n");
                } else {
                        rap_shared_mem = (struct ta_rap_shared_memory *)
-                                        adev->psp.rap_context.rap_shared_buf;
+                                        adev->psp.rap_context.context.mem_context.shared_buf;
                        rap_cmd_output = &(rap_shared_mem->rap_out_message.output);
 
                        dev_info(adev->dev, "RAP test failed, the output is:\n");
@@ -119,7 +119,7 @@ void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
 #if defined(CONFIG_DEBUG_FS)
        struct drm_minor *minor = adev_to_drm(adev)->primary;
 
-       if (!adev->psp.rap_context.rap_initialized)
+       if (!adev->psp.rap_context.context.initialized)
                return;
 
        debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root,
index 194f7cc..96a8fd0 100644 (file)
@@ -64,7 +64,6 @@ const char *ras_block_string[] = {
 };
 
 #define ras_err_str(i) (ras_error_string[ffs(i)])
-#define ras_block_str(i) (ras_block_string[i])
 
 #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
 
@@ -530,7 +529,7 @@ static inline void put_obj(struct ras_manager *obj)
        if (obj && (--obj->use == 0))
                list_del(&obj->node);
        if (obj && (obj->use < 0))
-               DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
+               DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", ras_block_str(obj->head.block));
 }
 
 /* make one obj and return it. */
@@ -793,7 +792,6 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
                        .type = default_ras_type,
                        .sub_block_index = 0,
                };
-               strcpy(head.name, ras_block_str(i));
                if (bypass) {
                        /*
                         * bypass psp. vbios enable ras for us.
@@ -1866,7 +1864,7 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
 static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
 {
        struct amdgpu_ras_eeprom_control *control =
-               &adev->psp.ras.ras->eeprom_control;
+               &adev->psp.ras_context.ras->eeprom_control;
        struct eeprom_table_record *bps;
        int ret;
 
index 4d9c63f..eae604f 100644 (file)
@@ -49,10 +49,14 @@ enum amdgpu_ras_block {
        AMDGPU_RAS_BLOCK__MP0,
        AMDGPU_RAS_BLOCK__MP1,
        AMDGPU_RAS_BLOCK__FUSE,
+       AMDGPU_RAS_BLOCK__MPIO,
 
        AMDGPU_RAS_BLOCK__LAST
 };
 
+extern const char *ras_block_string[];
+
+#define ras_block_str(i) (ras_block_string[i])
 #define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST
 #define AMDGPU_RAS_BLOCK_MASK  ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)
 
@@ -306,7 +310,6 @@ struct ras_common_if {
        enum amdgpu_ras_block block;
        enum amdgpu_ras_error_type type;
        uint32_t sub_block_index;
-       /* block name */
        char name[32];
 };
 
@@ -418,7 +421,7 @@ struct ras_badpage {
 /* interfaces for IP */
 struct ras_fs_if {
        struct ras_common_if head;
-       char sysfs_name[32];
+       const char* sysfs_name;
        char debugfs_name[32];
 };
 
@@ -470,8 +473,8 @@ struct ras_debug_if {
  * 8: feature disable
  */
 
-#define amdgpu_ras_get_context(adev)           ((adev)->psp.ras.ras)
-#define amdgpu_ras_set_context(adev, ras_con)  ((adev)->psp.ras.ras = (ras_con))
+#define amdgpu_ras_get_context(adev)           ((adev)->psp.ras_context.ras)
+#define amdgpu_ras_set_context(adev, ras_con)  ((adev)->psp.ras_context.ras = (ras_con))
 
 /* check if ras is supported on block, say, sdma, gfx */
 static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
index 1945902..dc44c94 100644 (file)
@@ -114,21 +114,22 @@ static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
 static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
                                  struct amdgpu_ras_eeprom_control *control)
 {
+       u8 i2c_addr;
+
        if (!control)
                return false;
 
-       control->i2c_address = 0;
-
-       if (amdgpu_atomfirmware_ras_rom_addr(adev, (uint8_t*)&control->i2c_address))
-       {
-               if (control->i2c_address == 0xA0)
-                       control->i2c_address = 0;
-               else if (control->i2c_address == 0xA8)
-                       control->i2c_address = 0x40000;
-               else {
-                       dev_warn(adev->dev, "RAS EEPROM I2C address not supported");
-                       return false;
-               }
+       if (amdgpu_atomfirmware_ras_rom_addr(adev, &i2c_addr)) {
+               /* The address given by VBIOS is an 8-bit, wire-format
+                * address, i.e. the most significant byte.
+                *
+                * Normalize it to a 19-bit EEPROM address. Remove the
+                * device type identifier and make it a 7-bit address;
+                * then make it a 19-bit EEPROM address. See top of
+                * amdgpu_eeprom.c.
+                */
+               i2c_addr = (i2c_addr & 0x0F) >> 1;
+               control->i2c_address = ((u32) i2c_addr) << 16;
 
                return true;
        }
index 9c11ced..e713d31 100644 (file)
@@ -48,6 +48,9 @@
 #define AMDGPU_FENCE_FLAG_INT           (1 << 1)
 #define AMDGPU_FENCE_FLAG_TC_WB_ONLY    (1 << 2)
 
+/* fence flag bit to indicate the fence is embedded in job */
+#define AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT             (DMA_FENCE_FLAG_USER_BITS + 1)
+
 #define to_amdgpu_ring(s) container_of((s), struct amdgpu_ring, sched)
 
 #define AMDGPU_IB_POOL_SIZE    (1024 * 1024)
@@ -118,7 +121,7 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
 int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
-int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
+int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job,
                      unsigned flags);
 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
                              uint32_t timeout);
index de91d29..65debb6 100644 (file)
@@ -105,7 +105,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
                adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA;
                adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->sdma.ras_if->sub_block_index = 0;
-               strcpy(adev->sdma.ras_if->name, "sdma");
        }
        fs_info.head = ih_info->head = *adev->sdma.ras_if;
 
index 1234539..cc7597a 100644 (file)
@@ -80,7 +80,7 @@ void psp_securedisplay_parse_resp_status(struct psp_context *psp,
 void psp_prep_securedisplay_cmd_buf(struct psp_context *psp, struct securedisplay_cmd **cmd,
        enum ta_securedisplay_command command_id)
 {
-       *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.securedisplay_shared_buf;
+       *cmd = (struct securedisplay_cmd *)psp->securedisplay_context.context.mem_context.shared_buf;
        memset(*cmd, 0, sizeof(struct securedisplay_cmd));
        (*cmd)->status = TA_SECUREDISPLAY_STATUS__GENERIC_FAILURE;
        (*cmd)->cmd_id = command_id;
@@ -170,7 +170,7 @@ void amdgpu_securedisplay_debugfs_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
 
-       if (!adev->psp.securedisplay_context.securedisplay_initialized)
+       if (!adev->psp.securedisplay_context.context.initialized)
                return;
 
        debugfs_create_file("securedisplay_test", S_IWUSR, adev_to_drm(adev)->primary->debugfs_root,
index 5fdecea..abd8469 100644 (file)
@@ -525,9 +525,9 @@ FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
 FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
 FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
 FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
-FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version);
-FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_ras_ucode_version);
-FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_xgmi_ucode_version);
+FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd.fw_version);
+FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras.feature_version);
+FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi.feature_version);
 FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
 FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
 FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
index e2e2624..7c2538d 100644 (file)
@@ -136,21 +136,11 @@ struct psp_firmware_header_v2_0 {
 /* version_major=1, version_minor=0 */
 struct ta_firmware_header_v1_0 {
        struct common_firmware_header header;
-       uint32_t ta_xgmi_ucode_version;
-       uint32_t ta_xgmi_offset_bytes;
-       uint32_t ta_xgmi_size_bytes;
-       uint32_t ta_ras_ucode_version;
-       uint32_t ta_ras_offset_bytes;
-       uint32_t ta_ras_size_bytes;
-       uint32_t ta_hdcp_ucode_version;
-       uint32_t ta_hdcp_offset_bytes;
-       uint32_t ta_hdcp_size_bytes;
-       uint32_t ta_dtm_ucode_version;
-       uint32_t ta_dtm_offset_bytes;
-       uint32_t ta_dtm_size_bytes;
-       uint32_t ta_securedisplay_ucode_version;
-       uint32_t ta_securedisplay_offset_bytes;
-       uint32_t ta_securedisplay_size_bytes;
+       struct psp_fw_legacy_bin_desc xgmi;
+       struct psp_fw_legacy_bin_desc ras;
+       struct psp_fw_legacy_bin_desc hdcp;
+       struct psp_fw_legacy_bin_desc dtm;
+       struct psp_fw_legacy_bin_desc securedisplay;
 };
 
 enum ta_fw_type {
index 0c7c56a..a90029e 100644 (file)
@@ -41,7 +41,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
                adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC;
                adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->umc.ras_if->sub_block_index = 0;
-               strcpy(adev->umc.ras_if->name, "umc");
        }
        ih_info.head = fs_info.head = *adev->umc.ras_if;
 
index 0f576f2..d451c35 100644 (file)
@@ -326,7 +326,6 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
        int i, j;
 
-       cancel_delayed_work_sync(&adev->uvd.idle_work);
        drm_sched_entity_destroy(&adev->uvd.entity);
 
        for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
index 1ae7f82..8e8dee9 100644 (file)
@@ -218,7 +218,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
        if (adev->vce.vcpu_bo == NULL)
                return 0;
 
-       cancel_delayed_work_sync(&adev->vce.idle_work);
        drm_sched_entity_destroy(&adev->vce.entity);
 
        amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
index 6780df0..008a308 100644 (file)
@@ -258,8 +258,6 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
 {
        int i, j;
 
-       cancel_delayed_work_sync(&adev->vcn.idle_work);
-
        for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
                if (adev->vcn.harvest_config & (1 << j))
                        continue;
index 12a7cc2..ca058fb 100644 (file)
@@ -532,9 +532,9 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos.fw_version);
-       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,      adev->psp.asd_fw_version);
-       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,   adev->psp.ta_ras_ucode_version);
-       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,  adev->psp.ta_xgmi_ucode_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,      adev->psp.asd.fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,   adev->psp.ras.feature_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,  adev->psp.xgmi.feature_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC,      adev->pm.fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA,     adev->sdma.instance[0].fw_version);
        POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2,    adev->sdma.instance[1].fw_version);
index 2a88ed5..6b15cad 100644 (file)
@@ -926,7 +926,7 @@ static int amdgpu_vm_pt_create(struct amdgpu_device *adev,
        bp.size = amdgpu_vm_bo_size(adev, level);
        bp.byte_align = AMDGPU_GPU_PAGE_SIZE;
        bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
-       bp.domain = amdgpu_bo_get_preferred_pin_domain(adev, bp.domain);
+       bp.domain = amdgpu_bo_get_preferred_domain(adev, bp.domain);
        bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
                AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 
@@ -1218,7 +1218,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
                amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
 
        if (vm_flush_needed || pasid_mapping_needed) {
-               r = amdgpu_fence_emit(ring, &fence, 0);
+               r = amdgpu_fence_emit(ring, &fence, NULL, 0);
                if (r)
                        return r;
        }
@@ -3345,12 +3345,13 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
  * @adev: amdgpu device pointer
  * @pasid: PASID of the VM
  * @addr: Address of the fault
+ * @write_fault: true if write fault, false if read fault
  *
  * Try to gracefully handle a VM fault. Return true if the fault was handled and
  * shouldn't be reported any more.
  */
 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
-                           uint64_t addr)
+                           uint64_t addr, bool write_fault)
 {
        bool is_compute_context = false;
        struct amdgpu_bo *root;
@@ -3375,7 +3376,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
        addr /= AMDGPU_GPU_PAGE_SIZE;
 
        if (is_compute_context &&
-           !svm_range_restore_pages(adev, pasid, addr)) {
+           !svm_range_restore_pages(adev, pasid, addr, write_fault)) {
                amdgpu_bo_unref(&root);
                return true;
        }
index 80cc9ab..85fcfb8 100644 (file)
@@ -448,7 +448,7 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);
 void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
                             struct amdgpu_task_info *task_info);
 bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
-                           uint64_t addr);
+                           uint64_t addr, bool write_fault);
 
 void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
 
index 2fd77c3..7b2b098 100644 (file)
@@ -361,7 +361,7 @@ static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
  * @man: TTM memory type manager
  * @tbo: TTM BO we need this range for
  * @place: placement flags and restrictions
- * @mem: the resulting mem object
+ * @res: the resulting mem object
  *
  * Allocate VRAM for the given BO.
  */
@@ -487,7 +487,7 @@ error_sub:
  * amdgpu_vram_mgr_del - free ranges
  *
  * @man: TTM memory type manager
- * @mem: TTM memory object
+ * @res: TTM memory object
  *
  * Free the allocated VRAM again.
  */
@@ -522,7 +522,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
  * amdgpu_vram_mgr_alloc_sgt - allocate and fill a sg table
  *
  * @adev: amdgpu device pointer
- * @mem: TTM memory object
+ * @res: TTM memory object
  * @offset: byte offset from the base of VRAM BO
  * @length: number of bytes to export in sg_table
  * @dev: the other device
index 258cf86..978ac92 100644 (file)
 #include "wafl/wafl2_4_0_0_smn.h"
 #include "wafl/wafl2_4_0_0_sh_mask.h"
 
+#define smnPCS_XGMI23_PCS_ERROR_STATUS   0x11a01210
+#define smnPCS_XGMI3X16_PCS_ERROR_STATUS 0x11a0020c
+#define smnPCS_GOPX1_PCS_ERROR_STATUS    0x12200210
+
 static DEFINE_MUTEX(xgmi_mutex);
 
 #define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE                4
@@ -63,6 +67,33 @@ static const int wafl_pcs_err_status_reg_arct[] = {
        smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
 };
 
+static const int xgmi23_pcs_err_status_reg_aldebaran[] = {
+       smnPCS_XGMI23_PCS_ERROR_STATUS,
+       smnPCS_XGMI23_PCS_ERROR_STATUS + 0x100000,
+       smnPCS_XGMI23_PCS_ERROR_STATUS + 0x200000,
+       smnPCS_XGMI23_PCS_ERROR_STATUS + 0x300000,
+       smnPCS_XGMI23_PCS_ERROR_STATUS + 0x400000,
+       smnPCS_XGMI23_PCS_ERROR_STATUS + 0x500000,
+       smnPCS_XGMI23_PCS_ERROR_STATUS + 0x600000,
+       smnPCS_XGMI23_PCS_ERROR_STATUS + 0x700000
+};
+
+static const int xgmi3x16_pcs_err_status_reg_aldebaran[] = {
+       smnPCS_XGMI3X16_PCS_ERROR_STATUS,
+       smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x100000,
+       smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x200000,
+       smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x300000,
+       smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x400000,
+       smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x500000,
+       smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x600000,
+       smnPCS_XGMI3X16_PCS_ERROR_STATUS + 0x700000
+};
+
+static const int walf_pcs_err_status_reg_aldebaran[] = {
+       smnPCS_GOPX1_PCS_ERROR_STATUS,
+       smnPCS_GOPX1_PCS_ERROR_STATUS + 0x100000
+};
+
 static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
        {"XGMI PCS DataLossErr",
         SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
@@ -498,6 +529,32 @@ int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
        return  -EINVAL;
 }
 
+/*
+ * Devices that support extended data require the entire hive to initialize with
+ * the shared memory buffer flag set.
+ *
+ * Hive locks and conditions apply - see amdgpu_xgmi_add_device
+ */
+static int amdgpu_xgmi_initialize_hive_get_data_partition(struct amdgpu_hive_info *hive,
+                                                       bool set_extended_data)
+{
+       struct amdgpu_device *tmp_adev;
+       int ret;
+
+       list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+               ret = psp_xgmi_initialize(&tmp_adev->psp, set_extended_data, false);
+               if (ret) {
+                       dev_err(tmp_adev->dev,
+                               "XGMI: Failed to initialize xgmi session for data partition %i\n",
+                               set_extended_data);
+                       return ret;
+               }
+
+       }
+
+       return 0;
+}
+
 int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 {
        struct psp_xgmi_topology_info *top_info;
@@ -512,7 +569,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
 
        if (!adev->gmc.xgmi.pending_reset &&
            amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
-               ret = psp_xgmi_initialize(&adev->psp);
+               ret = psp_xgmi_initialize(&adev->psp, false, true);
                if (ret) {
                        dev_err(adev->dev,
                                "XGMI: Failed to initialize xgmi session\n");
@@ -575,7 +632,7 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
                /* get latest topology info for each device from psp */
                list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
                        ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
-                                       &tmp_adev->psp.xgmi_context.top_info);
+                                       &tmp_adev->psp.xgmi_context.top_info, false);
                        if (ret) {
                                dev_err(tmp_adev->dev,
                                        "XGMI: Get topology failure on device %llx, hive %llx, ret %d",
@@ -585,6 +642,34 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
                                goto exit_unlock;
                        }
                }
+
+               /* get topology again for hives that support extended data */
+               if (adev->psp.xgmi_context.supports_extended_data) {
+
+                       /* initialize the hive to get extended data.  */
+                       ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, true);
+                       if (ret)
+                               goto exit_unlock;
+
+                       /* get the extended data. */
+                       list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
+                               ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
+                                               &tmp_adev->psp.xgmi_context.top_info, true);
+                               if (ret) {
+                                       dev_err(tmp_adev->dev,
+                                               "XGMI: Get topology for extended data failure on device %llx, hive %llx, ret %d",
+                                               tmp_adev->gmc.xgmi.node_id,
+                                               tmp_adev->gmc.xgmi.hive_id, ret);
+                                       goto exit_unlock;
+                               }
+                       }
+
+                       /* initialize the hive to get non-extended data for the next round. */
+                       ret = amdgpu_xgmi_initialize_hive_get_data_partition(hive, false);
+                       if (ret)
+                               goto exit_unlock;
+
+               }
        }
 
        if (!ret && !adev->gmc.xgmi.pending_reset)
@@ -663,7 +748,6 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
                adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
                adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->gmc.xgmi.ras_if->sub_block_index = 0;
-               strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
        }
        ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
        r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
@@ -718,6 +802,17 @@ static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
                        pcs_clear_status(adev,
                                         xgmi_pcs_err_status_reg_vg20[i]);
                break;
+       case CHIP_ALDEBARAN:
+               for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++)
+                       pcs_clear_status(adev,
+                                        xgmi23_pcs_err_status_reg_aldebaran[i]);
+       for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++)
+               pcs_clear_status(adev,
+                                xgmi3x16_pcs_err_status_reg_aldebaran[i]);
+               for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++)
+                       pcs_clear_status(adev,
+                                        walf_pcs_err_status_reg_aldebaran[i]);
+               break;
        default:
                break;
        }
@@ -795,7 +890,6 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
                }
                break;
        case CHIP_VEGA20:
-       default:
                /* check xgmi pcs error */
                for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
                        data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
@@ -811,6 +905,32 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
                                                data, &ue_cnt, &ce_cnt, false);
                }
                break;
+       case CHIP_ALDEBARAN:
+               /* check xgmi23 pcs error */
+               for (i = 0; i < ARRAY_SIZE(xgmi23_pcs_err_status_reg_aldebaran); i++) {
+                       data = RREG32_PCIE(xgmi23_pcs_err_status_reg_aldebaran[i]);
+                       if (data)
+                               amdgpu_xgmi_query_pcs_error_status(adev,
+                                               data, &ue_cnt, &ce_cnt, true);
+               }
+               /* check xgmi3x16 pcs error */
+               for (i = 0; i < ARRAY_SIZE(xgmi3x16_pcs_err_status_reg_aldebaran); i++) {
+                       data = RREG32_PCIE(xgmi3x16_pcs_err_status_reg_aldebaran[i]);
+                       if (data)
+                               amdgpu_xgmi_query_pcs_error_status(adev,
+                                               data, &ue_cnt, &ce_cnt, true);
+               }
+               /* check wafl pcs error */
+               for (i = 0; i < ARRAY_SIZE(walf_pcs_err_status_reg_aldebaran); i++) {
+                       data = RREG32_PCIE(walf_pcs_err_status_reg_aldebaran[i]);
+                       if (data)
+                               amdgpu_xgmi_query_pcs_error_status(adev,
+                                               data, &ue_cnt, &ce_cnt, false);
+               }
+               break;
+       default:
+               dev_warn(adev->dev, "XGMI RAS error query not supported");
+               break;
        }
 
        adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);
index 159a2a4..afad094 100644 (file)
@@ -851,7 +851,7 @@ void amdgpu_atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
        pll->reference_div = amdgpu_crtc->pll_reference_div;
        pll->post_div = amdgpu_crtc->pll_post_div;
 
-       amdgpu_pll_compute(pll, amdgpu_crtc->adjusted_clock, &pll_clock,
+       amdgpu_pll_compute(adev, pll, amdgpu_crtc->adjusted_clock, &pll_clock,
                            &fb_div, &frac_fb_div, &ref_div, &post_div);
 
        amdgpu_atombios_crtc_program_ss(adev, ATOM_DISABLE, amdgpu_crtc->pll_id,
index 1769c4c..00a2b36 100644 (file)
@@ -85,7 +85,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_2_alde[] = {
        SOC15_REG_GOLDEN_VALUE(GC, 0, regTCI_CNTL_3, 0xff, 0x20),
 };
 
-/**
+/*
  * This shader is used to clear VGPRS and LDS, and also write the input
  * pattern into the write back buffer, which will be used by driver to
  * check whether all SIMDs have been covered.
@@ -206,7 +206,7 @@ const struct soc15_reg_entry vgpr_init_regs_aldebaran[] = {
        { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
 };
 
-/**
+/*
  * The below shaders are used to clear SGPRS, and also write the input
  * pattern into the write back buffer. The first two dispatch should be
  * scheduled simultaneously which make sure that all SGPRS could be
@@ -302,7 +302,7 @@ const struct soc15_reg_entry sgpr96_init_regs_aldebaran[] = {
        { SOC15_REG_ENTRY(GC, 0, regCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
 };
 
-/**
+/*
  * This shader is used to clear the uninitiated sgprs after the above
  * two dispatches, because of hardware feature, dispath 0 couldn't clear
  * top hole sgprs. Therefore need 4 waves per SIMD to cover these sgprs
index 8fca72e..497b86c 100644 (file)
@@ -75,9 +75,8 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
                max_physical_node_id     = 7;
                break;
        case CHIP_ALDEBARAN:
-               /* just using duplicates for Aldebaran support, revisit later */
-               max_num_physical_nodes   = 8;
-               max_physical_node_id     = 7;
+               max_num_physical_nodes   = 16;
+               max_physical_node_id     = 15;
                break;
        default:
                return -EINVAL;
index 24b781e..41c3a0d 100644 (file)
@@ -93,6 +93,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
                                       struct amdgpu_iv_entry *entry)
 {
        bool retry_fault = !!(entry->src_data[1] & 0x80);
+       bool write_fault = !!(entry->src_data[1] & 0x20);
        struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
        struct amdgpu_task_info task_info;
        uint32_t status = 0;
@@ -121,7 +122,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
                /* Try to handle the recoverable page faults by filling page
                 * tables
                 */
-               if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
+               if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
                        return 1;
        }
 
index 097230b..d90c16a 100644 (file)
@@ -55,6 +55,7 @@
 #include "umc_v6_0.h"
 #include "umc_v6_7.h"
 #include "hdp_v4_0.h"
+#include "mca_v3_0.h"
 
 #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
 
@@ -506,6 +507,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_iv_entry *entry)
 {
        bool retry_fault = !!(entry->src_data[1] & 0x80);
+       bool write_fault = !!(entry->src_data[1] & 0x20);
        uint32_t status = 0, cid = 0, rw = 0;
        struct amdgpu_task_info task_info;
        struct amdgpu_vmhub *hub;
@@ -536,7 +538,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
                /* Try to handle the recoverable page faults by filling page
                 * tables
                 */
-               if (amdgpu_vm_handle_fault(adev, entry->pasid, addr))
+               if (amdgpu_vm_handle_fault(adev, entry->pasid, addr, write_fault))
                        return 1;
        }
 
@@ -1229,6 +1231,18 @@ static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
        adev->hdp.ras_funcs = &hdp_v4_0_ras_funcs;
 }
 
+static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_ALDEBARAN:
+               if (!adev->gmc.xgmi.connected_to_cpu)
+                       adev->mca.funcs = &mca_v3_0_funcs;
+               break;
+       default:
+               break;
+       }
+}
+
 static int gmc_v9_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1250,6 +1264,7 @@ static int gmc_v9_0_early_init(void *handle)
        gmc_v9_0_set_mmhub_ras_funcs(adev);
        gmc_v9_0_set_gfxhub_funcs(adev);
        gmc_v9_0_set_hdp_ras_funcs(adev);
+       gmc_v9_0_set_mca_funcs(adev);
 
        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
        adev->gmc.shared_aperture_end =
@@ -1461,6 +1476,8 @@ static int gmc_v9_0_sw_init(void *handle)
        adev->gfxhub.funcs->init(adev);
 
        adev->mmhub.funcs->init(adev);
+       if (adev->mca.funcs)
+               adev->mca.funcs->init(adev);
 
        spin_lock_init(&adev->gmc.invalidate_lock);
 
diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
new file mode 100644 (file)
index 0000000..058b657
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu_ras.h"
+#include "amdgpu.h"
+#include "amdgpu_mca.h"
+
+#define smnMCMP0_STATUST0      0x03830408
+#define smnMCMP1_STATUST0      0x03b30408
+#define smnMCMPIO_STATUST0     0x0c930408
+
+
+static void mca_v3_0_mp0_query_ras_error_count(struct amdgpu_device *adev,
+                                              void *ras_error_status)
+{
+       amdgpu_mca_query_ras_error_count(adev,
+                                        smnMCMP0_STATUST0,
+                                        ras_error_status);
+}
+
+static int mca_v3_0_mp0_ras_late_init(struct amdgpu_device *adev)
+{
+       return amdgpu_mca_ras_late_init(adev, &adev->mca.mp0);
+}
+
+static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev)
+{
+       amdgpu_mca_ras_fini(adev, &adev->mca.mp0);
+}
+
+const struct amdgpu_mca_ras_funcs mca_v3_0_mp0_ras_funcs = {
+       .ras_late_init = mca_v3_0_mp0_ras_late_init,
+       .ras_fini = mca_v3_0_mp0_ras_fini,
+       .query_ras_error_count = mca_v3_0_mp0_query_ras_error_count,
+       .query_ras_error_address = NULL,
+       .ras_block = AMDGPU_RAS_BLOCK__MP0,
+       .sysfs_name = "mp0_err_count",
+};
+
+static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev,
+                                              void *ras_error_status)
+{
+       amdgpu_mca_query_ras_error_count(adev,
+                                        smnMCMP1_STATUST0,
+                                        ras_error_status);
+}
+
+static int mca_v3_0_mp1_ras_late_init(struct amdgpu_device *adev)
+{
+       return amdgpu_mca_ras_late_init(adev, &adev->mca.mp1);
+}
+
+static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev)
+{
+       amdgpu_mca_ras_fini(adev, &adev->mca.mp1);
+}
+
+const struct amdgpu_mca_ras_funcs mca_v3_0_mp1_ras_funcs = {
+       .ras_late_init = mca_v3_0_mp1_ras_late_init,
+       .ras_fini = mca_v3_0_mp1_ras_fini,
+       .query_ras_error_count = mca_v3_0_mp1_query_ras_error_count,
+       .query_ras_error_address = NULL,
+       .ras_block = AMDGPU_RAS_BLOCK__MP1,
+       .sysfs_name = "mp1_err_count",
+};
+
+static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev,
+                                              void *ras_error_status)
+{
+       amdgpu_mca_query_ras_error_count(adev,
+                                        smnMCMPIO_STATUST0,
+                                        ras_error_status);
+}
+
+static int mca_v3_0_mpio_ras_late_init(struct amdgpu_device *adev)
+{
+       return amdgpu_mca_ras_late_init(adev, &adev->mca.mpio);
+}
+
+static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev)
+{
+       amdgpu_mca_ras_fini(adev, &adev->mca.mpio);
+}
+
+const struct amdgpu_mca_ras_funcs mca_v3_0_mpio_ras_funcs = {
+       .ras_late_init = mca_v3_0_mpio_ras_late_init,
+       .ras_fini = mca_v3_0_mpio_ras_fini,
+       .query_ras_error_count = mca_v3_0_mpio_query_ras_error_count,
+       .query_ras_error_address = NULL,
+       .ras_block = AMDGPU_RAS_BLOCK__MPIO,
+       .sysfs_name = "mpio_err_count",
+};
+
+
+static void mca_v3_0_init(struct amdgpu_device *adev)
+{
+       struct amdgpu_mca *mca = &adev->mca;
+
+       mca->mp0.ras_funcs = &mca_v3_0_mp0_ras_funcs;
+       mca->mp1.ras_funcs = &mca_v3_0_mp1_ras_funcs;
+       mca->mpio.ras_funcs = &mca_v3_0_mpio_ras_funcs;
+}
+
+const struct amdgpu_mca_funcs mca_v3_0_funcs = {
+       .init = mca_v3_0_init,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.h
new file mode 100644 (file)
index 0000000..b899b86
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2021  Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __MCA_V3_0_H__
+#define __MCA_V3_0_H__
+
+extern const struct amdgpu_mca_funcs mca_v3_0_funcs;
+
+#endif
index 2095863..2cdab80 100644 (file)
@@ -24,9 +24,7 @@
 #ifndef __MMSCH_V1_0_H__
 #define __MMSCH_V1_0_H__
 
-#define MMSCH_VERSION_MAJOR    1
-#define MMSCH_VERSION_MINOR    0
-#define MMSCH_VERSION  (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
+#define MMSCH_VERSION  0x1
 
 enum mmsch_v1_0_command_type {
        MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
index ff2307d..23b066b 100644 (file)
@@ -258,6 +258,8 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
        amdgpu_virt_fini_data_exchange(adev);
        atomic_set(&adev->in_gpu_reset, 1);
 
+       xgpu_ai_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
+
        do {
                if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
                        goto flr_done;
index 5057263..bd3b231 100644 (file)
@@ -37,6 +37,7 @@ enum idh_request {
        IDH_REQ_GPU_RESET_ACCESS,
 
        IDH_LOG_VF_ERROR       = 200,
+       IDH_READY_TO_RESET      = 201,
 };
 
 enum idh_event {
index 9f7aac4..a35e6d8 100644 (file)
@@ -96,7 +96,11 @@ static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
 
 static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
 {
-       int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;
+       int r;
+       uint64_t timeout, now;
+
+       now = (uint64_t)ktime_to_ms(ktime_get());
+       timeout = now + NV_MAILBOX_POLL_MSG_TIMEDOUT;
 
        do {
                r = xgpu_nv_mailbox_rcv_msg(adev, event);
@@ -104,8 +108,8 @@ static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
                        return 0;
 
                msleep(10);
-               timeout -= 10;
-       } while (timeout > 1);
+               now = (uint64_t)ktime_to_ms(ktime_get());
+       } while (timeout > now);
 
 
        return -ETIME;
@@ -149,9 +153,10 @@ static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev,
 static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
                                        enum idh_request req)
 {
-       int r;
+       int r, retry = 1;
        enum idh_event event = -1;
 
+send_request:
        xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);
 
        switch (req) {
@@ -170,6 +175,9 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
        if (event != -1) {
                r = xgpu_nv_poll_msg(adev, event);
                if (r) {
+                       if (retry++ < 2)
+                               goto send_request;
+
                        if (req != IDH_REQ_GPU_INIT_DATA) {
                                pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);
                                return r;
@@ -279,6 +287,8 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
        amdgpu_virt_fini_data_exchange(adev);
        atomic_set(&adev->in_gpu_reset, 1);
 
+       xgpu_nv_mailbox_trans_msg(adev, IDH_READY_TO_RESET, 0, 0, 0);
+
        do {
                if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
                        goto flr_done;
index 9f58086..73887b0 100644 (file)
@@ -37,7 +37,8 @@ enum idh_request {
        IDH_REQ_GPU_RESET_ACCESS,
        IDH_REQ_GPU_INIT_DATA,
 
-       IDH_LOG_VF_ERROR       = 200,
+       IDH_LOG_VF_ERROR        = 200,
+       IDH_READY_TO_RESET      = 201,
 };
 
 enum idh_event {
index cef9297..f50045c 100644 (file)
 #define mmRCC_DEV0_EPF0_STRAP0_ALDE                    0x0015
 #define mmRCC_DEV0_EPF0_STRAP0_ALDE_BASE_IDX           2
 
+#define mmBIF_DOORBELL_INT_CNTL_ALDE                   0x00fe
+#define mmBIF_DOORBELL_INT_CNTL_ALDE_BASE_IDX          2
+#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE__SHIFT  0x18
+#define BIF_DOORBELL_INT_CNTL_ALDE__DOORBELL_INTERRUPT_DISABLE_MASK    0x01000000L
+
+#define mmBIF_INTR_CNTL_ALDE                           0x0101
+#define mmBIF_INTR_CNTL_ALDE_BASE_IDX                  2
+
 static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
                                        void *ras_error_status);
 
@@ -346,14 +354,21 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
        struct ras_err_data err_data = {0, 0, 0, NULL};
        struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
-       bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+       if (adev->asic_type == CHIP_ALDEBARAN)
+               bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
+       else
+               bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+
        if (REG_GET_FIELD(bif_doorbell_intr_cntl,
                BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
                /* driver has to clear the interrupt status when bif ring is disabled */
                bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
                                                BIF_DOORBELL_INT_CNTL,
                                                RAS_CNTLR_INTERRUPT_CLEAR, 1);
-               WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+               if (adev->asic_type == CHIP_ALDEBARAN)
+                       WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl);
+               else
+                       WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
 
                if (!ras->disable_ras_err_cnt_harvest) {
                        /*
@@ -372,13 +387,13 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
                                                "errors detected in %s block, "
                                                "no user action is needed.\n",
                                                obj->err_data.ce_count,
-                                               adev->nbio.ras_if->name);
+                                               ras_block_str(adev->nbio.ras_if->block));
 
                        if (err_data.ue_count)
                                dev_info(adev->dev, "%ld uncorrectable hardware "
                                                "errors detected in %s block\n",
                                                obj->err_data.ue_count,
-                                               adev->nbio.ras_if->name);
+                                               ras_block_str(adev->nbio.ras_if->block));
                }
 
                dev_info(adev->dev, "RAS controller interrupt triggered "
@@ -395,14 +410,22 @@ static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_d
 {
        uint32_t bif_doorbell_intr_cntl;
 
-       bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+       if (adev->asic_type == CHIP_ALDEBARAN)
+               bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE);
+       else
+               bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
+
        if (REG_GET_FIELD(bif_doorbell_intr_cntl,
                BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
                /* driver has to clear the interrupt status when bif ring is disabled */
                bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
                                                BIF_DOORBELL_INT_CNTL,
                                                RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
-               WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
+
+               if (adev->asic_type == CHIP_ALDEBARAN)
+                       WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL_ALDE, bif_doorbell_intr_cntl);
+               else
+                       WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
 
                amdgpu_ras_global_ras_isr(adev);
        }
@@ -420,14 +443,23 @@ static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
         */
        uint32_t bif_intr_cntl;
 
-       bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+       if (adev->asic_type == CHIP_ALDEBARAN)
+               bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
+       else
+               bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+
        if (state == AMDGPU_IRQ_STATE_ENABLE) {
                /* set interrupt vector select bit to 0 to select
                 * vetcor 1 for bare metal case */
                bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
                                              BIF_INTR_CNTL,
                                              RAS_INTR_VEC_SEL, 0);
-               WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
+               if (adev->asic_type == CHIP_ALDEBARAN)
+                       WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
+               else
+                       WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
        }
 
        return 0;
@@ -456,14 +488,22 @@ static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *ade
         */
        uint32_t bif_intr_cntl;
 
-       bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+       if (adev->asic_type == CHIP_ALDEBARAN)
+               bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE);
+       else
+               bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
+
        if (state == AMDGPU_IRQ_STATE_ENABLE) {
                /* set interrupt vector select bit to 0 to select
                 * vetcor 1 for bare metal case */
                bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
                                              BIF_INTR_CNTL,
                                              RAS_INTR_VEC_SEL, 0);
-               WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
+
+               if (adev->asic_type == CHIP_ALDEBARAN)
+                       WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL_ALDE, bif_intr_cntl);
+               else
+                       WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
        }
 
        return 0;
@@ -572,7 +612,11 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
 static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
                                                bool enable)
 {
-       WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
+       if (adev->asic_type == CHIP_ALDEBARAN)
+               WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL_ALDE,
+                      DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
+       else
+               WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
                       DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
 }
 
index 4b1cc5e..5872d68 100644 (file)
@@ -84,29 +84,29 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
 
                ta_hdr = (const struct ta_firmware_header_v1_0 *)
                                 adev->psp.ta_fw->data;
-               adev->psp.ta_hdcp_ucode_version =
-                       le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
-               adev->psp.ta_hdcp_ucode_size =
-                       le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
-               adev->psp.ta_hdcp_start_addr =
+               adev->psp.hdcp.feature_version =
+                       le32_to_cpu(ta_hdr->hdcp.fw_version);
+               adev->psp.hdcp.size_bytes =
+                       le32_to_cpu(ta_hdr->hdcp.size_bytes);
+               adev->psp.hdcp.start_addr =
                        (uint8_t *)ta_hdr +
                        le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
 
-               adev->psp.ta_dtm_ucode_version =
-                       le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
-               adev->psp.ta_dtm_ucode_size =
-                       le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
-               adev->psp.ta_dtm_start_addr =
-                       (uint8_t *)adev->psp.ta_hdcp_start_addr +
-                       le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
-
-               adev->psp.ta_securedisplay_ucode_version =
-                       le32_to_cpu(ta_hdr->ta_securedisplay_ucode_version);
-               adev->psp.ta_securedisplay_ucode_size =
-                       le32_to_cpu(ta_hdr->ta_securedisplay_size_bytes);
-               adev->psp.ta_securedisplay_start_addr =
-                       (uint8_t *)adev->psp.ta_hdcp_start_addr +
-                       le32_to_cpu(ta_hdr->ta_securedisplay_offset_bytes);
+               adev->psp.dtm.feature_version =
+                       le32_to_cpu(ta_hdr->dtm.fw_version);
+               adev->psp.dtm.size_bytes =
+                       le32_to_cpu(ta_hdr->dtm.size_bytes);
+               adev->psp.dtm.start_addr =
+                       (uint8_t *)adev->psp.hdcp.start_addr +
+                       le32_to_cpu(ta_hdr->dtm.offset_bytes);
+
+               adev->psp.securedisplay.feature_version =
+                       le32_to_cpu(ta_hdr->securedisplay.fw_version);
+               adev->psp.securedisplay.size_bytes =
+                       le32_to_cpu(ta_hdr->securedisplay.size_bytes);
+               adev->psp.securedisplay.start_addr =
+                       (uint8_t *)adev->psp.hdcp.start_addr +
+                       le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
 
                adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
        }
index 8862684..29bf9f0 100644 (file)
@@ -151,15 +151,15 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
                                goto out2;
 
                        ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
-                       adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version);
-                       adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
-                       adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
+                       adev->psp.xgmi.feature_version = le32_to_cpu(ta_hdr->xgmi.fw_version);
+                       adev->psp.xgmi.size_bytes = le32_to_cpu(ta_hdr->xgmi.size_bytes);
+                       adev->psp.xgmi.start_addr = (uint8_t *)ta_hdr +
                                le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
                        adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
-                       adev->psp.ta_ras_ucode_version = le32_to_cpu(ta_hdr->ta_ras_ucode_version);
-                       adev->psp.ta_ras_ucode_size = le32_to_cpu(ta_hdr->ta_ras_size_bytes);
-                       adev->psp.ta_ras_start_addr = (uint8_t *)adev->psp.ta_xgmi_start_addr +
-                               le32_to_cpu(ta_hdr->ta_ras_offset_bytes);
+                       adev->psp.ras.feature_version = le32_to_cpu(ta_hdr->ras.fw_version);
+                       adev->psp.ras.size_bytes = le32_to_cpu(ta_hdr->ras.size_bytes);
+                       adev->psp.ras.start_addr = (uint8_t *)adev->psp.xgmi.start_addr +
+                               le32_to_cpu(ta_hdr->ras.offset_bytes);
                }
                break;
        case CHIP_NAVI10:
@@ -186,17 +186,17 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
                                goto out2;
 
                        ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
-                       adev->psp.ta_hdcp_ucode_version = le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
-                       adev->psp.ta_hdcp_ucode_size = le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
-                       adev->psp.ta_hdcp_start_addr = (uint8_t *)ta_hdr +
+                       adev->psp.hdcp.feature_version = le32_to_cpu(ta_hdr->hdcp.fw_version);
+                       adev->psp.hdcp.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes);
+                       adev->psp.hdcp.start_addr = (uint8_t *)ta_hdr +
                                le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
 
                        adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
 
-                       adev->psp.ta_dtm_ucode_version = le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
-                       adev->psp.ta_dtm_ucode_size = le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
-                       adev->psp.ta_dtm_start_addr = (uint8_t *)adev->psp.ta_hdcp_start_addr +
-                               le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+                       adev->psp.dtm.feature_version = le32_to_cpu(ta_hdr->dtm.fw_version);
+                       adev->psp.dtm.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes);
+                       adev->psp.dtm.start_addr = (uint8_t *)adev->psp.hdcp.start_addr +
+                               le32_to_cpu(ta_hdr->dtm.offset_bytes);
                }
                break;
        case CHIP_SIENNA_CICHLID:
index 0c908d4..cc64940 100644 (file)
@@ -84,23 +84,23 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
 
                ta_hdr = (const struct ta_firmware_header_v1_0 *)
                                 adev->psp.ta_fw->data;
-               adev->psp.ta_hdcp_ucode_version =
-                       le32_to_cpu(ta_hdr->ta_hdcp_ucode_version);
-               adev->psp.ta_hdcp_ucode_size =
-                       le32_to_cpu(ta_hdr->ta_hdcp_size_bytes);
-               adev->psp.ta_hdcp_start_addr =
+               adev->psp.hdcp.feature_version =
+                       le32_to_cpu(ta_hdr->hdcp.fw_version);
+               adev->psp.hdcp.size_bytes =
+                       le32_to_cpu(ta_hdr->hdcp.size_bytes);
+               adev->psp.hdcp.start_addr =
                        (uint8_t *)ta_hdr +
                        le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
 
                adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
 
-               adev->psp.ta_dtm_ucode_version =
-                       le32_to_cpu(ta_hdr->ta_dtm_ucode_version);
-               adev->psp.ta_dtm_ucode_size =
-                       le32_to_cpu(ta_hdr->ta_dtm_size_bytes);
-               adev->psp.ta_dtm_start_addr =
-                       (uint8_t *)adev->psp.ta_hdcp_start_addr +
-                       le32_to_cpu(ta_hdr->ta_dtm_offset_bytes);
+               adev->psp.dtm.feature_version =
+                       le32_to_cpu(ta_hdr->dtm.fw_version);
+               adev->psp.dtm.size_bytes =
+                       le32_to_cpu(ta_hdr->dtm.size_bytes);
+               adev->psp.dtm.start_addr =
+                       (uint8_t *)adev->psp.hdcp.start_addr +
+                       le32_to_cpu(ta_hdr->dtm.offset_bytes);
        }
 
        return 0;
index f7b56a7..0fc97c3 100644 (file)
@@ -1353,8 +1353,6 @@ static int soc15_common_early_init(void *handle)
                adev->asic_funcs = &vega20_asic_funcs;
                adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
                        AMD_CG_SUPPORT_GFX_MGLS |
-                       AMD_CG_SUPPORT_GFX_CGCG |
-                       AMD_CG_SUPPORT_GFX_CGLS |
                        AMD_CG_SUPPORT_GFX_CP_LS |
                        AMD_CG_SUPPORT_HDP_LS |
                        AMD_CG_SUPPORT_SDMA_MGCG |
index cce7127..da815a9 100644 (file)
@@ -134,7 +134,8 @@ struct ta_xgmi_shared_memory {
        uint32_t                        cmd_id;
        uint32_t                        resp_id;
        enum ta_xgmi_status             xgmi_status;
-       uint32_t                        reserved;
+       uint8_t                         flag_extend_link_record;
+       uint8_t                         reserved0[3];
        union ta_xgmi_cmd_input         xgmi_in_message;
        union ta_xgmi_cmd_output        xgmi_out_message;
 };
index 6c0e914..7232241 100644 (file)
@@ -698,6 +698,30 @@ static int uvd_v3_1_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /*
+        * Proper cleanups before halting the HW engine:
+        *   - cancel the delayed idle work
+        *   - enable powergating
+        *   - enable clockgating
+        *   - disable dpm
+        *
+        * TODO: to align with the VCN implementation, move the
+        * jobs for clockgating/powergating/dpm setting to
+        * ->set_powergating_state().
+        */
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+       if (adev->pm.dpm_enabled) {
+               amdgpu_dpm_enable_uvd(adev, false);
+       } else {
+               amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+               /* shutdown the UVD block */
+               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_PG_STATE_GATE);
+               amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_CG_STATE_GATE);
+       }
+
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v3_1_stop(adev);
 
index a301518..52d6de9 100644 (file)
@@ -212,6 +212,30 @@ static int uvd_v4_2_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /*
+        * Proper cleanups before halting the HW engine:
+        *   - cancel the delayed idle work
+        *   - enable powergating
+        *   - enable clockgating
+        *   - disable dpm
+        *
+        * TODO: to align with the VCN implementation, move the
+        * jobs for clockgating/powergating/dpm setting to
+        * ->set_powergating_state().
+        */
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+       if (adev->pm.dpm_enabled) {
+               amdgpu_dpm_enable_uvd(adev, false);
+       } else {
+               amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+               /* shutdown the UVD block */
+               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_PG_STATE_GATE);
+               amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_CG_STATE_GATE);
+       }
+
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v4_2_stop(adev);
 
index a4d5bd2..db6d067 100644 (file)
@@ -210,6 +210,30 @@ static int uvd_v5_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /*
+        * Proper cleanups before halting the HW engine:
+        *   - cancel the delayed idle work
+        *   - enable powergating
+        *   - enable clockgating
+        *   - disable dpm
+        *
+        * TODO: to align with the VCN implementation, move the
+        * jobs for clockgating/powergating/dpm setting to
+        * ->set_powergating_state().
+        */
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+       if (adev->pm.dpm_enabled) {
+               amdgpu_dpm_enable_uvd(adev, false);
+       } else {
+               amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+               /* shutdown the UVD block */
+               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_PG_STATE_GATE);
+               amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_CG_STATE_GATE);
+       }
+
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v5_0_stop(adev);
 
@@ -224,7 +248,6 @@ static int uvd_v5_0_suspend(void *handle)
        r = uvd_v5_0_hw_fini(adev);
        if (r)
                return r;
-       uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
 
        return amdgpu_uvd_suspend(adev);
 }
index cf3803f..bc57183 100644 (file)
@@ -543,6 +543,30 @@ static int uvd_v6_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /*
+        * Proper cleanups before halting the HW engine:
+        *   - cancel the delayed idle work
+        *   - enable powergating
+        *   - enable clockgating
+        *   - disable dpm
+        *
+        * TODO: to align with the VCN implementation, move the
+        * jobs for clockgating/powergating/dpm setting to
+        * ->set_powergating_state().
+        */
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+       if (adev->pm.dpm_enabled) {
+               amdgpu_dpm_enable_uvd(adev, false);
+       } else {
+               amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+               /* shutdown the UVD block */
+               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_PG_STATE_GATE);
+               amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_CG_STATE_GATE);
+       }
+
        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v6_0_stop(adev);
 
index 939bcfa..b6e82d7 100644 (file)
@@ -606,6 +606,30 @@ static int uvd_v7_0_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /*
+        * Proper cleanups before halting the HW engine:
+        *   - cancel the delayed idle work
+        *   - enable powergating
+        *   - enable clockgating
+        *   - disable dpm
+        *
+        * TODO: to align with the VCN implementation, move the
+        * jobs for clockgating/powergating/dpm setting to
+        * ->set_powergating_state().
+        */
+       cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+       if (adev->pm.dpm_enabled) {
+               amdgpu_dpm_enable_uvd(adev, false);
+       } else {
+               amdgpu_asic_set_uvd_clocks(adev, 0, 0);
+               /* shutdown the UVD block */
+               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_PG_STATE_GATE);
+               amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
+                                                      AMD_CG_STATE_GATE);
+       }
+
        if (!amdgpu_sriov_vf(adev))
                uvd_v7_0_stop(adev);
        else {
index c7d28c1..b70c17f 100644 (file)
@@ -477,6 +477,31 @@ static int vce_v2_0_hw_init(void *handle)
 
 static int vce_v2_0_hw_fini(void *handle)
 {
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       /*
+        * Proper cleanups before halting the HW engine:
+        *   - cancel the delayed idle work
+        *   - enable powergating
+        *   - enable clockgating
+        *   - disable dpm
+        *
+        * TODO: to align with the VCN implementation, move the
+        * jobs for clockgating/powergating/dpm setting to
+        * ->set_powergating_state().
+        */
+       cancel_delayed_work_sync(&adev->vce.idle_work);
+
+       if (adev->pm.dpm_enabled) {
+               amdgpu_dpm_enable_vce(adev, false);
+       } else {
+               amdgpu_asic_set_vce_clocks(adev, 0, 0);
+               amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                      AMD_PG_STATE_GATE);
+               amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+                                                      AMD_CG_STATE_GATE);
+       }
+
        return 0;
 }
 
index 3b82fb2..9de6689 100644 (file)
@@ -490,6 +490,29 @@ static int vce_v3_0_hw_fini(void *handle)
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+       /*
+        * Proper cleanups before halting the HW engine:
+        *   - cancel the delayed idle work
+        *   - enable powergating
+        *   - enable clockgating
+        *   - disable dpm
+        *
+        * TODO: to align with the VCN implementation, move the
+  &