Merge tag 'drm-intel-next-fixes-2020-10-15' of git://anongit.freedesktop.org/drm...
author    Dave Airlie <airlied@redhat.com>    Sun, 18 Oct 2020 23:21:55 +0000 (09:21 +1000)
committer Dave Airlie <airlied@redhat.com>    Sun, 18 Oct 2020 23:21:59 +0000 (09:21 +1000)
- Set all unused color plane offsets to ~0xfff again (Ville)
- Fix TGL DKL PHY DP vswing handling (Ville)
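
For context on the first fix: color-plane slots beyond a framebuffer format's
plane count are set to the sentinel offset ~0xfff, so code that walks every
plane slot can cheaply tell used planes from unused ones. A toy, self-contained
C illustration of the sentinel idea (names are illustrative, not i915's actual
structures):

#include <stdio.h>
#include <stdint.h>

/* sentinel offset meaning "this plane slot is unused" */
#define UNUSED_PLANE_OFFSET (~0xfffu)

struct plane { uint32_t offset; };

static int plane_is_unused(const struct plane *p)
{
	return p->offset == UNUSED_PLANE_OFFSET;
}

int main(void)
{
	struct plane planes[4];
	unsigned int i, num_planes = 2;		/* e.g. a two-plane YUV format */

	planes[0].offset = 0x0000;		/* used planes get real offsets */
	planes[1].offset = 0x9000;
	for (i = num_planes; i < 4; i++)	/* unused slots get the sentinel */
		planes[i].offset = UNUSED_PLANE_OFFSET;

	for (i = 0; i < 4; i++)
		printf("plane %u: %s\n", i,
		       plane_is_unused(&planes[i]) ? "unused" : "used");
	return 0;
}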

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201015181453.GA2905280@intel.com
261 files changed:
Documentation/devicetree/bindings/display/mediatek/mediatek,disp.txt
Documentation/devicetree/bindings/display/mediatek/mediatek,dpi.txt
Documentation/devicetree/bindings/display/mediatek/mediatek,dsi.txt
Documentation/devicetree/bindings/display/mediatek/mediatek,hdmi.txt
Documentation/devicetree/bindings/display/msm/dsi.txt
Documentation/gpu/amdgpu.rst
MAINTAINERS
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/cik_ih.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.h
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.h
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_1.h
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.h
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
drivers/gpu/drm/amd/amdgpu/mes_v10_1.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
drivers/gpu/drm/amd/amdkfd/kfd_module.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_link.h
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.c
drivers/gpu/drm/amd/display/dc/dce/dce_panel_cntl.h
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
drivers/gpu/drm/amd/display/dc/dcn30/Makefile
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
drivers/gpu/drm/amd/display/dc/inc/core_types.h
drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/amd/include/kgd_pp_interface.h
drivers/gpu/drm/amd/pm/amdgpu_pm.c
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
drivers/gpu/drm/amd/pm/inc/hwmgr.h
drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h
drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c
drivers/gpu/drm/amd/pm/powerplay/smumgr/vega10_smumgr.c
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/ingenic/ingenic-drm-drv.c
drivers/gpu/drm/ingenic/ingenic-drm.h
drivers/gpu/drm/ingenic/ingenic-ipu.c
drivers/gpu/drm/mediatek/Kconfig
drivers/gpu/drm/mediatek/Makefile
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
drivers/gpu/drm/mediatek/mtk_drm_drv.c
drivers/gpu/drm/mediatek/mtk_dsi.c
drivers/gpu/drm/mediatek/mtk_hdmi.c
drivers/gpu/drm/mediatek/mtk_hdmi.h
drivers/gpu/drm/mediatek/mtk_hdmi_phy.c [deleted file]
drivers/gpu/drm/mediatek/mtk_hdmi_phy.h [deleted file]
drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c [deleted file]
drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c [deleted file]
drivers/gpu/drm/msm/Kconfig
drivers/gpu/drm/msm/Makefile
drivers/gpu/drm/msm/adreno/a2xx_gpu.c
drivers/gpu/drm/msm/adreno/a3xx_gpu.c
drivers/gpu/drm/msm/adreno/a4xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.c
drivers/gpu/drm/msm/adreno/a5xx_gpu.h
drivers/gpu/drm/msm/adreno/a5xx_power.c
drivers/gpu/drm/msm/adreno/a5xx_preempt.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.h
drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
drivers/gpu/drm/msm/disp/mdp4/mdp4_dtv_encoder.c
drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.h
drivers/gpu/drm/msm/disp/mdp4/mdp4_lcdc_encoder.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
drivers/gpu/drm/msm/dp/dp_audio.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_audio.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_aux.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_aux.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_catalog.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_catalog.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_ctrl.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_ctrl.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_debug.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_debug.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_display.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_display.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_drm.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_drm.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_hpd.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_hpd.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_link.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_link.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_panel.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_panel.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_parser.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_parser.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_power.c [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_power.h [new file with mode: 0644]
drivers/gpu/drm/msm/dp/dp_reg.h [new file with mode: 0644]
drivers/gpu/drm/msm/dsi/dsi.h
drivers/gpu/drm/msm/dsi/dsi.xml.h
drivers/gpu/drm/msm/dsi/dsi_cfg.c
drivers/gpu/drm/msm/dsi/dsi_cfg.h
drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c [new file with mode: 0644]
drivers/gpu/drm/msm/dsi/pll/dsi_pll.c
drivers/gpu/drm/msm/dsi/pll/dsi_pll.h
drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c [new file with mode: 0644]
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_shrinker.c
drivers/gpu/drm/msm/msm_gem_submit.c
drivers/gpu/drm/msm/msm_gem_vma.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_gpu_trace.h
drivers/gpu/drm/msm/msm_gpummu.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/msm/msm_mmu.h
drivers/gpu/drm/msm/msm_ringbuffer.h
drivers/gpu/drm/msm/msm_submitqueue.c
drivers/gpu/drm/panfrost/panfrost_gpu.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/uvd_v1_0.c
drivers/gpu/drm/radeon/uvd_v2_2.c
drivers/gpu/drm/radeon/uvd_v4_2.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/vc4/vc4_drv.h
drivers/gpu/drm/vc4/vc4_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
drivers/phy/mediatek/Kconfig
drivers/phy/mediatek/Makefile
drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c [new file with mode: 0644]
drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c [new file with mode: 0644]
drivers/phy/mediatek/phy-mtk-hdmi.c [new file with mode: 0644]
drivers/phy/mediatek/phy-mtk-hdmi.h [new file with mode: 0644]
include/drm/drm_dp_helper.h
include/linux/adreno-smmu-priv.h [new file with mode: 0644]

index b91e709..1212207 100644 (file)
@@ -43,7 +43,7 @@ Required properties (all function blocks):
        "mediatek,<chip>-dpi"                   - DPI controller, see mediatek,dpi.txt
        "mediatek,<chip>-disp-mutex"            - display mutex
        "mediatek,<chip>-disp-od"               - overdrive
-  the supported chips are mt2701, mt2712 and mt8173.
+  the supported chips are mt2701, mt7623, mt2712 and mt8173.
 - reg: Physical base address and length of the function block register space
 - interrupts: The interrupt signal from the function block (required, except for
   merge and split function blocks).
index 77def44..dc1ebd1 100644 (file)
@@ -7,7 +7,7 @@ output bus.
 
 Required properties:
 - compatible: "mediatek,<chip>-dpi"
-  the supported chips are mt2701 , mt8173 and mt8183.
+  the supported chips are mt2701, mt7623, mt8173 and mt8183.
 - reg: Physical base address and length of the controller's registers
 - interrupts: The interrupt signal from the function block.
 - clocks: device clocks
index 8e4729d..f06f24d 100644 (file)
@@ -7,7 +7,7 @@ channel output.
 
 Required properties:
 - compatible: "mediatek,<chip>-dsi"
-  the supported chips are mt2701, mt8173 and mt8183.
+  the supported chips are mt2701, mt7623, mt8173 and mt8183.
 - reg: Physical base address and length of the controller's registers
 - interrupts: The interrupt signal from the function block.
 - clocks: device clocks
@@ -26,7 +26,7 @@ The MIPI TX configuration module controls the MIPI D-PHY.
 
 Required properties:
 - compatible: "mediatek,<chip>-mipi-tx"
-  the supported chips are mt2701, mt8173 and mt8183.
+  the supported chips are mt2701, mt7623, mt8173 and mt8183.
 - reg: Physical base address and length of the controller's registers
 - clocks: PLL reference clock
 - clock-output-names: name of the output clock line to the DSI encoder
index 7b12424..6b1c586 100644 (file)
@@ -6,6 +6,7 @@ its parallel input.
 
 Required properties:
 - compatible: Should be "mediatek,<chip>-hdmi".
+- the supported chips are mt2701, mt7623 and mt8173
 - reg: Physical base address and length of the controller's registers
 - interrupts: The interrupt signal from the function block.
 - clocks: device clocks
@@ -32,6 +33,7 @@ The HDMI CEC controller handles hotplug detection and CEC communication.
 
 Required properties:
 - compatible: Should be "mediatek,<chip>-cec"
+- the supported chips are mt7623 and mt8173
 - reg: Physical base address and length of the controller's registers
 - interrupts: The interrupt signal from the function block.
 - clocks: device clock
@@ -44,6 +46,7 @@ The Mediatek's I2C controller is used to interface with I2C devices.
 
 Required properties:
 - compatible: Should be "mediatek,<chip>-hdmi-ddc"
+- the supported chips are mt7623 and mt8173
 - reg: Physical base address and length of the controller's registers
 - clocks: device clock
 - clock-names: Should be "ddc-i2c".
@@ -56,6 +59,7 @@ output and drives the HDMI pads.
 
 Required properties:
 - compatible: "mediatek,<chip>-hdmi-phy"
+- the supported chips are mt2701, mt7623 and mt8173
 - reg: Physical base address and length of the module's registers
 - clocks: PLL reference clock
 - clock-names: must contain "pll_ref"
index 7884fd7..b9a64d3 100644 (file)
@@ -90,6 +90,8 @@ Required properties:
   * "qcom,dsi-phy-14nm-660"
   * "qcom,dsi-phy-10nm"
   * "qcom,dsi-phy-10nm-8998"
+  * "qcom,dsi-phy-7nm"
+  * "qcom,dsi-phy-7nm-8150"
 - reg: Physical base address and length of the registers of PLL, PHY. Some
   revisions require the PHY regulator base address, whereas others require the
   PHY lane base address. See below for each PHY revision.
@@ -98,7 +100,7 @@ Required properties:
   * "dsi_pll"
   * "dsi_phy"
   * "dsi_phy_regulator"
-  For DSI 14nm and 10nm PHYs:
+  For DSI 14nm, 10nm and 7nm PHYs:
   * "dsi_pll"
   * "dsi_phy"
   * "dsi_phy_lane"
@@ -116,7 +118,7 @@ Required properties:
 - vcca-supply: phandle to vcca regulator device node
   For 14nm PHY:
 - vcca-supply: phandle to vcca regulator device node
-  For 10nm PHY:
+  For 10nm and 7nm PHY:
 - vdds-supply: phandle to vdds regulator device node
 
 Optional properties:
index 29ca5f5..1f9ea82 100644 (file)
@@ -70,6 +70,15 @@ Interrupt Handling
 .. kernel-doc:: drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
    :internal:
 
+IP Blocks
+------------------
+
+.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
+   :doc: IP Blocks
+
+.. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h
+   :identifiers: amd_ip_block_type amd_ip_funcs
+
 AMDGPU XGMI Support
 ===================
 
@@ -197,8 +206,8 @@ pp_power_profile_mode
 .. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: pp_power_profile_mode
 
-*_busy_percent
-~~~~~~~~~~~~~~
+\*_busy_percent
+~~~~~~~~~~~~~~~
 
 .. kernel-doc:: drivers/gpu/drm/amd/pm/amdgpu_pm.c
    :doc: gpu_busy_percent
index 10992f6..06024a4 100644 (file)
@@ -5594,12 +5594,13 @@ S:      Maintained
 F:     Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
 F:     drivers/gpu/drm/panel/panel-raydium-rm67191.c
 
-DRM DRIVER FOR ROCKTECH JH057N00900 PANELS
+DRM DRIVER FOR SITRONIX ST7703 PANELS
 M:     Guido Günther <agx@sigxcpu.org>
 R:     Purism Kernel Team <kernel@puri.sm>
+R:     Ondrej Jirman <megous@megous.com>
 S:     Maintained
-F:     Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.txt
-F:     drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
+F:     Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.yaml
+F:     drivers/gpu/drm/panel/panel-sitronix-st7703.c
 
 DRM DRIVER FOR SAVAGE VIDEO CARDS
 S:     Orphan / Obsolete
@@ -5827,6 +5828,7 @@ L:        dri-devel@lists.freedesktop.org
 S:     Supported
 F:     Documentation/devicetree/bindings/display/mediatek/
 F:     drivers/gpu/drm/mediatek/
+F:     drivers/phy/mediatek/phy-mtk-hdmi*
 
 DRM DRIVERS FOR NVIDIA TEGRA
 M:     Thierry Reding <thierry.reding@gmail.com>
index 6125ba9..87f095d 100644 (file)
 #include "amdgpu_mes.h"
 #include "amdgpu_umc.h"
 #include "amdgpu_mmhub.h"
+#include "amdgpu_gfxhub.h"
 #include "amdgpu_df.h"
 
 #define MAX_GPU_INSTANCE               16
@@ -881,6 +882,9 @@ struct amdgpu_device {
        /* mmhub */
        struct amdgpu_mmhub             mmhub;
 
+       /* gfxhub */
+       struct amdgpu_gfxhub            gfxhub;
+
        /* gfx */
        struct amdgpu_gfx               gfx;
 
@@ -1016,18 +1020,32 @@ int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
 
 void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
                               uint32_t *buf, size_t size, bool write);
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
+                           uint32_t reg, uint32_t acc_flags);
+void amdgpu_device_wreg(struct amdgpu_device *adev,
+                       uint32_t reg, uint32_t v,
                        uint32_t acc_flags);
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-                   uint32_t acc_flags);
-void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-                   uint32_t acc_flags);
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
+                            uint32_t reg, uint32_t v);
 void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
 uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
 
 u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
 void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);
 
+u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
+                               u32 pcie_index, u32 pcie_data,
+                               u32 reg_addr);
+u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
+                                 u32 pcie_index, u32 pcie_data,
+                                 u32 reg_addr);
+void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
+                                u32 pcie_index, u32 pcie_data,
+                                u32 reg_addr, u32 reg_data);
+void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
+                                  u32 pcie_index, u32 pcie_data,
+                                  u32 reg_addr, u64 reg_data);
+
 bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
 
@@ -1038,8 +1056,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
  */
 #define AMDGPU_REGS_NO_KIQ    (1<<1)
 
-#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
-#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
+#define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
+#define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
 
 #define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg))
 #define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v))
@@ -1047,9 +1065,9 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
 
-#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
-#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
-#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
+#define RREG32(reg) amdgpu_device_rreg(adev, (reg), 0)
+#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_device_rreg(adev, (reg), 0))
+#define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0)
 #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
 #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
@@ -1095,7 +1113,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
                WREG32_SMC(_Reg, tmp);                          \
        } while (0)
 
-#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
+#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_device_rreg((adev), (reg), false))
 #define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
 #define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))
 
index 4a93b88..165b02e 100644 (file)
@@ -806,8 +806,8 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
        }
        adev->atif = atif;
 
-       if (atif->notifications.brightness_change) {
 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
+       if (atif->notifications.brightness_change) {
                if (amdgpu_device_has_dc_support(adev)) {
 #if defined(CONFIG_DRM_AMD_DC)
                        struct amdgpu_display_manager *dm = &adev->dm;
index edff1b7..0544460 100644 (file)
@@ -36,6 +36,8 @@
  */
 uint64_t amdgpu_amdkfd_total_mem_size;
 
+static bool kfd_initialized;
+
 int amdgpu_amdkfd_init(void)
 {
        struct sysinfo si;
@@ -51,19 +53,26 @@ int amdgpu_amdkfd_init(void)
 #else
        ret = -ENOENT;
 #endif
+       kfd_initialized = !ret;
 
        return ret;
 }
 
 void amdgpu_amdkfd_fini(void)
 {
-       kgd2kfd_exit();
+       if (kfd_initialized) {
+               kgd2kfd_exit();
+               kfd_initialized = false;
+       }
 }
 
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
 {
        bool vf = amdgpu_sriov_vf(adev);
 
+       if (!kfd_initialized)
+               return;
+
        adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
                                      adev->pdev, adev->asic_type, vf);
 
@@ -572,6 +581,13 @@ uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd)
        return adev->rev_id;
 }
 
+int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+       return adev->gmc.noretry;
+}
+
 int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
                                uint32_t vmid, uint64_t gpu_addr,
                                uint32_t *ib_cmd, uint32_t ib_len)
index a10507e..bc9f0e4 100644 (file)
@@ -181,6 +181,7 @@ uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
 uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
 uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
 uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
+int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
 uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
 
 /* Read user wptr from a specified user address space with page fault
index df0aab0..1529815 100644 (file)
@@ -32,7 +32,6 @@
 #include "v10_structs.h"
 #include "nv.h"
 #include "nvd.h"
-#include "gfxhub_v2_0.h"
 
 enum hqd_dequeue_request_type {
        NO_ACTION = 0,
@@ -753,7 +752,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
        }
 
        /* SDMA is on gfxhub as well for Navi1* series */
-       gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+       adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
 const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
index 5b38f84..50016bf 100644 (file)
@@ -31,7 +31,6 @@
 #include "v10_structs.h"
 #include "nv.h"
 #include "nvd.h"
-#include "gfxhub_v2_1.h"
 
 enum hqd_dequeue_request_type {
        NO_ACTION = 0,
@@ -657,7 +656,7 @@ static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t v
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
 
        /* SDMA is on gfxhub as well for Navi1* series */
-       gfxhub_v2_1_setup_vm_pt_regs(adev, vmid, page_table_base);
+       adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 }
 
 #if 0
index e6aede7..e0d5110 100644 (file)
@@ -36,9 +36,7 @@
 #include "v9_structs.h"
 #include "soc15.h"
 #include "soc15d.h"
-#include "mmhub_v1_0.h"
-#include "gfxhub_v1_0.h"
-
+#include "gfx_v9_0.h"
 
 enum hqd_dequeue_request_type {
        NO_ACTION = 0,
@@ -703,7 +701,180 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
 
        adev->mmhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
 
-       gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
+       adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
+}
+
+static void lock_spi_csq_mutexes(struct amdgpu_device *adev)
+{
+       mutex_lock(&adev->srbm_mutex);
+       mutex_lock(&adev->grbm_idx_mutex);
+}
+
+static void unlock_spi_csq_mutexes(struct amdgpu_device *adev)
+{
+       mutex_unlock(&adev->grbm_idx_mutex);
+       mutex_unlock(&adev->srbm_mutex);
+}
+
+/**
+ * get_wave_count - Read device registers to get number of waves in flight for
+ * a particular queue. The method also returns the VMID associated with the
+ * queue.
+ *
+ * @adev: Handle of device whose registers are to be read
+ * @queue_idx: Index of queue in the queue-map bit-field
+ * @wave_cnt: Output parameter updated with number of waves in flight
+ * @vmid: Output parameter updated with VMID of queue whose wave count
+ * is being collected
+ */
+static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
+               int *wave_cnt, int *vmid)
+{
+       int pipe_idx;
+       int queue_slot;
+       unsigned int reg_val;
+
+       /*
+        * Program GRBM with appropriate MEID, PIPEID, QUEUEID and VMID
+        * parameters to read out waves in flight. Get VMID if there are
+        * non-zero waves in flight.
+        */
+       *vmid = 0xFF;
+       *wave_cnt = 0;
+       pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
+       queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
+       soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0);
+       reg_val = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) +
+                        queue_slot);
+       *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
+       if (*wave_cnt != 0)
+               *vmid = (RREG32_SOC15(GC, 0, mmCP_HQD_VMID) &
+                        CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT;
+}
+
+/**
+ * kgd_gfx_v9_get_cu_occupancy - Reads relevant registers associated with each
+ * shader engine and aggregates the number of waves that are in flight for the
+ * process whose pasid is provided as a parameter. The process could have ZERO
+ * or more queues running and submitting waves to compute units.
+ *
+ * @kgd: Handle of device from which to get number of waves in flight
+ * @pasid: Identifies the process for which this query call is invoked
+ * @pasid_wave_cnt: Output parameter updated with number of waves in flight that
+ * belong to process with given pasid
+ * @max_waves_per_cu: Output parameter updated with maximum number of waves
+ * possible per Compute Unit
+ *
+ * Note: It's possible that the device has too many queues (oversubscription),
+ * in which case a VMID could be remapped to a different PASID. This could lead
+ * to an inaccurate wave count. Following is a high-level sequence:
+ *    Time T1: vmid = getVmid(); vmid is associated with Pasid P1
+ *    Time T2: pasid = getPasid(vmid); vmid is associated with Pasid P2
+ * In the sequence above, the wave count obtained at time T1 will be incorrectly
+ * lost from or added to the total wave count.
+ *
+ * The registers that provide the waves in flight are:
+ *
+ *  SPI_CSQ_WF_ACTIVE_STATUS - bit-map of queues per pipe. The bit is ON if a
+ *  queue is slotted, OFF if there is no queue. A process could have ZERO or
+ *  more queues slotted and submitting waves to be run on compute units. Even
+ *  when there is a queue, it is possible there are zero wavefronts in flight;
+ *  this can happen when the queue is waiting on top-of-pipe events, e.g. a
+ *  waitRegMem command
+ *
+ *  For each bit that is ON from above:
+ *
+ *    Read (SPI_CSQ_WF_ACTIVE_COUNT_0 + queue_idx) register. It provides the
+ *    number of waves that are in flight for the queue at specified index. The
+ *    index ranges from 0 to 7.
+ *
+ *    If non-zero waves are in flight, read CP_HQD_VMID register to obtain VMID
+ *    of the wave(s).
+ *
+ *    Determine if the VMID from the above step maps to the pasid provided as a
+ *    parameter. If it matches, aggregate the wave count. A VMID not matching
+ *    the pasid is a normal condition, i.e. a device is expected to support
+ *    multiple queues from multiple processes.
+ *
+ *  Reading registers referenced above involves programming GRBM appropriately
+ */
+static void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
+               int *pasid_wave_cnt, int *max_waves_per_cu)
+{
+       int qidx;
+       int vmid;
+       int se_idx;
+       int sh_idx;
+       int se_cnt;
+       int sh_cnt;
+       int wave_cnt;
+       int queue_map;
+       int pasid_tmp;
+       int max_queue_cnt;
+       int vmid_wave_cnt = 0;
+       struct amdgpu_device *adev;
+       DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
+
+       adev = get_amdgpu_device(kgd);
+       lock_spi_csq_mutexes(adev);
+       soc15_grbm_select(adev, 1, 0, 0, 0);
+
+       /*
+        * Iterate through the shader engines and arrays of the device
+        * to get number of waves in flight
+        */
+       bitmap_complement(cp_queue_bitmap, adev->gfx.mec.queue_bitmap,
+                         KGD_MAX_QUEUES);
+       max_queue_cnt = adev->gfx.mec.num_pipe_per_mec *
+                       adev->gfx.mec.num_queue_per_pipe;
+       sh_cnt = adev->gfx.config.max_sh_per_se;
+       se_cnt = adev->gfx.config.max_shader_engines;
+       for (se_idx = 0; se_idx < se_cnt; se_idx++) {
+               for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) {
+
+                       gfx_v9_0_select_se_sh(adev, se_idx, sh_idx, 0xffffffff);
+                       queue_map = RREG32(SOC15_REG_OFFSET(GC, 0,
+                                          mmSPI_CSQ_WF_ACTIVE_STATUS));
+
+                       /*
+                        * Assumption: the queue map encodes the following schema:
+                        * four pipes per micro-engine, with each pipe mapping
+                        * eight queues. This schema holds for GFX9 devices and
+                        * must be verified for newer device families
+                        */
+                       for (qidx = 0; qidx < max_queue_cnt; qidx++) {
+
+                               /* Skip queues that are not associated with
+                                * compute functions
+                                */
+                               if (!test_bit(qidx, cp_queue_bitmap))
+                                       continue;
+
+                               if (!(queue_map & (1 << qidx)))
+                                       continue;
+
+                               /* Get number of waves in flight and aggregate them */
+                               get_wave_count(adev, qidx, &wave_cnt, &vmid);
+                               if (wave_cnt != 0) {
+                                       pasid_tmp =
+                                         RREG32(SOC15_REG_OFFSET(OSSSYS, 0,
+                                                mmIH_VMID_0_LUT) + vmid);
+                                       if (pasid_tmp == pasid)
+                                               vmid_wave_cnt += wave_cnt;
+                               }
+                       }
+               }
+       }
+
+       gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       soc15_grbm_select(adev, 0, 0, 0, 0);
+       unlock_spi_csq_mutexes(adev);
+
+       /* Update the output parameters and return */
+       *pasid_wave_cnt = vmid_wave_cnt;
+       *max_waves_per_cu = adev->gfx.cu_info.simd_per_cu *
+                               adev->gfx.cu_info.max_waves_per_simd;
 }
 
 const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
@@ -726,4 +897,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
        .get_atc_vmid_pasid_mapping_info =
                        kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
        .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
+       .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
 };
index 17c010d..b4df646 100644 (file)
@@ -543,6 +543,7 @@ int amdgpu_mem_train_support(struct amdgpu_device *adev)
                case HW_REV(11, 0, 0):
                case HW_REV(11, 0, 5):
                case HW_REV(11, 0, 7):
+               case HW_REV(11, 0, 11):
                        ret = 1;
                        break;
                default:
index abe0c27..2d125b8 100644 (file)
@@ -267,7 +267,7 @@ static int  amdgpu_debugfs_process_reg_op(bool read, struct file *f,
                } else {
                        r = get_user(value, (uint32_t *)buf);
                        if (!r)
-                               amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
+                               amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value);
                }
                if (r) {
                        result = r;
index 2ff43a3..e8b4175 100644 (file)
@@ -80,8 +80,6 @@ MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
 MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/sienna_cichlid_gpu_info.bin");
-MODULE_FIRMWARE("amdgpu/navy_flounder_gpu_info.bin");
 
 #define AMDGPU_RESUME_MS               2000
 
@@ -303,10 +301,10 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
 }
 
 /*
- * MMIO register access helper functions.
+ * Register access helper functions.
  */
 /**
- * amdgpu_mm_rreg - read a memory mapped IO register
+ * amdgpu_device_rreg - read a memory mapped IO or indirect register
  *
  * @adev: amdgpu_device pointer
  * @reg: dword aligned register offset
@@ -314,33 +312,29 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
  *
  * Returns the 32 bit value from the offset specified.
  */
-uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
-                       uint32_t acc_flags)
+uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
+                           uint32_t reg, uint32_t acc_flags)
 {
        uint32_t ret;
 
        if (adev->in_pci_err_recovery)
                return 0;
 
-       if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) &&
-           down_read_trylock(&adev->reset_sem)) {
-               ret = amdgpu_kiq_rreg(adev, reg);
-               up_read(&adev->reset_sem);
-               return ret;
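+       /* Registers inside the MMIO aperture are read directly (or via KIQ
+        * when running under SR-IOV); offsets beyond the aperture fall back
+        * to the indirect PCIe path.
+        */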
+       if ((reg * 4) < adev->rmmio_size) {
+               if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+                   amdgpu_sriov_runtime(adev) &&
+                   down_read_trylock(&adev->reset_sem)) {
+                       ret = amdgpu_kiq_rreg(adev, reg);
+                       up_read(&adev->reset_sem);
+               } else {
+                       ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
+               }
+       } else {
+               ret = adev->pcie_rreg(adev, reg * 4);
        }
 
-       if ((reg * 4) < adev->rmmio_size)
-               ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
-       else {
-               unsigned long flags;
+       trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);
 
-               spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-               writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
-               ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
-               spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-       }
-
-       trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
        return ret;
 }
 
@@ -394,29 +388,8 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
                BUG();
 }
 
-static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev,
-                                      uint32_t reg, uint32_t v,
-                                      uint32_t acc_flags)
-{
-       if (adev->in_pci_err_recovery)
-               return;
-
-       trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-
-       if ((reg * 4) < adev->rmmio_size)
-               writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
-       else {
-               unsigned long flags;
-
-               spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-               writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
-               writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
-               spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-       }
-}
-
 /**
- * amdgpu_mm_wreg - write to a memory mapped IO register
+ * amdgpu_device_wreg - write to a memory mapped IO or indirect register
  *
  * @adev: amdgpu_device pointer
  * @reg: dword aligned register offset
@@ -425,20 +398,27 @@ static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev,
  *
  * Writes the value specified to the offset specified.
  */
-void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-                   uint32_t acc_flags)
+void amdgpu_device_wreg(struct amdgpu_device *adev,
+                       uint32_t reg, uint32_t v,
+                       uint32_t acc_flags)
 {
        if (adev->in_pci_err_recovery)
                return;
 
-       if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev) &&
-           down_read_trylock(&adev->reset_sem)) {
-               amdgpu_kiq_wreg(adev, reg, v);
-               up_read(&adev->reset_sem);
-               return;
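+       /* Mirror of amdgpu_device_rreg: direct MMIO (or KIQ under SR-IOV)
+        * inside the aperture, indirect PCIe access beyond it.
+        */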
+       if ((reg * 4) < adev->rmmio_size) {
+               if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
+                   amdgpu_sriov_runtime(adev) &&
+                   down_read_trylock(&adev->reset_sem)) {
+                       amdgpu_kiq_wreg(adev, reg, v);
+                       up_read(&adev->reset_sem);
+               } else {
+                       writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+               }
+       } else {
+               adev->pcie_wreg(adev, reg * 4, v);
        }
 
-       amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+       trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
 }
 
 /*
@@ -446,21 +426,20 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
  *
  * this function is invoked only for debugfs register access
  */
-void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
-                   uint32_t acc_flags)
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
+                            uint32_t reg, uint32_t v)
 {
        if (adev->in_pci_err_recovery)
                return;
 
        if (amdgpu_sriov_fullaccess(adev) &&
-               adev->gfx.rlc.funcs &&
-               adev->gfx.rlc.funcs->is_rlcg_access_range) {
-
+           adev->gfx.rlc.funcs &&
+           adev->gfx.rlc.funcs->is_rlcg_access_range) {
                if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
                        return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
+       } else {
+               writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
        }
-
-       amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
 }
 
 /**
@@ -594,6 +573,135 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
        }
 }
 
+/**
+ * amdgpu_device_indirect_rreg - read an indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset of the index register
+ * @pcie_data: mmio register offset of the data register
+ * @reg_addr: indirect register address
+ *
+ * Returns the value of indirect register @reg_addr
+ */
+u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
+                               u32 pcie_index, u32 pcie_data,
+                               u32 reg_addr)
+{
+       unsigned long flags;
+       u32 r;
+       void __iomem *pcie_index_offset;
+       void __iomem *pcie_data_offset;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+       pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
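+       /* Write the target address to the index register; the read back
+        * flushes the posted write before the data register is accessed.
+        */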
+       writel(reg_addr, pcie_index_offset);
+       readl(pcie_index_offset);
+       r = readl(pcie_data_offset);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+       return r;
+}
+
+/**
+ * amdgpu_device_indirect_rreg64 - read a 64-bit indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset of the index register
+ * @pcie_data: mmio register offset of the data register
+ * @reg_addr: indirect register address
+ *
+ * Returns the value of indirect register @reg_addr
+ */
+u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
+                                 u32 pcie_index, u32 pcie_data,
+                                 u32 reg_addr)
+{
+       unsigned long flags;
+       u64 r;
+       void __iomem *pcie_index_offset;
+       void __iomem *pcie_data_offset;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+       pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+       /* read low 32 bits */
+       writel(reg_addr, pcie_index_offset);
+       readl(pcie_index_offset);
+       r = readl(pcie_data_offset);
+       /* read high 32 bits */
+       writel(reg_addr + 4, pcie_index_offset);
+       readl(pcie_index_offset);
+       r |= ((u64)readl(pcie_data_offset) << 32);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+
+       return r;
+}
+
+/**
+ * amdgpu_device_indirect_wreg - write an indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset of the index register
+ * @pcie_data: mmio register offset of the data register
+ * @reg_addr: indirect register offset
+ * @reg_data: indirect register data
+ *
+ */
+void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
+                                u32 pcie_index, u32 pcie_data,
+                                u32 reg_addr, u32 reg_data)
+{
+       unsigned long flags;
+       void __iomem *pcie_index_offset;
+       void __iomem *pcie_data_offset;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+       pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+       writel(reg_addr, pcie_index_offset);
+       readl(pcie_index_offset);
+       writel(reg_data, pcie_data_offset);
+       readl(pcie_data_offset);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
+/**
+ * amdgpu_device_indirect_wreg64 - write a 64-bit indirect register
+ *
+ * @adev: amdgpu_device pointer
+ * @pcie_index: mmio register offset of the index register
+ * @pcie_data: mmio register offset of the data register
+ * @reg_addr: indirect register offset
+ * @reg_data: indirect register data
+ *
+ */
+void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
+                                  u32 pcie_index, u32 pcie_data,
+                                  u32 reg_addr, u64 reg_data)
+{
+       unsigned long flags;
+       void __iomem *pcie_index_offset;
+       void __iomem *pcie_data_offset;
+
+       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
+       pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
+       pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;
+
+       /* write low 32 bits */
+       writel(reg_addr, pcie_index_offset);
+       readl(pcie_index_offset);
+       writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
+       readl(pcie_data_offset);
+       /* write high 32 bits */
+       writel(reg_addr + 4, pcie_index_offset);
+       readl(pcie_index_offset);
+       writel((u32)(reg_data >> 32), pcie_data_offset);
+       readl(pcie_data_offset);
+       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+}
+
 /**
  * amdgpu_invalid_rreg - dummy reg read function
  *
@@ -1262,11 +1370,15 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
 
        amdgpu_gmc_tmz_set(adev);
 
-       if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
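+       /* -1 selects the driver default of 8 kernel compute queues; other
+        * out-of-range values are clamped to 8 with a warning.
+        */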
+       if (amdgpu_num_kcq == -1) {
+               amdgpu_num_kcq = 8;
+       } else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
                amdgpu_num_kcq = 8;
                dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
        }
 
+       amdgpu_gmc_noretry_set(adev);
+
        return 0;
 }
 
@@ -1669,6 +1781,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
        case CHIP_CARRIZO:
        case CHIP_STONEY:
        case CHIP_VEGA20:
+       case CHIP_SIENNA_CICHLID:
+       case CHIP_NAVY_FLOUNDER:
        default:
                return 0;
        case CHIP_VEGA10:
@@ -1700,12 +1814,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
        case CHIP_NAVI12:
                chip_name = "navi12";
                break;
-       case CHIP_SIENNA_CICHLID:
-               chip_name = "sienna_cichlid";
-               break;
-       case CHIP_NAVY_FLOUNDER:
-               chip_name = "navy_flounder";
-               break;
        }
 
        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
@@ -3445,8 +3553,10 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
        /* make sure IB test finished before entering exclusive mode
         * to avoid preemption on IB test
         * */
-       if (amdgpu_sriov_vf(adev))
+       if (amdgpu_sriov_vf(adev)) {
                amdgpu_virt_request_full_gpu(adev, false);
+               amdgpu_virt_fini_data_exchange(adev);
+       }
 
        /* disable all interrupts */
        amdgpu_irq_disable_all(adev);
@@ -4080,6 +4190,11 @@ static int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 
        amdgpu_debugfs_wait_dump(adev);
 
+       if (amdgpu_sriov_vf(adev)) {
+               /* stop the data exchange thread */
+               amdgpu_virt_fini_data_exchange(adev);
+       }
+
        /* block all schedulers and reset given job's ring */
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                struct amdgpu_ring *ring = adev->rings[i];
index c81206e..7cc7af2 100644 (file)
@@ -297,7 +297,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
           take the current one */
        if (active && !adev->have_disp_power_ref) {
                adev->have_disp_power_ref = true;
-               goto out;
+               return ret;
        }
        /* if we have no active crtcs, then drop the power ref
           we got before */
index 81e4cf8..c241317 100644 (file)
@@ -147,7 +147,7 @@ int amdgpu_async_gfx_ring = 1;
 int amdgpu_mcbp = 0;
 int amdgpu_discovery = -1;
 int amdgpu_mes = 0;
-int amdgpu_noretry;
+int amdgpu_noretry = -1;
 int amdgpu_force_asic_type = -1;
 int amdgpu_tmz = 0;
 int amdgpu_reset_method = -1; /* auto */
@@ -596,8 +596,13 @@ MODULE_PARM_DESC(mes,
        "Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
 module_param_named(mes, amdgpu_mes, int, 0444);
 
+/**
+ * DOC: noretry (int)
+ * Disable retry faults in the GPU memory controller.
+ * (0 = retry enabled, 1 = retry disabled, -1 = auto (default))
+ */
 MODULE_PARM_DESC(noretry,
-       "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)");
+       "Disable retry faults (0 = retry enabled, 1 = retry disabled, -1 = auto (default))");
 module_param_named(noretry, amdgpu_noretry, int, 0644);
 
 /**
index e811fec..8f4a8f8 100644 (file)
 
 static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
 {
-       /* TODO: Gaming SKUs don't have the FRU EEPROM.
-        * Use this hack to address hangs on modprobe on gaming SKUs
-        * until a proper solution can be implemented by only supporting
-        * the explicit chip IDs for VG20 Server cards
-        *
-        * TODO: Add list of supported Arcturus DIDs once confirmed
+       /* Only server cards have the FRU EEPROM
+        * TODO: See if we can figure this out dynamically instead of
+        * having to parse VBIOS versions.
         */
-       if ((adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a0) ||
-           (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a1) ||
-           (adev->asic_type == CHIP_VEGA20 && adev->pdev->device == 0x66a4))
-               return true;
-       return false;
+       struct atom_context *atom_ctx = adev->mode_info.atom_context;
+
+       /* VBIOS is of the format ###-DXXXYY-##. For SKU identification,
+        * we can use just the "DXXX" portion. If there were more models, we
+        * could convert the 3 characters to a hex integer and use a switch
+        * for ease/speed/readability. For now, 2 string comparisons are
+        * reasonable and not too expensive
+        */
+       switch (adev->asic_type) {
+       case CHIP_VEGA20:
+               /* D161 and D163 are the VG20 server SKUs */
+               if (strnstr(atom_ctx->vbios_version, "D161",
+                           sizeof(atom_ctx->vbios_version)) ||
+                   strnstr(atom_ctx->vbios_version, "D163",
+                           sizeof(atom_ctx->vbios_version)))
+                       return true;
+               else
+                       return false;
+       default:
+               return false;
+       }
 }
 
 static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
index f29a861..1308d97 100644 (file)
@@ -26,4 +26,4 @@
 
 int amdgpu_fru_get_product_info(struct amdgpu_device *adev);
 
-#endif  // __AMDGPU_PRODINFO_H__
+#endif  // __AMDGPU_FRU_EEPROM_H__
index a611e78..258498c 100644 (file)
@@ -217,6 +217,7 @@ struct amdgpu_gfx_funcs {
        int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
        void (*reset_ras_error_count) (struct amdgpu_device *adev);
        void (*init_spm_golden)(struct amdgpu_device *adev);
+       void (*query_ras_error_status) (struct amdgpu_device *adev);
 };
 
 struct sq_work {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfxhub.h
new file mode 100644 (file)
index 0000000..66ebc2e
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_GFXHUB_H__
+#define __AMDGPU_GFXHUB_H__
+
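+/*
+ * Per-ASIC gfxhub callbacks: each gfxhub_vN_M implementation provides one of
+ * these tables and common code calls through adev->gfxhub.funcs.
+ */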
+struct amdgpu_gfxhub_funcs {
+       u64 (*get_fb_location)(struct amdgpu_device *adev);
+       u64 (*get_mc_fb_offset)(struct amdgpu_device *adev);
+       void (*setup_vm_pt_regs)(struct amdgpu_device *adev, uint32_t vmid,
+                       uint64_t page_table_base);
+       int (*gart_enable)(struct amdgpu_device *adev);
+
+       void (*gart_disable)(struct amdgpu_device *adev);
+       void (*set_fault_enable_default)(struct amdgpu_device *adev, bool value);
+       void (*init)(struct amdgpu_device *adev);
+       int (*get_xgmi_info)(struct amdgpu_device *adev);
+};
+
+struct amdgpu_gfxhub {
+       const struct amdgpu_gfxhub_funcs *funcs;
+};
+
+#endif
index 213ef09..36604d7 100644 (file)
@@ -413,6 +413,44 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
        }
 }
 
+/**
+ * amdgpu_gmc_noretry_set - set per-ASIC noretry defaults
+ * @adev: amdgpu_device pointer
+ *
+ * Set a per-ASIC default for the noretry module parameter.
+ *
+ */
+void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
+{
+       struct amdgpu_gmc *gmc = &adev->gmc;
+
+       switch (adev->asic_type) {
+       case CHIP_RAVEN:
+               /* Raven currently has issues with noretry.
+                * Regardless of what we decide for other
+                * asics, we should leave Raven with
+                * noretry = 0 until we root-cause the
+                * issues.
+                */
+               if (amdgpu_noretry == -1)
+                       gmc->noretry = 0;
+               else
+                       gmc->noretry = amdgpu_noretry;
+               break;
+       default:
+               /* default this to 0 for now, but we may want
+                * to change this in the future for certain
+                * GPUs as it can increase performance in
+                * certain cases.
+                */
+               if (amdgpu_noretry == -1)
+                       gmc->noretry = 0;
+               else
+                       gmc->noretry = amdgpu_noretry;
+               break;
+       }
+}
+
 void amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
                                   bool enable)
 {
index d61bbde..aa0c837 100644 (file)
@@ -239,6 +239,7 @@ struct amdgpu_gmc {
 
        struct amdgpu_xgmi xgmi;
        struct amdgpu_irq_src   ecc_irq;
+       int noretry;
 };
 
 #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type)))
@@ -300,6 +301,7 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev);
 int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev);
 
 extern void amdgpu_gmc_tmz_set(struct amdgpu_device *adev);
+extern void amdgpu_gmc_noretry_set(struct amdgpu_device *adev);
 
 extern void
 amdgpu_gmc_set_vm_fault_masks(struct amdgpu_device *adev, int hub_type,
index bccaf4f..a5aaff1 100644 (file)
@@ -177,7 +177,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
                        break;
                case CHIP_VEGA10:
                        /* turn runpm on if noretry=0 */
-                       if (!amdgpu_noretry)
+                       if (!adev->gmc.noretry)
                                adev->runpm = true;
                        break;
                default:
index 0c43d7f..1ae9bda 100644 (file)
@@ -40,6 +40,7 @@ struct amdgpu_mmhub_funcs {
                                uint64_t page_table_base);
        void (*update_power_gating)(struct amdgpu_device *adev,
                                 bool enable);
+       void (*query_ras_error_status)(struct amdgpu_device *adev);
 };
 
 struct amdgpu_mmhub {
index 2c66e20..18be544 100644 (file)
@@ -161,10 +161,12 @@ static int psp_sw_init(void *handle)
        struct psp_context *psp = &adev->psp;
        int ret;
 
-       ret = psp_init_microcode(psp);
-       if (ret) {
-               DRM_ERROR("Failed to load psp firmware!\n");
-               return ret;
+       if (!amdgpu_sriov_vf(adev)) {
+               ret = psp_init_microcode(psp);
+               if (ret) {
+                       DRM_ERROR("Failed to load psp firmware!\n");
+                       return ret;
+               }
        }
 
        ret = psp_memory_training_init(psp);
index e5ea147..8bf6a7c 100644 (file)
@@ -1027,58 +1027,6 @@ static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
        return scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
 }
 
-static void amdgpu_ras_sysfs_add_bad_page_node(struct amdgpu_device *adev)
-{
-       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-       struct attribute_group group;
-       struct bin_attribute *bin_attrs[] = {
-               &con->badpages_attr,
-               NULL,
-       };
-
-       con->badpages_attr = (struct bin_attribute) {
-               .attr = {
-                       .name = "gpu_vram_bad_pages",
-                       .mode = S_IRUGO,
-               },
-               .size = 0,
-               .private = NULL,
-               .read = amdgpu_ras_sysfs_badpages_read,
-       };
-
-       group.name = RAS_FS_NAME;
-       group.bin_attrs = bin_attrs;
-
-       sysfs_bin_attr_init(bin_attrs[0]);
-
-       sysfs_update_group(&adev->dev->kobj, &group);
-}
-
-static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
-{
-       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
-       struct attribute *attrs[] = {
-               &con->features_attr.attr,
-               NULL
-       };
-       struct attribute_group group = {
-               .name = RAS_FS_NAME,
-               .attrs = attrs,
-       };
-
-       con->features_attr = (struct device_attribute) {
-               .attr = {
-                       .name = "features",
-                       .mode = S_IRUGO,
-               },
-                       .show = amdgpu_ras_sysfs_features_read,
-       };
-
-       sysfs_attr_init(attrs[0]);
-
-       return sysfs_create_group(&adev->dev->kobj, &group);
-}
-
 static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev)
 {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -1300,13 +1248,43 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
 /* debugfs end */
 
 /* ras fs */
-
+static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO,
+               amdgpu_ras_sysfs_badpages_read, NULL, 0);
+static DEVICE_ATTR(features, S_IRUGO,
+               amdgpu_ras_sysfs_features_read, NULL);
 static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
 {
-       amdgpu_ras_sysfs_create_feature_node(adev);
+       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+       struct attribute_group group = {
+               .name = RAS_FS_NAME,
+       };
+       struct attribute *attrs[] = {
+               &con->features_attr.attr,
+               NULL
+       };
+       struct bin_attribute *bin_attrs[] = {
+               NULL,
+               NULL,
+       };
+       int r;
 
-       if (amdgpu_bad_page_threshold != 0)
-               amdgpu_ras_sysfs_add_bad_page_node(adev);
+       /* add features entry */
+       con->features_attr = dev_attr_features;
+       group.attrs = attrs;
+       sysfs_attr_init(attrs[0]);
+
+       if (amdgpu_bad_page_threshold != 0) {
+               /* add bad_page_features entry */
+               bin_attr_gpu_vram_bad_pages.private = NULL;
+               con->badpages_attr = bin_attr_gpu_vram_bad_pages;
+               bin_attrs[0] = &con->badpages_attr;
+               group.bin_attrs = bin_attrs;
+               sysfs_bin_attr_init(bin_attrs[0]);
+       }
+
+       r = sysfs_create_group(&adev->dev->kobj, &group);
+       if (r)
+               dev_err(adev->dev, "Failed to create RAS sysfs group!\n");
 
        return 0;
 }
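The rewrite above folds the two ad-hoc node creators into a single attribute_group registered with one sysfs_create_group() call, attaching the bin attribute only when bad-page reporting is enabled. A minimal sketch of the group pattern, assuming a hypothetical "demo" attribute (demo_show, demo_group and the "demo_fs" name are illustrative):

    #include <linux/kernel.h>
    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t demo_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
            return scnprintf(buf, PAGE_SIZE, "ok\n");
    }
    static DEVICE_ATTR(demo, S_IRUGO, demo_show, NULL);

    static struct attribute *demo_attrs[] = {
            &dev_attr_demo.attr,
            NULL,
    };

    static const struct attribute_group demo_group = {
            .name  = "demo_fs",
            .attrs = demo_attrs,
    };

    /* at init: sysfs_create_group(&dev->kobj, &demo_group);
     * at fini: sysfs_remove_group(&dev->kobj, &demo_group); */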
@@ -1498,6 +1476,45 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
        }
 }
 
+/* Parse RdRspStatus and WrRspStatus */
+void amdgpu_ras_error_status_query(struct amdgpu_device *adev,
+               struct ras_query_if *info)
+{
+       /*
+        * Only two blocks need to query the read/write
+        * RspStatus at the current state
+        */
+       switch (info->head.block) {
+       case AMDGPU_RAS_BLOCK__GFX:
+               if (adev->gfx.funcs->query_ras_error_status)
+                       adev->gfx.funcs->query_ras_error_status(adev);
+               break;
+       case AMDGPU_RAS_BLOCK__MMHUB:
+               if (adev->mmhub.funcs->query_ras_error_status)
+                       adev->mmhub.funcs->query_ras_error_status(adev);
+               break;
+       default:
+               break;
+       }
+}
+
+static void amdgpu_ras_query_err_status(struct amdgpu_device *adev)
+{
+       struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+       struct ras_manager *obj;
+
+       if (!con)
+               return;
+
+       list_for_each_entry(obj, &con->head, node) {
+               struct ras_query_if info = {
+                       .head = obj->head,
+               };
+
+               amdgpu_ras_error_status_query(adev, &info);
+       }
+}
+
 /* recovery begin */
 
 /* return 0 on success.
@@ -1568,8 +1585,10 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
                }
 
                list_for_each_entry(remote_adev,
-                               device_list_handle, gmc.xgmi.head)
+                               device_list_handle, gmc.xgmi.head) {
+                       amdgpu_ras_query_err_status(remote_adev);
                        amdgpu_ras_log_on_err_counter(remote_adev);
+               }
 
                amdgpu_put_xgmi_hive(hive);
        }
@@ -1967,8 +1986,7 @@ static int amdgpu_ras_check_asic_type(struct amdgpu_device *adev)
 {
        if (adev->asic_type != CHIP_VEGA10 &&
                adev->asic_type != CHIP_VEGA20 &&
-               adev->asic_type != CHIP_ARCTURUS &&
-               adev->asic_type != CHIP_SIENNA_CICHLID)
+               adev->asic_type != CHIP_ARCTURUS)
                return 1;
        else
                return 0;
@@ -2012,6 +2030,7 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
 
        *supported = amdgpu_ras_enable == 0 ?
                        0 : *hw_supported & amdgpu_ras_mask;
+
        adev->ras_features = *supported;
 }
 
index 63e734a..ee9480d 100644 (file)
@@ -35,7 +35,7 @@
 #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
         job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
 
-TRACE_EVENT(amdgpu_mm_rreg,
+TRACE_EVENT(amdgpu_device_rreg,
            TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
            TP_ARGS(did, reg, value),
            TP_STRUCT__entry(
@@ -54,7 +54,7 @@ TRACE_EVENT(amdgpu_mm_rreg,
                      (unsigned long)__entry->value)
 );
 
-TRACE_EVENT(amdgpu_mm_wreg,
+TRACE_EVENT(amdgpu_device_wreg,
            TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
            TP_ARGS(did, reg, value),
            TP_STRUCT__entry(
@@ -321,6 +321,49 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
            TP_ARGS(mapping)
 );
 
+TRACE_EVENT(amdgpu_vm_update_ptes,
+           TP_PROTO(struct amdgpu_vm_update_params *p,
+                    uint64_t start, uint64_t end,
+                    unsigned int nptes, uint64_t dst,
+                    uint64_t incr, uint64_t flags,
+                    pid_t pid, uint64_t vm_ctx),
+       TP_ARGS(p, start, end, nptes, dst, incr, flags, pid, vm_ctx),
+       TP_STRUCT__entry(
+                        __field(u64, start)
+                        __field(u64, end)
+                        __field(u64, flags)
+                        __field(unsigned int, nptes)
+                        __field(u64, incr)
+                        __field(pid_t, pid)
+                        __field(u64, vm_ctx)
+                        __dynamic_array(u64, dst, nptes)
+       ),
+
+       TP_fast_assign(
+                       unsigned int i;
+
+                       __entry->start = start;
+                       __entry->end = end;
+                       __entry->flags = flags;
+                       __entry->incr = incr;
+                       __entry->nptes = nptes;
+                       __entry->pid = pid;
+                       __entry->vm_ctx = vm_ctx;
+                       for (i = 0; i < nptes; ++i) {
+                               u64 addr = p->pages_addr ? amdgpu_vm_map_gart(
+                                       p->pages_addr, dst) : dst;
+
+                               ((u64 *)__get_dynamic_array(dst))[i] = addr;
+                               dst += incr;
+                       }
+       ),
+       TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx,"
+                 " flags:0x%llx, incr:%llu, dst:\n%s", __entry->pid,
+                 __entry->vm_ctx, __entry->start, __entry->end,
+                 __entry->flags, __entry->incr,  __print_array(
+                 __get_dynamic_array(dst), __entry->nptes, 8))
+);
+
 TRACE_EVENT(amdgpu_vm_set_ptes,
            TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
                     uint32_t incr, uint64_t flags, bool direct),
index 495c3d7..f3b7287 100644 (file)
@@ -68,6 +68,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
 
        INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
        mutex_init(&adev->vcn.vcn_pg_lock);
+       mutex_init(&adev->vcn.vcn1_jpeg1_workaround);
        atomic_set(&adev->vcn.total_submission_cnt, 0);
        for (i = 0; i < adev->vcn.num_vcn_inst; i++)
                atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
@@ -237,6 +238,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
        }
 
        release_firmware(adev->vcn.fw);
+       mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
        mutex_destroy(&adev->vcn.vcn_pg_lock);
 
        return 0;
index 7a9b804..1769115 100644 (file)
@@ -220,6 +220,7 @@ struct amdgpu_vcn {
        struct amdgpu_vcn_inst   inst[AMDGPU_MAX_VCN_INSTANCES];
        struct amdgpu_vcn_reg    internal;
        struct mutex             vcn_pg_lock;
+       struct mutex            vcn1_jpeg1_workaround;
        atomic_t                 total_submission_cnt;
 
        unsigned        harvest_config;
index f76961d..d0aea5e 100644 (file)
 #include "soc15.h"
 #include "nv.h"
 
+#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
+       do { \
+               vf2pf_info->ucode_info[ucode].id = ucode; \
+               vf2pf_info->ucode_info[ucode].version = ver; \
+       } while (0)
+
 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
 {
        /* By now all MMIO pages except mailbox are blocked */
@@ -239,10 +245,10 @@ void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
 }
 
 
-int amdgpu_virt_fw_reserve_get_checksum(void *obj,
-                                       unsigned long obj_size,
-                                       unsigned int key,
-                                       unsigned int chksum)
+unsigned int amd_sriov_msg_checksum(void *obj,
+                               unsigned long obj_size,
+                               unsigned int key,
+                               unsigned int checksum)
 {
        unsigned int ret = key;
        unsigned long i = 0;
@@ -252,9 +258,9 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj,
        /* calculate checksum */
        for (i = 0; i < obj_size; ++i)
                ret += *(pos + i);
-       /* minus the chksum itself */
-       pos = (char *)&chksum;
-       for (i = 0; i < sizeof(chksum); ++i)
+       /* minus the checksum itself */
+       pos = (char *)&checksum;
+       for (i = 0; i < sizeof(checksum); ++i)
                ret -= *(pos + i);
        return ret;
 }
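The renamed helper keeps the original scheme: seed with the key, sum every byte of the message, then subtract the bytes of the checksum value itself so the stored field does not perturb the result. A hedged userspace round trip on a toy struct (toy_msg and toy_checksum are illustrative, not driver code):

    #include <stdio.h>
    #include <string.h>

    struct toy_msg {
            unsigned int size;
            unsigned int checksum;
            unsigned char payload[8];
    };

    static unsigned int toy_checksum(void *obj, unsigned long obj_size,
                                     unsigned int key, unsigned int checksum)
    {
            unsigned int ret = key;
            unsigned long i;
            unsigned char *pos = (unsigned char *)obj;

            for (i = 0; i < obj_size; ++i)
                    ret += *(pos + i);
            /* compensate for the checksum field's own bytes */
            pos = (unsigned char *)&checksum;
            for (i = 0; i < sizeof(checksum); ++i)
                    ret -= *(pos + i);
            return ret;
    }

    int main(void)
    {
            struct toy_msg msg;

            memset(&msg, 0, sizeof(msg));
            msg.size = sizeof(msg);
            memcpy(msg.payload, "pf2vf!", 6);

            /* writer side: checksum field is still zero here */
            msg.checksum = toy_checksum(&msg, msg.size, 0, 0);
            /* reader side: stored bytes are summed, then subtracted again */
            printf("valid: %d\n", msg.checksum ==
                   toy_checksum(&msg, msg.size, 0, msg.checksum));
            return 0;
    }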
@@ -415,33 +421,188 @@ static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
        }
 }
 
-void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
 {
-       uint32_t pf2vf_size = 0;
-       uint32_t checksum = 0;
+       struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
+       uint32_t checksum;
        uint32_t checkval;
-       char *str;
+
+       if (adev->virt.fw_reserve.p_pf2vf == NULL)
+               return -EINVAL;
+
+       if (pf2vf_info->size > 1024) {
+               DRM_ERROR("invalid pf2vf message size\n");
+               return -EINVAL;
+       }
+
+       switch (pf2vf_info->version) {
+       case 1:
+               checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
+               checkval = amd_sriov_msg_checksum(
+                       adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
+                       adev->virt.fw_reserve.checksum_key, checksum);
+               if (checksum != checkval) {
+                       DRM_ERROR("invalid pf2vf message\n");
+                       return -EINVAL;
+               }
+
+               adev->virt.gim_feature =
+                       ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
+               break;
+       case 2:
+               /* TODO: missing key, need to add it later */
+               checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
+               checkval = amd_sriov_msg_checksum(
+                       adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
+                       0, checksum);
+               if (checksum != checkval) {
+                       DRM_ERROR("invalid pf2vf message\n");
+                       return -EINVAL;
+               }
+
+               adev->virt.vf2pf_update_interval_ms =
+                       ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
+               adev->virt.gim_feature =
+                       ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;
+
+               break;
+       default:
+               DRM_ERROR("invalid pf2vf version\n");
+               return -EINVAL;
+       }
+
+       /* correct too large or too small interval value */
+       if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
+               adev->virt.vf2pf_update_interval_ms = 2000;
+
+       return 0;
+}
+
+static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
+{
+       struct amd_sriov_msg_vf2pf_info *vf2pf_info;
+       vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
+
+       if (adev->virt.fw_reserve.p_vf2pf == NULL)
+               return;
+
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE,      adev->vce.fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD,      adev->uvd.fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC,       adev->gmc.fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME,       adev->gfx.me_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP,      adev->gfx.pfp_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE,       adev->gfx.ce_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC,      adev->gfx.rlc_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC,      adev->gfx.mec_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2,     adev->gfx.mec2_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS,      adev->psp.sos_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,      adev->psp.asd_fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,   adev->psp.ta_ras_ucode_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,  adev->psp.ta_xgmi_ucode_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC,      adev->pm.fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA,     adev->sdma.instance[0].fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2,    adev->sdma.instance[1].fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN,      adev->vcn.fw_version);
+       POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU,     adev->dm.dmcu_fw_version);
+}
+
+static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
+{
+       struct amd_sriov_msg_vf2pf_info *vf2pf_info;
+       struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
+
+       vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
+
+       if (adev->virt.fw_reserve.p_vf2pf == NULL)
+               return -EINVAL;
+
+       memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));
+
+       vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
+       vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;
+
+#ifdef MODULE
+       if (THIS_MODULE->version != NULL)
+               strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
+       else
+#endif
+               strcpy(vf2pf_info->driver_version, "N/A");
+
+       vf2pf_info->pf2vf_version_required = 0; /* no requirement, guest understands all */
+       vf2pf_info->driver_cert = 0;
+       vf2pf_info->os_info.all = 0;
+
+       vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20;
+       vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20;
+       vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
+       vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
+
+       amdgpu_virt_populate_vf2pf_ucode_info(adev);
+
+       /* TODO: read dynamic info */
+       vf2pf_info->gfx_usage = 0;
+       vf2pf_info->compute_usage = 0;
+       vf2pf_info->encode_usage = 0;
+       vf2pf_info->decode_usage = 0;
+
+       vf2pf_info->checksum =
+               amd_sriov_msg_checksum(
+               vf2pf_info, vf2pf_info->header.size, 0, 0);
+
+       return 0;
+}
+
+void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
+{
+       struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
+
+       amdgpu_virt_read_pf2vf_data(adev);
+       amdgpu_virt_write_vf2pf_data(adev);
+
+       schedule_delayed_work(&(adev->virt.vf2pf_work),
+                       msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
+}
+
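The work item re-arms itself, so the guest gets a periodic exchange without a dedicated thread; delayed-work delays are in jiffies, hence the msecs_to_jiffies() conversion on the millisecond interval. A minimal sketch of the self-arming pattern under illustrative names (my_dev, my_poll_ms and my_work_fn are not driver API):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct my_dev {
            struct delayed_work poll_work;
            unsigned int my_poll_ms;
    };

    static void my_work_fn(struct work_struct *work)
    {
            struct my_dev *d = container_of(work, struct my_dev, poll_work.work);

            /* ... exchange data here ... */

            /* re-arm; the delay argument is in jiffies */
            schedule_delayed_work(&d->poll_work, msecs_to_jiffies(d->my_poll_ms));
    }

    static void my_start(struct my_dev *d)
    {
            INIT_DELAYED_WORK(&d->poll_work, my_work_fn);
            schedule_delayed_work(&d->poll_work, msecs_to_jiffies(d->my_poll_ms));
    }

    static void my_stop(struct my_dev *d)
    {
            /* also waits for a running instance to finish */
            cancel_delayed_work_sync(&d->poll_work);
    }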
+void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
+{
+       if (adev->virt.vf2pf_update_interval_ms != 0) {
+               DRM_INFO("clean up the vf2pf work item\n");
+               flush_delayed_work(&adev->virt.vf2pf_work);
+               cancel_delayed_work_sync(&adev->virt.vf2pf_work);
+       }
+}
+
+void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
+{
        uint64_t bp_block_offset = 0;
        uint32_t bp_block_size = 0;
-       struct amdgim_pf2vf_info_v2 *pf2vf_v2 = NULL;
+       struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;
 
        adev->virt.fw_reserve.p_pf2vf = NULL;
        adev->virt.fw_reserve.p_vf2pf = NULL;
+       adev->virt.vf2pf_update_interval_ms = 0;
 
        if (adev->mman.fw_vram_usage_va != NULL) {
+               adev->virt.vf2pf_update_interval_ms = 2000;
+
                adev->virt.fw_reserve.p_pf2vf =
-                       (struct amd_sriov_msg_pf2vf_info_header *)(
-                       adev->mman.fw_vram_usage_va + AMDGIM_DATAEXCHANGE_OFFSET);
-               AMDGPU_FW_VRAM_PF2VF_READ(adev, header.size, &pf2vf_size);
-               AMDGPU_FW_VRAM_PF2VF_READ(adev, checksum, &checksum);
-               AMDGPU_FW_VRAM_PF2VF_READ(adev, feature_flags, &adev->virt.gim_feature);
-
-               /* pf2vf message must be in 4K */
-               if (pf2vf_size > 0 && pf2vf_size < 4096) {
-                       if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
-                               pf2vf_v2 = (struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf;
-                               bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_L & 0xFFFFFFFF) |
-                                               ((((uint64_t)pf2vf_v2->bp_block_offset_H) << 32) & 0xFFFFFFFF00000000);
+                       (struct amd_sriov_msg_pf2vf_info_header *)
+                       (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
+               adev->virt.fw_reserve.p_vf2pf =
+                       (struct amd_sriov_msg_vf2pf_info_header *)
+                       (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));
+
+               amdgpu_virt_read_pf2vf_data(adev);
+               amdgpu_virt_write_vf2pf_data(adev);
+
+               /* bad page handling for version 2 */
+               if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
+                               pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;
+
+                               bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
+                                               ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
                                bp_block_size = pf2vf_v2->bp_block_size;
 
                                if (bp_block_size && !adev->virt.ras_init_done)
@@ -450,37 +611,11 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
                                if (adev->virt.ras_init_done)
                                        amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
                        }
+       }
 
-                       checkval = amdgpu_virt_fw_reserve_get_checksum(
-                               adev->virt.fw_reserve.p_pf2vf, pf2vf_size,
-                               adev->virt.fw_reserve.checksum_key, checksum);
-                       if (checkval == checksum) {
-                               adev->virt.fw_reserve.p_vf2pf =
-                                       ((void *)adev->virt.fw_reserve.p_pf2vf +
-                                       pf2vf_size);
-                               memset((void *)adev->virt.fw_reserve.p_vf2pf, 0,
-                                       sizeof(amdgim_vf2pf_info));
-                               AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.version,
-                                       AMDGPU_FW_VRAM_VF2PF_VER);
-                               AMDGPU_FW_VRAM_VF2PF_WRITE(adev, header.size,
-                                       sizeof(amdgim_vf2pf_info));
-                               AMDGPU_FW_VRAM_VF2PF_READ(adev, driver_version,
-                                       &str);
-#ifdef MODULE
-                               if (THIS_MODULE->version != NULL)
-                                       strcpy(str, THIS_MODULE->version);
-                               else
-#endif
-                                       strcpy(str, "N/A");
-                               AMDGPU_FW_VRAM_VF2PF_WRITE(adev, driver_cert,
-                                       0);
-                               AMDGPU_FW_VRAM_VF2PF_WRITE(adev, checksum,
-                                       amdgpu_virt_fw_reserve_get_checksum(
-                                       adev->virt.fw_reserve.p_vf2pf,
-                                       pf2vf_size,
-                                       adev->virt.fw_reserve.checksum_key, 0));
-                       }
-               }
+       if (adev->virt.vf2pf_update_interval_ms != 0) {
+               INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
+               schedule_delayed_work(&(adev->virt.vf2pf_work),
+                               msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
        }
 }
 
index b2046c3..8dd624c 100644 (file)
@@ -24,6 +24,8 @@
 #ifndef AMDGPU_VIRT_H
 #define AMDGPU_VIRT_H
 
+#include "amdgv_sriovmsg.h"
+
 #define AMDGPU_SRIOV_CAPS_SRIOV_VBIOS  (1 << 0) /* vBIOS is sr-iov ready */
 #define AMDGPU_SRIOV_CAPS_ENABLE_IOV   (1 << 1) /* sr-iov is enabled on this GPU */
 #define AMDGPU_SRIOV_CAPS_IS_VF        (1 << 2) /* this GPU is a virtual function */
@@ -79,7 +81,10 @@ struct amdgpu_virt_fw_reserve {
        struct amd_sriov_msg_vf2pf_info_header *p_vf2pf;
        unsigned int checksum_key;
 };
+
 /*
+ * Legacy GIM header
+ *
  * Defination between PF and VF
  * Structures forcibly aligned to 4 to keep the same style as PF.
  */
@@ -101,15 +106,7 @@ enum AMDGIM_FEATURE_FLAG {
        AMDGIM_FEATURE_PP_ONE_VF = (1 << 4),
 };
 
-struct amd_sriov_msg_pf2vf_info_header {
-       /* the total structure size in byte. */
-       uint32_t size;
-       /* version of this structure, written by the GIM */
-       uint32_t version;
-       /* reserved */
-       uint32_t reserved[2];
-} __aligned(4);
-struct  amdgim_pf2vf_info_v1 {
+struct amdgim_pf2vf_info_v1 {
        /* header contains size and version */
        struct amd_sriov_msg_pf2vf_info_header header;
        /* max_width * max_height */
@@ -128,54 +125,6 @@ struct  amdgim_pf2vf_info_v1 {
        unsigned int checksum;
 } __aligned(4);
 
-struct  amdgim_pf2vf_info_v2 {
-       /* header contains size and version */
-       struct amd_sriov_msg_pf2vf_info_header header;
-       /* use private key from mailbox 2 to create chueksum */
-       uint32_t checksum;
-       /* The features flags of the GIM driver supports. */
-       uint32_t feature_flags;
-       /* max_width * max_height */
-       uint32_t uvd_enc_max_pixels_count;
-       /* 16x16 pixels/sec, codec independent */
-       uint32_t uvd_enc_max_bandwidth;
-       /* max_width * max_height */
-       uint32_t vce_enc_max_pixels_count;
-       /* 16x16 pixels/sec, codec independent */
-       uint32_t vce_enc_max_bandwidth;
-       /* Bad pages block position in BYTE */
-       uint32_t bp_block_offset_L;
-       uint32_t bp_block_offset_H;
-       /* Bad pages block size in BYTE */
-       uint32_t bp_block_size;
-       /* MEC FW position in kb from the start of VF visible frame buffer */
-       uint32_t mecfw_kboffset_L;
-       uint32_t mecfw_kboffset_H;
-       /* MEC FW size in KB */
-       uint32_t mecfw_ksize;
-       /* UVD FW position in kb from the start of VF visible frame buffer */
-       uint32_t uvdfw_kboffset_L;
-       uint32_t uvdfw_kboffset_H;
-       /* UVD FW size in KB */
-       uint32_t uvdfw_ksize;
-       /* VCE FW position in kb from the start of VF visible frame buffer */
-       uint32_t vcefw_kboffset_L;
-       uint32_t vcefw_kboffset_H;
-       /* VCE FW size in KB */
-       uint32_t vcefw_ksize;
-       uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 0, 0, (18 + sizeof(struct amd_sriov_msg_pf2vf_info_header)/sizeof(uint32_t)), 0)];
-} __aligned(4);
-
-
-struct amd_sriov_msg_vf2pf_info_header {
-       /* the total structure size in byte. */
-       uint32_t size;
-       /*version of this structure, written by the guest */
-       uint32_t version;
-       /* reserved */
-       uint32_t reserved[2];
-} __aligned(4);
-
 struct amdgim_vf2pf_info_v1 {
        /* header contains size and version */
        struct amd_sriov_msg_vf2pf_info_header header;
@@ -237,31 +186,6 @@ struct amdgim_vf2pf_info_v2 {
        uint32_t reserved[AMDGIM_GET_STRUCTURE_RESERVED_SIZE(256, 64, 0, (12 + sizeof(struct amd_sriov_msg_vf2pf_info_header)/sizeof(uint32_t)), 0)];
 } __aligned(4);
 
-#define AMDGPU_FW_VRAM_VF2PF_VER 2
-typedef struct amdgim_vf2pf_info_v2 amdgim_vf2pf_info ;
-
-#define AMDGPU_FW_VRAM_VF2PF_WRITE(adev, field, val) \
-       do { \
-               ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field = (val); \
-       } while (0)
-
-#define AMDGPU_FW_VRAM_VF2PF_READ(adev, field, val) \
-       do { \
-               (*val) = ((amdgim_vf2pf_info *)adev->virt.fw_reserve.p_vf2pf)->field; \
-       } while (0)
-
-#define AMDGPU_FW_VRAM_PF2VF_READ(adev, field, val) \
-       do { \
-               if (!adev->virt.fw_reserve.p_pf2vf) \
-                       *(val) = 0; \
-               else { \
-                       if (adev->virt.fw_reserve.p_pf2vf->version == 1) \
-                               *(val) = ((struct amdgim_pf2vf_info_v1 *)adev->virt.fw_reserve.p_pf2vf)->field; \
-                       if (adev->virt.fw_reserve.p_pf2vf->version == 2) \
-                               *(val) = ((struct amdgim_pf2vf_info_v2 *)adev->virt.fw_reserve.p_pf2vf)->field; \
-               } \
-       } while (0)
-
 struct amdgpu_virt_ras_err_handler_data {
        /* point to bad page records array */
        struct eeprom_table_record *bps;
@@ -285,7 +209,7 @@ struct amdgpu_virt {
        struct work_struct              flr_work;
        struct amdgpu_mm_table          mm_table;
        const struct amdgpu_virt_ops    *ops;
-       struct amdgpu_vf_error_buffer   vf_errors;
+       struct amdgpu_vf_error_buffer   vf_errors;
        struct amdgpu_virt_fw_reserve   fw_reserve;
        uint32_t gim_feature;
        uint32_t reg_access_mode;
@@ -293,6 +217,10 @@ struct amdgpu_virt {
        bool tdr_debug;
        struct amdgpu_virt_ras_err_handler_data *virt_eh_data;
        bool ras_init_done;
+
+       /* vf2pf message */
+       struct delayed_work vf2pf_work;
+       uint32_t vf2pf_update_interval_ms;
 };
 
 #define amdgpu_sriov_enabled(adev) \
@@ -341,11 +269,9 @@ void amdgpu_virt_request_init_data(struct amdgpu_device *adev);
 int amdgpu_virt_wait_reset(struct amdgpu_device *adev);
 int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev);
 void amdgpu_virt_free_mm_table(struct amdgpu_device *adev);
-int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
-                                       unsigned int key,
-                                       unsigned int chksum);
 void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev);
 void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev);
 void amdgpu_detect_virtualization(struct amdgpu_device *adev);
 
 bool amdgpu_virt_can_access_debugfs(struct amdgpu_device *adev);
index 420931d..2b65e83 100644 (file)
@@ -1502,6 +1502,8 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
 
                        pt = cursor.entry->base.bo;
                        shift = parent_shift;
+                       frag_end = max(frag_end, ALIGN(frag_start + 1,
+                                  1ULL << shift));
                }
 
                /* Looks good so far, calculate parameters for the update */
@@ -1513,19 +1515,26 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
                entry_end = min(entry_end, end);
 
                do {
+                       struct amdgpu_vm *vm = params->vm;
                        uint64_t upd_end = min(entry_end, frag_end);
                        unsigned nptes = (upd_end - frag_start) >> shift;
+                       uint64_t upd_flags = flags | AMDGPU_PTE_FRAG(frag);
 
                        /* This can happen when we set higher level PDs to
                         * silent to stop fault floods.
                         */
                        nptes = max(nptes, 1u);
+
+                       trace_amdgpu_vm_update_ptes(params, frag_start, upd_end,
+                                                   nptes, dst, incr, upd_flags,
+                                                   vm->task_info.pid,
+                                                   vm->immediate.fence_context);
                        amdgpu_vm_update_flags(params, pt, cursor.level,
                                               pe_start, dst, nptes, incr,
-                                              flags | AMDGPU_PTE_FRAG(frag));
+                                              upd_flags);
 
                        pe_start += nptes * 8;
-                       dst += (uint64_t)nptes * AMDGPU_GPU_PAGE_SIZE << shift;
+                       dst += nptes * incr;
 
                        frag_start = upd_end;
                        if (frag_start >= frag_end) {
index 770025a..7c46937 100644 (file)
@@ -98,7 +98,7 @@ struct amdgpu_bo_list_entry;
 #define AMDGPU_PTE_MTYPE_NV10(a)       ((uint64_t)(a) << 48)
 #define AMDGPU_PTE_MTYPE_NV10_MASK     AMDGPU_PTE_MTYPE_NV10(7ULL)
 
-/* How to programm VM fault handling */
+/* How to program VM fault handling */
 #define AMDGPU_VM_FAULT_STOP_NEVER     0
 #define AMDGPU_VM_FAULT_STOP_FIRST     1
 #define AMDGPU_VM_FAULT_STOP_ALWAYS    2
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
new file mode 100644 (file)
index 0000000..5355827
--- /dev/null
@@ -0,0 +1,276 @@
+/*
+ * Copyright 2018-2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef AMDGV_SRIOV_MSG__H_
+#define AMDGV_SRIOV_MSG__H_
+
+/* unit in kilobytes */
+#define AMD_SRIOV_MSG_VBIOS_OFFSET              0
+#define AMD_SRIOV_MSG_VBIOS_SIZE_KB             64
+#define AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB    AMD_SRIOV_MSG_VBIOS_SIZE_KB
+#define AMD_SRIOV_MSG_DATAEXCHANGE_SIZE_KB      4
+
+/*
+ * layout
+ * 0           64KB        65KB        66KB
+ * |   VBIOS   |   PF2VF   |   VF2PF   |   Bad Page   | ...
+ * |   64KB    |   1KB     |   1KB     |
+ */
+#define AMD_SRIOV_MSG_SIZE_KB                   1
+#define AMD_SRIOV_MSG_PF2VF_OFFSET_KB           AMD_SRIOV_MSG_DATAEXCHANGE_OFFSET_KB
+#define AMD_SRIOV_MSG_VF2PF_OFFSET_KB           (AMD_SRIOV_MSG_PF2VF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
+#define AMD_SRIOV_MSG_BAD_PAGE_OFFSET_KB        (AMD_SRIOV_MSG_VF2PF_OFFSET_KB + AMD_SRIOV_MSG_SIZE_KB)
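For reference, the KB constants resolve to fixed byte offsets from the start of the reserved region; a hedged standalone check of the arithmetic (the local defines mirror the constants above):

    #include <stdio.h>

    #define VBIOS_KB 64     /* AMD_SRIOV_MSG_VBIOS_SIZE_KB */
    #define MSG_KB   1      /* AMD_SRIOV_MSG_SIZE_KB */

    int main(void)
    {
            printf("pf2vf    at 0x%x\n", VBIOS_KB << 10);                /* 0x10000 */
            printf("vf2pf    at 0x%x\n", (VBIOS_KB + MSG_KB) << 10);     /* 0x10400 */
            printf("bad page at 0x%x\n", (VBIOS_KB + 2 * MSG_KB) << 10); /* 0x10800 */
            return 0;
    }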
+
+/*
+ * PF2VF history log:
+ * v1 defined in amdgim
+ * v2 current
+ *
+ * VF2PF history log:
+ * v1 defined in amdgim
+ * v2 defined in amdgim
+ * v3 current
+ */
+#define AMD_SRIOV_MSG_FW_VRAM_PF2VF_VER                        2
+#define AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER                        3
+
+#define AMD_SRIOV_MSG_RESERVE_UCODE            24
+
+enum amd_sriov_ucode_engine_id {
+       AMD_SRIOV_UCODE_ID_VCE = 0,
+       AMD_SRIOV_UCODE_ID_UVD,
+       AMD_SRIOV_UCODE_ID_MC,
+       AMD_SRIOV_UCODE_ID_ME,
+       AMD_SRIOV_UCODE_ID_PFP,
+       AMD_SRIOV_UCODE_ID_CE,
+       AMD_SRIOV_UCODE_ID_RLC,
+       AMD_SRIOV_UCODE_ID_RLC_SRLC,
+       AMD_SRIOV_UCODE_ID_RLC_SRLG,
+       AMD_SRIOV_UCODE_ID_RLC_SRLS,
+       AMD_SRIOV_UCODE_ID_MEC,
+       AMD_SRIOV_UCODE_ID_MEC2,
+       AMD_SRIOV_UCODE_ID_SOS,
+       AMD_SRIOV_UCODE_ID_ASD,
+       AMD_SRIOV_UCODE_ID_TA_RAS,
+       AMD_SRIOV_UCODE_ID_TA_XGMI,
+       AMD_SRIOV_UCODE_ID_SMC,
+       AMD_SRIOV_UCODE_ID_SDMA,
+       AMD_SRIOV_UCODE_ID_SDMA2,
+       AMD_SRIOV_UCODE_ID_VCN,
+       AMD_SRIOV_UCODE_ID_DMCU,
+       AMD_SRIOV_UCODE_ID__MAX
+};
+
+#pragma pack(push, 1)  /* PF2VF / VF2PF data areas are byte packed */
+
+union amd_sriov_msg_feature_flags {
+       struct {
+               uint32_t  error_log_collect  : 1;
+               uint32_t  host_load_ucodes   : 1;
+               uint32_t  host_flr_vramlost  : 1;
+               uint32_t  mm_bw_management   : 1;
+               uint32_t  pp_one_vf_mode     : 1;
+               uint32_t  reserved           : 27;
+       } flags;
+       uint32_t      all;
+};
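A hedged usage sketch for the flags/all union: the bitfield view tests individual capabilities, while .all carries the raw 32-bit word actually exchanged with the host (toy_host_loads_ucodes is an illustrative name; uint32_t comes from linux/types.h in kernel context):

    /* returns nonzero when the host loads ucode on the VF's behalf */
    static inline unsigned int toy_host_loads_ucodes(uint32_t raw_word)
    {
            union amd_sriov_msg_feature_flags f;

            f.all = raw_word;
            return f.flags.host_load_ucodes;
    }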
+
+union amd_sriov_msg_os_info {
+       struct {
+               uint32_t  windows            : 1;
+               uint32_t  reserved           : 31;
+       } info;
+       uint32_t      all;
+};
+
+struct amd_sriov_msg_pf2vf_info_header {
+       /* the total structure size in byte */
+       uint32_t size;
+       /* version of this structure, written by the HOST */
+       uint32_t version;
+       /* reserved */
+       uint32_t reserved[2];
+};
+
+struct amd_sriov_msg_pf2vf_info {
+       /* header contains size and version */
+       struct amd_sriov_msg_pf2vf_info_header header;
+       /* use private key from mailbox 2 to create checksum */
+       uint32_t checksum;
+       /* The features flags of the HOST driver supports */
+       union amd_sriov_msg_feature_flags feature_flags;
+       /* (max_width * max_height * fps) / (16 * 16) */
+       uint32_t hevc_enc_max_mb_per_second;
+       /* (max_width * max_height) / (16 * 16) */
+       uint32_t hevc_enc_max_mb_per_frame;
+       /* (max_width * max_height * fps) / (16 * 16) */
+       uint32_t avc_enc_max_mb_per_second;
+       /* (max_width * max_height) / (16 * 16) */
+       uint32_t avc_enc_max_mb_per_frame;
+       /* MEC FW position in BYTE from the start of VF visible frame buffer */
+       uint64_t mecfw_offset;
+       /* MEC FW size in BYTE */
+       uint32_t mecfw_size;
+       /* UVD FW position in BYTE from the start of VF visible frame buffer */
+       uint64_t uvdfw_offset;
+       /* UVD FW size in BYTE */
+       uint32_t uvdfw_size;
+       /* VCE FW position in BYTE from the start of VF visible frame buffer */
+       uint64_t vcefw_offset;
+       /* VCE FW size in BYTE */
+       uint32_t vcefw_size;
+       /* Bad pages block position in BYTE */
+       uint32_t bp_block_offset_low;
+       uint32_t bp_block_offset_high;
+       /* Bad pages block size in BYTE */
+       uint32_t bp_block_size;
+       /* frequency for VF to update the VF2PF area in msec, 0 = manual */
+       uint32_t vf2pf_update_interval_ms;
+       /* identification in ROCm SMI */
+       uint64_t uuid;
+       uint32_t fcn_idx;
+       /* reserved */
+       uint32_t reserved[256-26];
+};
+
+struct amd_sriov_msg_vf2pf_info_header {
+       /* the total structure size in byte */
+       uint32_t size;
+       /* version of this structure, written by the guest */
+       uint32_t version;
+       /* reserved */
+       uint32_t reserved[2];
+};
+
+struct amd_sriov_msg_vf2pf_info {
+       /* header contains size and version */
+       struct amd_sriov_msg_vf2pf_info_header header;
+       uint32_t checksum;
+       /* driver version */
+       uint8_t  driver_version[64];
+       /* driver certification, 1=WHQL, 0=None */
+       uint32_t driver_cert;
+       /* guest OS type and version */
+       union amd_sriov_msg_os_info os_info;
+       /* guest fb information in the unit of MB */
+       uint32_t fb_usage;
+       /* guest gfx engine usage percentage */
+       uint32_t gfx_usage;
+       /* guest gfx engine health percentage */
+       uint32_t gfx_health;
+       /* guest compute engine usage percentage */
+       uint32_t compute_usage;
+       /* guest compute engine health percentage */
+       uint32_t compute_health;
+       /* guest avc engine usage percentage. 0xffff means N/A */
+       uint32_t avc_enc_usage;
+       /* guest avc engine health percentage. 0xffff means N/A */
+       uint32_t avc_enc_health;
+       /* guest hevc engine usage percentage. 0xffff means N/A */
+       uint32_t hevc_enc_usage;
+       /* guest hevc engine health percentage. 0xffff means N/A */
+       uint32_t hevc_enc_health;
+       /* combined encode/decode usage */
+       uint32_t encode_usage;
+       uint32_t decode_usage;
+       /* Version of PF2VF that VF understands */
+       uint32_t pf2vf_version_required;
+       /* additional FB usage */
+       uint32_t fb_vis_usage;
+       uint32_t fb_vis_size;
+       uint32_t fb_size;
+       /* guest ucode data, each one is 1.25 Dword */
+       struct {
+               uint8_t  id;
+               uint32_t version;
+       } ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE];
+
+       /* reserved */
+       uint32_t reserved[256-68];
+};
+
+/* mailbox message sent from guest to host */
+enum amd_sriov_mailbox_request_message {
+       MB_REQ_MSG_REQ_GPU_INIT_ACCESS = 1,
+       MB_REQ_MSG_REL_GPU_INIT_ACCESS,
+       MB_REQ_MSG_REQ_GPU_FINI_ACCESS,
+       MB_REQ_MSG_REL_GPU_FINI_ACCESS,
+       MB_REQ_MSG_REQ_GPU_RESET_ACCESS,
+       MB_REQ_MSG_REQ_GPU_INIT_DATA,
+
+       MB_REQ_MSG_LOG_VF_ERROR       = 200,
+};
+
+/* mailbox message sent from host to guest */
+enum amd_sriov_mailbox_response_message {
+       MB_RES_MSG_CLR_MSG_BUF = 0,
+       MB_RES_MSG_READY_TO_ACCESS_GPU = 1,
+       MB_RES_MSG_FLR_NOTIFICATION,
+       MB_RES_MSG_FLR_NOTIFICATION_COMPLETION,
+       MB_RES_MSG_SUCCESS,
+       MB_RES_MSG_FAIL,
+       MB_RES_MSG_QUERY_ALIVE,
+       MB_RES_MSG_GPU_INIT_DATA_READY,
+
+       MB_RES_MSG_TEXT_MESSAGE = 255
+};
+
+/* version data stored in MAILBOX_MSGBUF_RCV_DW1 for future expansion */
+enum amd_sriov_gpu_init_data_version {
+       GPU_INIT_DATA_READY_V1 = 1,
+};
+
+#pragma pack(pop)      /* Restore previous packing option */
+
+/* checksum function between host and guest */
+unsigned int amd_sriov_msg_checksum(void *obj,
+                               unsigned long obj_size,
+                               unsigned int key,
+                               unsigned int checksum);
+
+/* assertion at compile time */
+#ifdef __linux__
+#define stringification(s) _stringification(s)
+#define _stringification(s) #s
+
+_Static_assert(
+       sizeof(struct amd_sriov_msg_vf2pf_info) == AMD_SRIOV_MSG_SIZE_KB << 10,
+       "amd_sriov_msg_vf2pf_info must be " stringification(AMD_SRIOV_MSG_SIZE_KB) " KB");
+
+_Static_assert(
+       sizeof(struct amd_sriov_msg_pf2vf_info) == AMD_SRIOV_MSG_SIZE_KB << 10,
+       "amd_sriov_msg_pf2vf_info must be " stringification(AMD_SRIOV_MSG_SIZE_KB) " KB");
+
+_Static_assert(
+       AMD_SRIOV_MSG_RESERVE_UCODE % 4 == 0,
+       "AMD_SRIOV_MSG_RESERVE_UCODE must be multiple of 4");
+
+_Static_assert(
+       AMD_SRIOV_MSG_RESERVE_UCODE > AMD_SRIOV_UCODE_ID__MAX,
+       "AMD_SRIOV_MSG_RESERVE_UCODE must be bigger than AMD_SRIOV_UCODE_ID__MAX");
+
+#undef _stringification
+#undef stringification
+#endif
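The two macro levels used above are the standard expand-then-stringize idiom: with a single level the parameter would be stringized before it is macro-expanded. A toy illustration (VAL, STR1 and STR2 are throwaway names):

    #define VAL  4
    #define STR1(s) #s
    #define STR2(s) STR1(s)
    /* STR1(VAL) yields "VAL"; STR2(VAL) expands VAL first, yielding "4" */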
+
+#endif /* AMDGV_SRIOV_MSG__H_ */
index 401c99f..db953e9 100644 (file)
@@ -316,14 +316,9 @@ static int cik_ih_sw_fini(void *handle)
 
 static int cik_ih_hw_init(void *handle)
 {
-       int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = cik_ih_irq_init(adev);
-       if (r)
-               return r;
-
-       return 0;
+       return cik_ih_irq_init(adev);
 }
 
 static int cik_ih_hw_fini(void *handle)
index cc93577..b4d4b76 100644 (file)
@@ -47,6 +47,9 @@ static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
 static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
 static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
                                              int index);
+static int dce_virtual_pageflip(struct amdgpu_device *adev,
+                               unsigned crtc_id);
+static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer);
 static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
                                                        int crtc,
                                                        enum amdgpu_interrupt_state state);
@@ -171,8 +174,10 @@ static void dce_virtual_crtc_commit(struct drm_crtc *crtc)
 static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
 {
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+       struct drm_device *dev = crtc->dev;
 
-       drm_crtc_vblank_off(crtc);
+       if (dev->num_crtcs)
+               drm_crtc_vblank_off(crtc);
 
        amdgpu_crtc->enabled = false;
        amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
@@ -247,6 +252,11 @@ static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
        amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
        drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
 
+       hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       hrtimer_set_expires(&amdgpu_crtc->vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD);
+       amdgpu_crtc->vblank_timer.function = dce_virtual_vblank_timer_handle;
+       hrtimer_start(&amdgpu_crtc->vblank_timer,
+                     DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
        return 0;
 }
 
@@ -476,7 +486,7 @@ static int dce_virtual_hw_fini(void *handle)
 
        for (i = 0; i<adev->mode_info.num_crtc; i++)
                if (adev->mode_info.crtcs[i])
-                       dce_virtual_set_crtc_vblank_interrupt_state(adev, i, AMDGPU_IRQ_STATE_DISABLE);
+                       hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
 
        return 0;
 }
@@ -698,9 +708,15 @@ static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vbla
                                                       struct amdgpu_crtc, vblank_timer);
        struct drm_device *ddev = amdgpu_crtc->base.dev;
        struct amdgpu_device *adev = drm_to_adev(ddev);
+       struct amdgpu_irq_src *source = adev->irq.client[AMDGPU_IRQ_CLIENTID_LEGACY].sources
+               [VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER];
+       int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
+                                               amdgpu_crtc->crtc_id);
 
-       drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
-       dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+       if (amdgpu_irq_enabled(adev, source, irq_type)) {
+               drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
+               dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
+       }
        hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD,
                      HRTIMER_MODE_REL);
 
@@ -716,21 +732,6 @@ static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *ad
                return;
        }
 
-       if (state && !adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
-               DRM_DEBUG("Enable software vsync timer\n");
-               hrtimer_init(&adev->mode_info.crtcs[crtc]->vblank_timer,
-                            CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-               hrtimer_set_expires(&adev->mode_info.crtcs[crtc]->vblank_timer,
-                                   DCE_VIRTUAL_VBLANK_PERIOD);
-               adev->mode_info.crtcs[crtc]->vblank_timer.function =
-                       dce_virtual_vblank_timer_handle;
-               hrtimer_start(&adev->mode_info.crtcs[crtc]->vblank_timer,
-                             DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
-       } else if (!state && adev->mode_info.crtcs[crtc]->vsync_timer_enabled) {
-               DRM_DEBUG("Disable software vsync timer\n");
-               hrtimer_cancel(&adev->mode_info.crtcs[crtc]->vblank_timer);
-       }
-
        adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
        DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
 }
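With this restructuring the vblank hrtimer runs for the crtc's whole lifetime (started in crtc_init, cancelled in hw_fini) and the handler merely checks amdgpu_irq_enabled() before delivering events. A hedged sketch of the self-rearming hrtimer pattern (toy_* names are illustrative):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    #define TOY_PERIOD ktime_set(0, 16666666)   /* ~60 Hz */

    static struct hrtimer toy_timer;

    static enum hrtimer_restart toy_handler(struct hrtimer *t)
    {
            /* deliver the emulated vblank here, gated as needed */

            /* re-arm from the handler, as the driver does above */
            hrtimer_start(t, TOY_PERIOD, HRTIMER_MODE_REL);
            return HRTIMER_NORESTART;
    }

    static void toy_start(void)
    {
            hrtimer_init(&toy_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            toy_timer.function = toy_handler;
            hrtimer_start(&toy_timer, TOY_PERIOD, HRTIMER_MODE_REL);
    }

    static void toy_stop(void)
    {
            hrtimer_cancel(&toy_timer);
    }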
index 17fb2ef..9792ec7 100644 (file)
@@ -3610,6 +3610,9 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
                if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
                        adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
                break;
+       case CHIP_NAVY_FLOUNDER:
+               adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+               break;
        default:
                break;
        }
index d898c9f..6959aeb 100644 (file)
@@ -49,6 +49,7 @@
 #include "amdgpu_ras.h"
 
 #include "gfx_v9_4.h"
+#include "gfx_v9_0.h"
 
 #include "asic_reg/pwr/pwr_10_0_offset.h"
 #include "asic_reg/pwr/pwr_10_0_sh_mask.h"
@@ -788,7 +789,6 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev);
 static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
                                  struct amdgpu_cu_info *cu_info);
 static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
 static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
@@ -2075,6 +2075,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
        .ras_error_inject = &gfx_v9_4_ras_error_inject,
        .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
        .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
+       .query_ras_error_status = &gfx_v9_4_query_ras_error_status,
 };
 
 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -2196,7 +2197,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
 static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
                                      int mec, int pipe, int queue)
 {
-       int r;
        unsigned irq_type;
        struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
        unsigned int hw_prio;
@@ -2221,13 +2221,8 @@ static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
        hw_prio = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue) ?
                        AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
        /* type-2 packets are deprecated on MEC, use type-3 instead */
-       r = amdgpu_ring_init(adev, ring, 1024,
-                            &adev->gfx.eop_irq, irq_type, hw_prio);
-       if (r)
-               return r;
-
-
-       return 0;
+       return amdgpu_ring_init(adev, ring, 1024,
+                               &adev->gfx.eop_irq, irq_type, hw_prio);
 }
 
 static int gfx_v9_0_sw_init(void *handle)
@@ -2402,7 +2397,8 @@ static void gfx_v9_0_tiling_mode_table_init(struct amdgpu_device *adev)
        /* TODO */
 }
 
-static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance)
+void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
+                          u32 instance)
 {
        u32 data;
 
@@ -2560,14 +2556,14 @@ static void gfx_v9_0_constants_init(struct amdgpu_device *adev)
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                        tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
-                                           !!amdgpu_noretry);
+                                           !!adev->gmc.noretry);
                        WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
                        WREG32_SOC15_RLC(GC, 0, mmSH_MEM_BASES, 0);
                } else {
                        tmp = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
                                            SH_MEM_ALIGNMENT_MODE_UNALIGNED);
                        tmp = REG_SET_FIELD(tmp, SH_MEM_CONFIG, RETRY_DISABLE,
-                                           !!amdgpu_noretry);
+                                           !!adev->gmc.noretry);
                        WREG32_SOC15_RLC(GC, 0, mmSH_MEM_CONFIG, tmp);
                        tmp = REG_SET_FIELD(0, SH_MEM_BASES, PRIVATE_BASE,
                                (adev->gmc.private_aperture_start >> 48));
index fa5a3fb..dfe8d48 100644 (file)
@@ -26,9 +26,7 @@
 
 extern const struct amdgpu_ip_block_version gfx_v9_0_ip_block;
 
-void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
-
-uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev);
-int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, struct amdgpu_cu_info *cu_info);
+void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num,
+                          u32 instance);
 
 #endif
index bd85aed..bc699d6 100644 (file)
@@ -992,3 +992,32 @@ int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if)
 
        return ret;
 }
+
+static const struct soc15_reg_entry gfx_v9_4_rdrsp_status_regs =
+       { SOC15_REG_ENTRY(GC, 0, mmGCEA_ERR_STATUS), 0, 1, 32 };
+
+void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev)
+{
+       uint32_t i, j;
+       uint32_t reg_value;
+
+       if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+               return;
+
+       mutex_lock(&adev->grbm_idx_mutex);
+
+       for (i = 0; i < gfx_v9_4_rdrsp_status_regs.se_num; i++) {
+               for (j = 0; j < gfx_v9_4_rdrsp_status_regs.instance;
+                    j++) {
+                       gfx_v9_4_select_se_sh(adev, i, 0, j);
+                       reg_value = RREG32(SOC15_REG_ENTRY_OFFSET(
+                               gfx_v9_4_rdrsp_status_regs));
+                       if (reg_value)
+                               dev_warn(adev->dev, "GCEA err detected at instance: %d, status: 0x%x!\n",
+                                               j, reg_value);
+               }
+       }
+
+       gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+       mutex_unlock(&adev->grbm_idx_mutex);
+}
index 1ffecc5..875f184 100644 (file)
@@ -34,4 +34,6 @@ int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
 
 void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev);
 
+void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev);
+
 #endif /* __GFX_V9_4_H__ */
index 529e463..fad887a 100644 (file)
@@ -245,7 +245,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
-                                   !amdgpu_noretry);
+                                   !adev->gmc.noretry);
                WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL,
                                    i * hub->ctx_distance, tmp);
                WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -403,3 +403,13 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev)
        hub->eng_addr_distance = mmVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
                mmVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
 }
+
+
+const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs = {
+       .get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
+       .setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs,
+       .gart_enable = gfxhub_v1_0_gart_enable,
+       .gart_disable = gfxhub_v1_0_gart_disable,
+       .set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
+       .init = gfxhub_v1_0_init,
+};
index 92d3a70..0c46672 100644 (file)
@@ -33,4 +33,5 @@ u64 gfxhub_v1_0_get_mc_fb_offset(struct amdgpu_device *adev);
 void gfxhub_v1_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
                                uint64_t page_table_base);
 
+extern const struct amdgpu_gfxhub_funcs gfxhub_v1_0_funcs;
 #endif
index c0ab71d..1e24b6d 100644 (file)
@@ -21,6 +21,7 @@
  *
  */
 #include "amdgpu.h"
+#include "gfxhub_v1_0.h"
 #include "gfxhub_v1_1.h"
 
 #include "gc/gc_9_2_1_offset.h"
@@ -28,7 +29,7 @@
 
 #include "soc15_common.h"
 
-int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
+static int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
 {
        u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmMC_VM_XGMI_LFB_CNTL);
        u32 max_region =
@@ -66,3 +67,13 @@ int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev)
 
        return 0;
 }
+
+const struct amdgpu_gfxhub_funcs gfxhub_v1_1_funcs = {
+       .get_mc_fb_offset = gfxhub_v1_0_get_mc_fb_offset,
+       .setup_vm_pt_regs = gfxhub_v1_0_setup_vm_pt_regs,
+       .gart_enable = gfxhub_v1_0_gart_enable,
+       .gart_disable = gfxhub_v1_0_gart_disable,
+       .set_fault_enable_default = gfxhub_v1_0_set_fault_enable_default,
+       .init = gfxhub_v1_0_init,
+       .get_xgmi_info = gfxhub_v1_1_get_xgmi_info,
+};
index d753cf2..ae5759f 100644 (file)
@@ -24,6 +24,6 @@
 #ifndef __GFXHUB_V1_1_H__
 #define __GFXHUB_V1_1_H__
 
-int gfxhub_v1_1_get_xgmi_info(struct amdgpu_device *adev);
+extern const struct amdgpu_gfxhub_funcs gfxhub_v1_1_funcs;
 
 #endif
index b882ac5..456360b 100644 (file)
@@ -102,7 +102,7 @@ gfxhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
                GCVM_L2_PROTECTION_FAULT_STATUS, RW));
 }
 
-u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
+static u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
 {
        u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);
 
@@ -112,12 +112,12 @@ u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev)
        return base;
 }
 
-u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
+static u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev)
 {
        return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
 }
 
-void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
                                uint64_t page_table_base)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -153,11 +153,6 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
        uint64_t value;
 
        if (!amdgpu_sriov_vf(adev)) {
-               /*
-                * the new L1 policy will block SRIOV guest from writing
-                * these regs, and they will be programed at host.
-                * so skip programing these regs.
-                */
                /* Disable AGP. */
                WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
                WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
@@ -318,7 +313,7 @@ static void gfxhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
-                                   !amdgpu_noretry);
+                                   !adev->gmc.noretry);
                WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL,
                                    i * hub->ctx_distance, tmp);
                WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -347,7 +342,7 @@ static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
        }
 }
 
-int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
+static int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
 {
        /* GART Enable. */
        gfxhub_v2_0_init_gart_aperture_regs(adev);
@@ -363,7 +358,7 @@ int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
        return 0;
 }
 
-void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
+static void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
        u32 tmp;
@@ -394,7 +389,7 @@ void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  * @value: true redirects VM faults to the default page
  */
-void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
+static void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
                                          bool value)
 {
        u32 tmp;
@@ -436,7 +431,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v2_0_vmhub_funcs = {
        .get_invalidate_req = gfxhub_v2_0_get_invalidate_req,
 };
 
-void gfxhub_v2_0_init(struct amdgpu_device *adev)
+static void gfxhub_v2_0_init(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
 
@@ -477,3 +472,13 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev)
 
        hub->vmhub_funcs = &gfxhub_v2_0_vmhub_funcs;
 }
+
+const struct amdgpu_gfxhub_funcs gfxhub_v2_0_funcs = {
+       .get_fb_location = gfxhub_v2_0_get_fb_location,
+       .get_mc_fb_offset = gfxhub_v2_0_get_mc_fb_offset,
+       .setup_vm_pt_regs = gfxhub_v2_0_setup_vm_pt_regs,
+       .gart_enable = gfxhub_v2_0_gart_enable,
+       .gart_disable = gfxhub_v2_0_gart_disable,
+       .set_fault_enable_default = gfxhub_v2_0_set_fault_enable_default,
+       .init = gfxhub_v2_0_init,
+};
index 392b8cd..9ddc35c 100644 (file)
 #ifndef __GFXHUB_V2_0_H__
 #define __GFXHUB_V2_0_H__
 
-u64 gfxhub_v2_0_get_fb_location(struct amdgpu_device *adev);
-int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev);
-void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev);
-void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev,
-                                         bool value);
-void gfxhub_v2_0_init(struct amdgpu_device *adev);
-u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev);
-void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-                               uint64_t page_table_base);
+extern const struct amdgpu_gfxhub_funcs gfxhub_v2_0_funcs;
 
 #endif
index 237a9ff..724bb29 100644 (file)
@@ -102,7 +102,7 @@ gfxhub_v2_1_print_l2_protection_fault_status(struct amdgpu_device *adev,
                GCVM_L2_PROTECTION_FAULT_STATUS, RW));
 }
 
-u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
+static u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
 {
        u64 base = RREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE);
 
@@ -112,12 +112,12 @@ u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev)
        return base;
 }
 
-u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev)
+static u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev)
 {
        return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24;
 }
 
-void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
+static void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
                                uint64_t page_table_base)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
@@ -319,7 +319,7 @@ static void gfxhub_v2_1_setup_vmid_config(struct amdgpu_device *adev)
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
-                                   !amdgpu_noretry);
+                                   !adev->gmc.noretry);
                WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_CNTL,
                                    i * hub->ctx_distance, tmp);
                WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
@@ -348,7 +348,7 @@ static void gfxhub_v2_1_program_invalidation(struct amdgpu_device *adev)
        }
 }
 
-int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev)
+static int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev)
 {
        if (amdgpu_sriov_vf(adev)) {
                /*
@@ -376,7 +376,7 @@ int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev)
        return 0;
 }
 
-void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev)
+static void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
        u32 tmp;
@@ -405,7 +405,7 @@ void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev)
  * @adev: amdgpu_device pointer
  * @value: true redirects VM faults to the default page
  */
-void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
+static void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
                                          bool value)
 {
        u32 tmp;
@@ -454,7 +454,7 @@ static const struct amdgpu_vmhub_funcs gfxhub_v2_1_vmhub_funcs = {
        .get_invalidate_req = gfxhub_v2_1_get_invalidate_req,
 };
 
-void gfxhub_v2_1_init(struct amdgpu_device *adev)
+static void gfxhub_v2_1_init(struct amdgpu_device *adev)
 {
        struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_GFXHUB_0];
 
@@ -496,7 +496,7 @@ void gfxhub_v2_1_init(struct amdgpu_device *adev)
        hub->vmhub_funcs = &gfxhub_v2_1_vmhub_funcs;
 }
 
-int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
+static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
 {
        u32 xgmi_lfb_cntl = RREG32_SOC15(GC, 0, mmGCMC_VM_XGMI_LFB_CNTL);
        u32 max_region =
@@ -531,3 +531,14 @@ int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
 
        return 0;
 }
+
+const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs = {
+       .get_fb_location = gfxhub_v2_1_get_fb_location,
+       .get_mc_fb_offset = gfxhub_v2_1_get_mc_fb_offset,
+       .setup_vm_pt_regs = gfxhub_v2_1_setup_vm_pt_regs,
+       .gart_enable = gfxhub_v2_1_gart_enable,
+       .gart_disable = gfxhub_v2_1_gart_disable,
+       .set_fault_enable_default = gfxhub_v2_1_set_fault_enable_default,
+       .init = gfxhub_v2_1_init,
+       .get_xgmi_info = gfxhub_v2_1_get_xgmi_info,
+};
index 3452a4e..f75c2ec 100644 (file)
 #ifndef __GFXHUB_V2_1_H__
 #define __GFXHUB_V2_1_H__
 
-u64 gfxhub_v2_1_get_fb_location(struct amdgpu_device *adev);
-int gfxhub_v2_1_gart_enable(struct amdgpu_device *adev);
-void gfxhub_v2_1_gart_disable(struct amdgpu_device *adev);
-void gfxhub_v2_1_set_fault_enable_default(struct amdgpu_device *adev,
-                                         bool value);
-void gfxhub_v2_1_init(struct amdgpu_device *adev);
-u64 gfxhub_v2_1_get_mc_fb_offset(struct amdgpu_device *adev);
-void gfxhub_v2_1_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid,
-                               uint64_t page_table_base);
-
-int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev);
+extern const struct amdgpu_gfxhub_funcs gfxhub_v2_1_funcs;
 
 #endif
index 31359e5..dbc8b76 100644 (file)
@@ -634,11 +634,25 @@ static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
        adev->mmhub.funcs = &mmhub_v2_0_funcs;
 }
 
+static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_SIENNA_CICHLID:
+       case CHIP_NAVY_FLOUNDER:
+               adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
+               break;
+       default:
+               adev->gfxhub.funcs = &gfxhub_v2_0_funcs;
+               break;
+       }
+}
+
 static int gmc_v10_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        gmc_v10_0_set_mmhub_funcs(adev);
+       gmc_v10_0_set_gfxhub_funcs(adev);
        gmc_v10_0_set_gmc_funcs(adev);
        gmc_v10_0_set_irq_funcs(adev);
        gmc_v10_0_set_umc_funcs(adev);
@@ -676,11 +691,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
 {
        u64 base = 0;
 
-       if (adev->asic_type == CHIP_SIENNA_CICHLID ||
-           adev->asic_type == CHIP_NAVY_FLOUNDER)
-               base = gfxhub_v2_1_get_fb_location(adev);
-       else
-               base = gfxhub_v2_0_get_fb_location(adev);
+       base = adev->gfxhub.funcs->get_fb_location(adev);
 
        /* add the xgmi offset of the physical node */
        base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
@@ -689,11 +700,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
        amdgpu_gmc_gart_location(adev, mc);
 
        /* base offset of vram pages */
-       if (adev->asic_type == CHIP_SIENNA_CICHLID ||
-           adev->asic_type == CHIP_NAVY_FLOUNDER)
-               adev->vm_manager.vram_base_offset = gfxhub_v2_1_get_mc_fb_offset(adev);
-       else
-               adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);
+       adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
 
        /* add the xgmi offset of the physical node */
        adev->vm_manager.vram_base_offset +=
@@ -777,11 +784,7 @@ static int gmc_v10_0_sw_init(void *handle)
        int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       if (adev->asic_type == CHIP_SIENNA_CICHLID ||
-           adev->asic_type == CHIP_NAVY_FLOUNDER)
-               gfxhub_v2_1_init(adev);
-       else
-               gfxhub_v2_0_init(adev);
+       adev->gfxhub.funcs->init(adev);
 
        adev->mmhub.funcs->init(adev);
 
@@ -852,7 +855,7 @@ static int gmc_v10_0_sw_init(void *handle)
        }
 
        if (adev->gmc.xgmi.supported) {
-               r = gfxhub_v2_1_get_xgmi_info(adev);
+               r = adev->gfxhub.funcs->get_xgmi_info(adev);
                if (r)
                        return r;
        }
@@ -944,11 +947,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       if (adev->asic_type == CHIP_SIENNA_CICHLID ||
-           adev->asic_type == CHIP_NAVY_FLOUNDER)
-               r = gfxhub_v2_1_gart_enable(adev);
-       else
-               r = gfxhub_v2_0_gart_enable(adev);
+       r = adev->gfxhub.funcs->gart_enable(adev);
        if (r)
                return r;
 
@@ -969,11 +968,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
        value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
                false : true;
 
-       if (adev->asic_type == CHIP_SIENNA_CICHLID ||
-           adev->asic_type == CHIP_NAVY_FLOUNDER)
-               gfxhub_v2_1_set_fault_enable_default(adev, value);
-       else
-               gfxhub_v2_0_set_fault_enable_default(adev, value);
+       adev->gfxhub.funcs->set_fault_enable_default(adev, value);
        adev->mmhub.funcs->set_fault_enable_default(adev, value);
        gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
        gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);
@@ -1014,11 +1009,7 @@ static int gmc_v10_0_hw_init(void *handle)
  */
 static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
 {
-       if (adev->asic_type == CHIP_SIENNA_CICHLID ||
-           adev->asic_type == CHIP_NAVY_FLOUNDER)
-               gfxhub_v2_1_gart_disable(adev);
-       else
-               gfxhub_v2_0_gart_disable(adev);
+       adev->gfxhub.funcs->gart_disable(adev);
        adev->mmhub.funcs->gart_disable(adev);
        amdgpu_gart_table_vram_unpin(adev);
 }
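
Taken together, the gmc_v10_0 hunks show the payoff: the repeated asic_type ladder is evaluated exactly once at early init, and every later call site becomes a branch-free indirect call. A self-contained toy version of that shape (all names illustrative):

/*
 * Standalone demo: pick an ops table once at init, then every call
 * site is a single indirect call instead of an asic_type ladder.
 */
#include <stdio.h>

struct hub_ops { const char *(*name)(void); };

static const char *v2_0(void) { return "gfxhub v2.0"; }
static const char *v2_1(void) { return "gfxhub v2.1"; }

static const struct hub_ops v2_0_ops = { .name = v2_0 };
static const struct hub_ops v2_1_ops = { .name = v2_1 };

enum chip { CHIP_NAVI10, CHIP_SIENNA_CICHLID };

struct dev { enum chip chip; const struct hub_ops *ops; };

static void set_hub_ops(struct dev *d)  /* mirrors gmc_v10_0_set_gfxhub_funcs */
{
        switch (d->chip) {
        case CHIP_SIENNA_CICHLID:
                d->ops = &v2_1_ops;
                break;
        default:
                d->ops = &v2_0_ops;
                break;
        }
}

int main(void)
{
        struct dev d = { .chip = CHIP_SIENNA_CICHLID };

        set_hub_ops(&d);                        /* once, at early-init time */
        printf("using %s\n", d.ops->name());    /* call sites stay branch-free */
        return 0;
}
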
index 5400cac..3ebbddb 100644 (file)
@@ -1164,6 +1164,19 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
        }
 }
 
+static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev)
+{
+       switch (adev->asic_type) {
+       case CHIP_ARCTURUS:
+       case CHIP_VEGA20:
+               adev->gfxhub.funcs = &gfxhub_v1_1_funcs;
+               break;
+       default:
+               adev->gfxhub.funcs = &gfxhub_v1_0_funcs;
+               break;
+       }
+}
+
 static int gmc_v9_0_early_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -1172,6 +1185,7 @@ static int gmc_v9_0_early_init(void *handle)
        gmc_v9_0_set_irq_funcs(adev);
        gmc_v9_0_set_umc_funcs(adev);
        gmc_v9_0_set_mmhub_funcs(adev);
+       gmc_v9_0_set_gfxhub_funcs(adev);
 
        adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
        adev->gmc.shared_aperture_end =
@@ -1193,21 +1207,16 @@ static int gmc_v9_0_late_init(void *handle)
        r = amdgpu_gmc_allocate_vm_inv_eng(adev);
        if (r)
                return r;
-       /* Check if ecc is available */
+
+       /*
+        * Workaround for a performance drop when the VBIOS enables
+        * partial writes but disables HBM ECC for vega10.
+        */
        if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
-               r = amdgpu_atomfirmware_mem_ecc_supported(adev);
-               if (!r) {
-                       DRM_INFO("ECC is not present.\n");
+               if (!(adev->ras_features & (1 << AMDGPU_RAS_BLOCK__UMC))) {
                        if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
                                adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
-               } else
-                       DRM_INFO("ECC is active.\n");
-
-               r = amdgpu_atomfirmware_sram_ecc_supported(adev);
-               if (!r)
-                       DRM_INFO("SRAM ECC is not present.\n");
-               else
-                       DRM_INFO("SRAM ECC is active.\n");
+               }
        }
 
        if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
@@ -1234,7 +1243,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
        amdgpu_gmc_gart_location(adev, mc);
        amdgpu_gmc_agp_location(adev, mc);
        /* base offset of vram pages */
-       adev->vm_manager.vram_base_offset = gfxhub_v1_0_get_mc_fb_offset(adev);
+       adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
 
        /* XXX: add the xgmi offset of the physical node? */
        adev->vm_manager.vram_base_offset +=
@@ -1269,7 +1278,7 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
 
 #ifdef CONFIG_X86_64
        if (adev->flags & AMD_IS_APU) {
-               adev->gmc.aper_base = gfxhub_v1_0_get_mc_fb_offset(adev);
+               adev->gmc.aper_base = adev->gfxhub.funcs->get_mc_fb_offset(adev);
                adev->gmc.aper_size = adev->gmc.real_vram_size;
        }
 #endif
@@ -1339,7 +1348,7 @@ static int gmc_v9_0_sw_init(void *handle)
        int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       gfxhub_v1_0_init(adev);
+       adev->gfxhub.funcs->init(adev);
 
        adev->mmhub.funcs->init(adev);
 
@@ -1453,7 +1462,7 @@ static int gmc_v9_0_sw_init(void *handle)
        adev->need_swiotlb = drm_need_swiotlb(44);
 
        if (adev->gmc.xgmi.supported) {
-               r = gfxhub_v1_1_get_xgmi_info(adev);
+               r = adev->gfxhub.funcs->get_xgmi_info(adev);
                if (r)
                        return r;
        }
@@ -1569,7 +1578,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
        if (r)
                return r;
 
-       r = gfxhub_v1_0_gart_enable(adev);
+       r = adev->gfxhub.funcs->gart_enable(adev);
        if (r)
                return r;
 
@@ -1636,7 +1645,7 @@ static int gmc_v9_0_hw_init(void *handle)
                value = true;
 
        if (!amdgpu_sriov_vf(adev)) {
-               gfxhub_v1_0_set_fault_enable_default(adev, value);
+               adev->gfxhub.funcs->set_fault_enable_default(adev, value);
                adev->mmhub.funcs->set_fault_enable_default(adev, value);
        }
        for (i = 0; i < adev->num_vmhubs; ++i)
@@ -1659,7 +1668,7 @@ static int gmc_v9_0_hw_init(void *handle)
  */
 static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
 {
-       gfxhub_v1_0_gart_disable(adev);
+       adev->gfxhub.funcs->gart_disable(adev);
        adev->mmhub.funcs->gart_disable(adev);
        amdgpu_gart_table_vram_unpin(adev);
 }
@@ -1683,14 +1692,9 @@ static int gmc_v9_0_hw_fini(void *handle)
 
 static int gmc_v9_0_suspend(void *handle)
 {
-       int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-       r = gmc_v9_0_hw_fini(adev);
-       if (r)
-               return r;
-
-       return 0;
+       return gmc_v9_0_hw_fini(adev);
 }
 
 static int gmc_v9_0_resume(void *handle)
index bc30028..c600b61 100644 (file)
@@ -33,6 +33,7 @@
 
 static void jpeg_v1_0_set_dec_ring_funcs(struct amdgpu_device *adev);
 static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev);
+static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring);
 
 static void jpeg_v1_0_decode_ring_patch_wreg(struct amdgpu_ring *ring, uint32_t *ptr, uint32_t reg_offset, uint32_t val)
 {
@@ -564,8 +565,8 @@ static const struct amdgpu_ring_funcs jpeg_v1_0_decode_ring_vm_funcs = {
        .insert_start = jpeg_v1_0_decode_ring_insert_start,
        .insert_end = jpeg_v1_0_decode_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
-       .begin_use = vcn_v1_0_ring_begin_use,
-       .end_use = amdgpu_vcn_ring_end_use,
+       .begin_use = jpeg_v1_0_ring_begin_use,
+       .end_use = vcn_v1_0_ring_end_use,
        .emit_wreg = jpeg_v1_0_decode_ring_emit_wreg,
        .emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
@@ -586,3 +587,22 @@ static void jpeg_v1_0_set_irq_funcs(struct amdgpu_device *adev)
 {
        adev->jpeg.inst->irq.funcs = &jpeg_v1_0_irq_funcs;
 }
+
+static void jpeg_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+{
+       struct amdgpu_device *adev = ring->adev;
+       bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
+       int cnt;
+
+       mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
+
+       if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_dec))
+               DRM_ERROR("JPEG dec: vcn dec ring may not be empty\n");
+
+       for (cnt = 0; cnt < adev->vcn.num_enc_rings; cnt++) {
+               if (amdgpu_fence_wait_empty(&adev->vcn.inst->ring_enc[cnt]))
+                       DRM_ERROR("JPEG dec: vcn enc ring[%d] may not be empty\n", cnt);
+       }
+
+       vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
+}
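
The begin_use added here takes the shared vcn1_jpeg1_workaround mutex, drains the VCN rings, and only then sets up power gating; the matching vcn_v1_0_ring_end_use (added in the vcn_v1_0.c hunks below) reschedules the idle work and drops the mutex, so use of the two blocks never overlaps. A standalone sketch of the lock pairing alone, with the fence draining and PG setup elided:

/*
 * Whichever ring's begin_use runs first holds the mutex until its
 * end_use; VCN and JPEG submissions are therefore serialized.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t workaround = PTHREAD_MUTEX_INITIALIZER;

static void ring_begin_use(const char *who)
{
        pthread_mutex_lock(&workaround);        /* vcn1_jpeg1_workaround */
        printf("%s: peer rings drained, power gating set up\n", who);
}

static void ring_end_use(const char *who)
{
        printf("%s: idle work rescheduled\n", who);
        pthread_mutex_unlock(&workaround);
}

static void *use_ring(void *arg)
{
        ring_begin_use(arg);
        ring_end_use(arg);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, use_ring, (void *)"vcn dec");
        pthread_create(&b, NULL, use_ring, (void *)"jpeg dec");
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}
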
index 4b74658..1c22d83 100644 (file)
@@ -832,7 +832,6 @@ static int mes_v10_1_queue_init(struct amdgpu_device *adev)
 static int mes_v10_1_ring_init(struct amdgpu_device *adev)
 {
        struct amdgpu_ring *ring;
-       int r;
 
        ring = &adev->mes.ring;
 
@@ -849,11 +848,7 @@ static int mes_v10_1_ring_init(struct amdgpu_device *adev)
        ring->no_scheduler = true;
        sprintf(ring->name, "mes_%d.%d.%d", ring->me, ring->pipe, ring->queue);
 
-       r = amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT);
-       if (r)
-               return r;
-
-       return 0;
+       return amdgpu_ring_init(adev, ring, 1024, NULL, 0, AMDGPU_RING_PRIO_DEFAULT);
 }
 
 static int mes_v10_1_mqd_sw_init(struct amdgpu_device *adev)
index 45a902b..f84701c 100644 (file)
@@ -268,7 +268,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
-                                   !amdgpu_noretry);
+                                   !adev->gmc.noretry);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL,
                                    i * hub->ctx_distance, tmp);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
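
This is one of several hunks (the gfxhub v1.0/v2.0/v2.1 and mmhub variants, plus the KFD side) that stop reading the amdgpu_noretry module parameter at each programming site and instead consult per-device state in adev->gmc.noretry. A minimal sketch of the presumed setup step follows; the function name and the treatment of an "auto" default are guesses, and only the consumer side is confirmed by these hunks:

/*
 * Minimal sketch, assuming the module parameter is resolved once
 * during GMC init into per-device state.
 */
void amdgpu_gmc_noretry_set(struct amdgpu_device *adev)
{
        struct amdgpu_gmc *gmc = &adev->gmc;

        /* honor an explicit amdgpu_noretry=0/1, otherwise pick a default */
        gmc->noretry = (amdgpu_noretry == -1) ? 0 : amdgpu_noretry;
}
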
index 2d88278..2063700 100644 (file)
@@ -201,11 +201,6 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
        WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF);
 
        if (!amdgpu_sriov_vf(adev)) {
-               /*
-                * the new L1 policy will block SRIOV guest from writing
-                * these regs, and they will be programed at host.
-                * so skip programing these regs.
-                */
                /* Program the system aperture low logical page number. */
                WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
                             adev->gmc.vram_start >> 18);
@@ -374,7 +369,7 @@ static void mmhub_v2_0_setup_vmid_config(struct amdgpu_device *adev)
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
-                                   !amdgpu_noretry);
+                                   !adev->gmc.noretry);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
                                    i * hub->ctx_distance, tmp);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
index 6c6ad52..66748bb 100644 (file)
@@ -330,7 +330,7 @@ static void mmhub_v9_4_setup_vmid_config(struct amdgpu_device *adev, int hubid)
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT1_CNTL,
                                    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
-                                   !amdgpu_noretry);
+                                   !adev->gmc.noretry);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT1_CNTL,
                                    hubid * MMHUB_INSTANCE_REGISTER_OFFSET +
                                    i * hub->ctx_distance, tmp);
@@ -1624,6 +1624,34 @@ static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
        }
 }
 
+static const struct soc15_reg_entry mmhub_v9_4_err_status_regs[] = {
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_ERR_STATUS), 0, 0, 0 },
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_ERR_STATUS), 0, 0, 0 },
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_ERR_STATUS), 0, 0, 0 },
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_ERR_STATUS), 0, 0, 0 },
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_ERR_STATUS), 0, 0, 0 },
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_ERR_STATUS), 0, 0, 0 },
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_ERR_STATUS), 0, 0, 0 },
+       { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_ERR_STATUS), 0, 0, 0 },
+};
+
+static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev)
+{
+       int i;
+       uint32_t reg_value;
+
+       if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB))
+               return;
+
+       for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_err_status_regs); i++) {
+               reg_value =
+                       RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_err_status_regs[i]));
+               if (reg_value)
+                       dev_warn(adev->dev, "MMHUB EA err detected at instance: %d, status: 0x%x!\n",
+                                       i, reg_value);
+       }
+}
+
 const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
        .ras_late_init = amdgpu_mmhub_ras_late_init,
        .query_ras_error_count = mmhub_v9_4_query_ras_error_count,
@@ -1636,4 +1664,5 @@ const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
        .set_clockgating = mmhub_v9_4_set_clockgating,
        .get_clockgating = mmhub_v9_4_get_clockgating,
        .setup_vm_pt_regs = mmhub_v9_4_setup_vm_pt_regs,
+       .query_ras_error_status = mmhub_v9_4_query_ras_error_status,
 };
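
Unlike the banked GCEA walk earlier in this series, each MMEA instance here has its own dedicated status register, so a flat ARRAY_SIZE() loop over the table suffices. A standalone model:

/*
 * Standalone model of the flat status-register walk above.
 */
#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static uint32_t mmea_err_status[8];     /* MMEA0..MMEA7, fake values */

int main(void)
{
        mmea_err_status[2] = 0x4;       /* plant one fake error */

        for (size_t i = 0; i < ARRAY_SIZE(mmea_err_status); i++)
                if (mmea_err_status[i])
                        printf("MMHUB EA err at instance %zu, status 0x%x\n",
                               i, mmea_err_status[i]);
        return 0;
}
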
index 0ec6603..1ce741a 100644 (file)
@@ -69,75 +69,40 @@ static const struct amd_ip_funcs nv_common_ip_funcs;
  */
 static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 {
-       unsigned long flags, address, data;
-       u32 r;
+       unsigned long address, data;
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       WREG32(address, reg);
-       (void)RREG32(address);
-       r = RREG32(data);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-       return r;
+       return amdgpu_device_indirect_rreg(adev, address, data, reg);
 }
 
 static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
-       unsigned long flags, address, data;
+       unsigned long address, data;
 
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       WREG32(address, reg);
-       (void)RREG32(address);
-       WREG32(data, v);
-       (void)RREG32(data);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       amdgpu_device_indirect_wreg(adev, address, data, reg, v);
 }
 
 static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
 {
-       unsigned long flags, address, data;
-       u64 r;
+       unsigned long address, data;
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       /* read low 32 bit */
-       WREG32(address, reg);
-       (void)RREG32(address);
-       r = RREG32(data);
-
-       /* read high 32 bit*/
-       WREG32(address, reg + 4);
-       (void)RREG32(address);
-       r |= ((u64)RREG32(data) << 32);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-       return r;
+       return amdgpu_device_indirect_rreg64(adev, address, data, reg);
 }
 
 static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
 {
-       unsigned long flags, address, data;
+       unsigned long address, data;
 
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       /* write low 32 bit */
-       WREG32(address, reg);
-       (void)RREG32(address);
-       WREG32(data, (u32)(v & 0xffffffffULL));
-       (void)RREG32(data);
-
-       /* write high 32 bit */
-       WREG32(address, reg + 4);
-       (void)RREG32(address);
-       WREG32(data, (u32)(v >> 32));
-       (void)RREG32(data);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
 }
 
 static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg)
@@ -621,7 +586,7 @@ static void nv_invalidate_hdp(struct amdgpu_device *adev,
                                struct amdgpu_ring *ring)
 {
        if (!ring || !ring->funcs->emit_wreg) {
-               WREG32_SOC15_NO_KIQ(NBIO, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
+               WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
        } else {
                amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
                                        HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
index cbc04a5..1ef2f5b 100644 (file)
@@ -83,19 +83,6 @@ struct psp_gfx_ctrl
 */
 #define GFX_FLAG_RESPONSE               0x80000000
 
-/* Gbr IH registers ID */
-enum ih_reg_id {
-       IH_RB           = 0,            // IH_RB_CNTL
-       IH_RB_RNG1      = 1,            // IH_RB_CNTL_RING1
-       IH_RB_RNG2      = 2,            // IH_RB_CNTL_RING2
-};
-
-/* Command to setup Gibraltar IH register */
-struct psp_gfx_cmd_gbr_ih_reg {
-       uint32_t                reg_value;      /* Value to be set to the IH_RB_CNTL... register*/
-       enum ih_reg_id          reg_id;         /* ID of the register */
-};
-
 /* TEE Gfx Command IDs for the ring buffer interface. */
 enum psp_gfx_cmd_id
 {
index 810635c..86fb1ed 100644 (file)
@@ -592,6 +592,9 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
 
+       if (amdgpu_sriov_vf(adev))
+               return 0;
+
        DRM_DEBUG("\n");
 
        switch (adev->asic_type) {
index 48c95a7..9c72b95 100644 (file)
@@ -203,6 +203,9 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
        const struct common_firmware_header *header = NULL;
        const struct sdma_firmware_header_v1_0 *hdr;
 
+       if (amdgpu_sriov_vf(adev))
+               return 0;
+
        DRM_DEBUG("\n");
 
        switch (adev->asic_type) {
index 34ccf37..9f39527 100644 (file)
@@ -148,6 +148,9 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
        struct amdgpu_firmware_info *info = NULL;
        const struct common_firmware_header *header = NULL;
 
+       if (amdgpu_sriov_vf(adev))
+               return 0;
+
        DRM_DEBUG("\n");
 
        switch (adev->asic_type) {
index ddd55e3..afcccc6 100644 (file)
  */
 static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 {
-       unsigned long flags, address, data;
-       u32 r;
+       unsigned long address, data;
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       WREG32(address, reg);
-       (void)RREG32(address);
-       r = RREG32(data);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-       return r;
+       return amdgpu_device_indirect_rreg(adev, address, data, reg);
 }
 
 static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
 {
-       unsigned long flags, address, data;
+       unsigned long address, data;
 
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       WREG32(address, reg);
-       (void)RREG32(address);
-       WREG32(data, v);
-       (void)RREG32(data);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       amdgpu_device_indirect_wreg(adev, address, data, reg, v);
 }
 
 static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
 {
-       unsigned long flags, address, data;
-       u64 r;
+       unsigned long address, data;
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       /* read low 32 bit */
-       WREG32(address, reg);
-       (void)RREG32(address);
-       r = RREG32(data);
-
-       /* read high 32 bit*/
-       WREG32(address, reg + 4);
-       (void)RREG32(address);
-       r |= ((u64)RREG32(data) << 32);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
-       return r;
+       return amdgpu_device_indirect_rreg64(adev, address, data, reg);
 }
 
 static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
 {
-       unsigned long flags, address, data;
+       unsigned long address, data;
 
        address = adev->nbio.funcs->get_pcie_index_offset(adev);
        data = adev->nbio.funcs->get_pcie_data_offset(adev);
 
-       spin_lock_irqsave(&adev->pcie_idx_lock, flags);
-       /* write low 32 bit */
-       WREG32(address, reg);
-       (void)RREG32(address);
-       WREG32(data, (u32)(v & 0xffffffffULL));
-       (void)RREG32(data);
-
-       /* write high 32 bit */
-       WREG32(address, reg + 4);
-       (void)RREG32(address);
-       WREG32(data, (u32)(v >> 32));
-       (void)RREG32(data);
-       spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
+       amdgpu_device_indirect_wreg64(adev, address, data, reg, v);
 }
 
 static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
@@ -697,12 +662,12 @@ static void soc15_reg_base_init(struct amdgpu_device *adev)
                 * it doesn't support SRIOV. */
                if (amdgpu_discovery) {
                        r = amdgpu_discovery_reg_base_init(adev);
-                       if (r) {
-                               DRM_WARN("failed to init reg base from ip discovery table, "
-                                        "fallback to legacy init method\n");
-                               vega10_reg_base_init(adev);
-                       }
+                       if (r == 0)
+                               break;
+                       DRM_WARN("failed to init reg base from ip discovery table, "
+                                "fallback to legacy init method\n");
                }
+               vega10_reg_base_init(adev);
                break;
        case CHIP_VEGA20:
                vega20_reg_base_init(adev);
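
Both nv.c and soc15.c shed the same open-coded index/data access pattern in favor of the amdgpu_device_indirect_* helpers added in amdgpu_device.c (per the file summary). Reconstructed from the removed bodies, the 32-bit read helper plausibly looks like the following; the real implementation may differ in detail:

/*
 * Reconstruction from the removed call-site bodies, not the verbatim
 * helper in amdgpu_device.c.
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
                                u32 pcie_index, u32 pcie_data,
                                u32 reg_addr)
{
        unsigned long flags;
        u32 r;

        spin_lock_irqsave(&adev->pcie_idx_lock, flags);
        WREG32(pcie_index, reg_addr);   /* select the register...     */
        (void)RREG32(pcie_index);       /* ...and flush the selection */
        r = RREG32(pcie_data);          /* read the selected register */
        spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

        return r;
}

The 64-bit variants presumably repeat the select-and-access at reg_addr and reg_addr + 4 under the same lock, exactly as the removed rreg64/wreg64 bodies did.
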
index 3cafba7..b0c0c43 100644 (file)
@@ -348,7 +348,7 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
        /* Set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);
 
-       /* programm the 4GB memory segment for rptr and ring buffer */
+       /* program the 4GB memory segment for rptr and ring buffer */
        WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
                                   (0x7 << 16) | (0x1 << 31));
 
@@ -541,7 +541,7 @@ static void uvd_v4_2_mc_resume(struct amdgpu_device *adev)
        uint64_t addr;
        uint32_t size;
 
-       /* programm the VCPU memory controller bits 0-27 */
+       /* program the VCPU memory controller bits 0-27 */
        addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
        size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
index a566ff9..6e57001 100644 (file)
@@ -253,7 +253,7 @@ static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
        uint64_t offset;
        uint32_t size;
 
-       /* programm memory controller bits 0-27 */
+       /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->uvd.inst->gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
@@ -404,7 +404,7 @@ static int uvd_v5_0_start(struct amdgpu_device *adev)
        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
 
-       /* programm the RB_BASE for ring buffer */
+       /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
index ed30fb4..666bfa4 100644 (file)
@@ -583,7 +583,7 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
        uint64_t offset;
        uint32_t size;
 
-       /* programm memory controller bits 0-27 */
+       /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
                        lower_32_bits(adev->uvd.inst->gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
@@ -825,7 +825,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
        /* set the wb address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));
 
-       /* programm the RB_BASE for ring buffer */
+       /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
index e07e3fa..b44c867 100644 (file)
@@ -1073,7 +1073,7 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
                WREG32_SOC15(UVD, k, mmUVD_RBC_RB_RPTR_ADDR,
                                (upper_32_bits(ring->gpu_addr) >> 2));
 
-               /* programm the RB_BASE for ring buffer */
+               /* program the RB_BASE for ring buffer */
                WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                                lower_32_bits(ring->gpu_addr));
                WREG32_SOC15(UVD, k, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
index 927c330..86e1ef7 100644 (file)
@@ -54,6 +54,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
                                int inst_idx, struct dpg_pause_state *new_state);
 
 static void vcn_v1_0_idle_work_handler(struct work_struct *work);
+static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
 
 /**
  * vcn_v1_0_early_init - set function pointers
@@ -910,7 +911,7 @@ static int vcn_v1_0_start_spg_mode(struct amdgpu_device *adev)
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
                        (upper_32_bits(ring->gpu_addr) >> 2));
 
-       /* programm the RB_BASE for ring buffer */
+       /* program the RB_BASE for ring buffer */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1068,7 +1069,7 @@ static int vcn_v1_0_start_dpg_mode(struct amdgpu_device *adev)
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
                                                                (upper_32_bits(ring->gpu_addr) >> 2));
 
-       /* programm the RB_BASE for ring buffer */
+       /* program the RB_BASE for ring buffer */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                                                                lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1804,11 +1805,23 @@ static void vcn_v1_0_idle_work_handler(struct work_struct *work)
        }
 }
 
-void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
+static void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
 {
-       struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_device *adev = ring->adev;
        bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work);
 
+       mutex_lock(&adev->vcn.vcn1_jpeg1_workaround);
+
+       if (amdgpu_fence_wait_empty(&ring->adev->jpeg.inst->ring_dec))
+               DRM_ERROR("VCN dec: jpeg dec ring may not be empty\n");
+
+       vcn_v1_0_set_pg_for_begin_use(ring, set_clocks);
+}
+
+void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks)
+{
+       struct amdgpu_device *adev = ring->adev;
+
        if (set_clocks) {
                amdgpu_gfx_off_ctrl(adev, false);
                if (adev->pm.dpm_enabled)
@@ -1844,6 +1858,12 @@ void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring)
        }
 }
 
+void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring)
+{
+       schedule_delayed_work(&ring->adev->vcn.idle_work, VCN_IDLE_TIMEOUT);
+       mutex_unlock(&ring->adev->vcn.vcn1_jpeg1_workaround);
+}
+
 static const struct amd_ip_funcs vcn_v1_0_ip_funcs = {
        .name = "vcn_v1_0",
        .early_init = vcn_v1_0_early_init,
@@ -1891,7 +1911,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_dec_ring_vm_funcs = {
        .insert_end = vcn_v1_0_dec_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = vcn_v1_0_ring_begin_use,
-       .end_use = amdgpu_vcn_ring_end_use,
+       .end_use = vcn_v1_0_ring_end_use,
        .emit_wreg = vcn_v1_0_dec_ring_emit_wreg,
        .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
@@ -1923,7 +1943,7 @@ static const struct amdgpu_ring_funcs vcn_v1_0_enc_ring_vm_funcs = {
        .insert_end = vcn_v1_0_enc_ring_insert_end,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = vcn_v1_0_ring_begin_use,
-       .end_use = amdgpu_vcn_ring_end_use,
+       .end_use = vcn_v1_0_ring_end_use,
        .emit_wreg = vcn_v1_0_enc_ring_emit_wreg,
        .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
        .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
index f67d739..1f1cc7f 100644 (file)
@@ -24,7 +24,8 @@
 #ifndef __VCN_V1_0_H__
 #define __VCN_V1_0_H__
 
-void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring);
+void vcn_v1_0_ring_end_use(struct amdgpu_ring *ring);
+void vcn_v1_0_set_pg_for_begin_use(struct amdgpu_ring *ring, bool set_clocks);
 
 extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block;
 
index 23a9eb5..e5d29de 100644 (file)
@@ -900,7 +900,7 @@ static int vcn_v2_0_start_dpg_mode(struct amdgpu_device *adev, bool indirect)
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_RPTR_ADDR,
                (upper_32_bits(ring->gpu_addr) >> 2));
 
-       /* programm the RB_BASE for ring buffer */
+       /* program the RB_BASE for ring buffer */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1060,7 +1060,7 @@ static int vcn_v2_0_start(struct amdgpu_device *adev)
        WREG32_SOC15(UVD, 0, mmUVD_RBC_RB_CNTL, tmp);
 
        fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
-       /* programm the RB_BASE for ring buffer */
+       /* program the RB_BASE for ring buffer */
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(UVD, 0, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
index 139fac0..0f1d3ef 100644 (file)
@@ -882,7 +882,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
        WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
                (upper_32_bits(ring->gpu_addr) >> 2));
 
-       /* programm the RB_BASE for ring buffer */
+       /* program the RB_BASE for ring buffer */
        WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                lower_32_bits(ring->gpu_addr));
        WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
@@ -1062,7 +1062,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev)
                WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
 
                fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
-               /* programm the RB_BASE for ring buffer */
+               /* program the RB_BASE for ring buffer */
                WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
                        lower_32_bits(ring->gpu_addr));
                WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
index b7b16ad..222f1df 100644 (file)
@@ -97,6 +97,7 @@ void kfd_chardev_exit(void)
        device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
        class_destroy(kfd_class);
        unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
+       kfd_device = NULL;
 }
 
 struct device *kfd_chardev(void)
@@ -1290,18 +1291,6 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
                return -EINVAL;
        }
 
-       if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
-               if (args->size != kfd_doorbell_process_slice(dev))
-                       return -EINVAL;
-               offset = kfd_get_process_doorbells(dev, p);
-       } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
-               if (args->size != PAGE_SIZE)
-                       return -EINVAL;
-               offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
-               if (!offset)
-                       return -ENOMEM;
-       }
-
        mutex_lock(&p->mutex);
 
        pdd = kfd_bind_process_to_device(dev, p);
@@ -1310,6 +1299,24 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
                goto err_unlock;
        }
 
+       if (flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) {
+               if (args->size != kfd_doorbell_process_slice(dev)) {
+                       err = -EINVAL;
+                       goto err_unlock;
+               }
+               offset = kfd_get_process_doorbells(pdd);
+       } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
+               if (args->size != PAGE_SIZE) {
+                       err = -EINVAL;
+                       goto err_unlock;
+               }
+               offset = amdgpu_amdkfd_get_mmio_remap_phys_addr(dev->kgd);
+               if (!offset) {
+                       err = -ENOMEM;
+                       goto err_unlock;
+               }
+       }
+
        err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
                dev->kgd, args->va_addr, args->size,
                pdd->vm, (struct kgd_mem **) &mem, &offset,
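
The motion here is not cosmetic: the doorbell offset now comes from the pdd, which exists only after kfd_bind_process_to_device(), so the size checks must run after the bind and therefore under p->mutex, with every failure funnelled through err_unlock. A standalone demo of that single-exit unwind idiom:

/*
 * Once the lock is taken, every failure path jumps to one label that
 * releases it.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

static int alloc_resource(unsigned int size, unsigned int required)
{
        int err = 0;

        pthread_mutex_lock(&m);

        if (size != required) {         /* validated while holding the lock */
                err = -EINVAL;
                goto err_unlock;
        }

        printf("allocated %u bytes\n", size);

err_unlock:
        pthread_mutex_unlock(&m);       /* one unlock site for all paths */
        return err;
}

int main(void)
{
        printf("ok=%d bad=%d\n", alloc_resource(4096, 4096),
               alloc_resource(512, 4096));
        return 0;
}
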
index 3fac06b..5e2254b 100644 (file)
@@ -797,7 +797,8 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
                return -ENODATA;
        }
 
-       pcrat_image = kmemdup(crat_table, crat_table->length, GFP_KERNEL);
+       pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
        if (!pcrat_image)
                return -ENOMEM;
+       memcpy(pcrat_image, crat_table, crat_table->length);
 
@@ -809,11 +810,10 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
 
 /* Memory required to create Virtual CRAT.
  * Since there is no easy way to predict the amount of memory required, the
- * following amount are allocated for CPU and GPU Virtual CRAT. This is
+ * following amount is allocated for GPU Virtual CRAT. This is
  * expected to cover all known conditions. But to be safe additional check
  * is put in the code to ensure we don't overwrite.
  */
-#define VCRAT_SIZE_FOR_CPU     (2 * PAGE_SIZE)
 #define VCRAT_SIZE_FOR_GPU     (4 * PAGE_SIZE)
 
 /* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
@@ -964,7 +964,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
 #endif
        int ret = 0;
 
-       if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
+       if (!pcrat_image)
                return -EINVAL;
 
        /* Fill in CRAT Header.
@@ -1364,30 +1364,37 @@ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
                                  uint32_t proximity_domain)
 {
        void *pcrat_image = NULL;
-       int ret = 0;
+       int ret = 0, num_nodes;
+       size_t dyn_size;
 
        if (!crat_image)
                return -EINVAL;
 
        *crat_image = NULL;
 
-       /* Allocate one VCRAT_SIZE_FOR_CPU for CPU virtual CRAT image and
-        * VCRAT_SIZE_FOR_GPU for GPU virtual CRAT image. This should cover
-        * all the current conditions. A check is put not to overwrite beyond
-        * allocated size
+       /* Size the CPU Virtual CRAT based on the number of online NUMA
+        * nodes. Allocate VCRAT_SIZE_FOR_GPU for the GPU virtual CRAT
+        * image; this should cover all current conditions, and a check
+        * guards against writing beyond the allocated size for GPUs.
         */
        switch (flags) {
        case COMPUTE_UNIT_CPU:
-               pcrat_image = kmalloc(VCRAT_SIZE_FOR_CPU, GFP_KERNEL);
+               num_nodes = num_online_nodes();
+               dyn_size = sizeof(struct crat_header) +
+                       num_nodes * (sizeof(struct crat_subtype_computeunit) +
+                       sizeof(struct crat_subtype_memory) +
+                       (num_nodes - 1) * sizeof(struct crat_subtype_iolink));
+               pcrat_image = kvmalloc(dyn_size, GFP_KERNEL);
                if (!pcrat_image)
                        return -ENOMEM;
-               *size = VCRAT_SIZE_FOR_CPU;
+               *size = dyn_size;
+               pr_debug("CRAT size is %zu\n", dyn_size);
                ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
                break;
        case COMPUTE_UNIT_GPU:
                if (!kdev)
                        return -EINVAL;
-               pcrat_image = kmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
+               pcrat_image = kvmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
                if (!pcrat_image)
                        return -ENOMEM;
                *size = VCRAT_SIZE_FOR_GPU;
@@ -1406,7 +1413,7 @@ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
        if (!ret)
                *crat_image = pcrat_image;
        else
-               kfree(pcrat_image);
+               kvfree(pcrat_image);
 
        return ret;
 }
@@ -1419,5 +1426,5 @@ int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
  */
 void kfd_destroy_crat_image(void *crat_image)
 {
-       kfree(crat_image);
+       kvfree(crat_image);
 }
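
To see how dyn_size scales, here is a worked standalone example with placeholder struct sizes (the real ones come from the CRAT definitions): each node contributes one compute-unit and one memory subtype plus an I/O link to each of the other nodes, so the buffer grows roughly quadratically with online NUMA nodes instead of being pinned at the old two-page VCRAT_SIZE_FOR_CPU.

/*
 * Worked example of the dyn_size formula; sizes are stand-ins, only
 * the shape of the growth is meaningful.
 */
#include <stdio.h>

#define HDR  40  /* stand-in for sizeof(struct crat_header)              */
#define CU   40  /* stand-in for sizeof(struct crat_subtype_computeunit) */
#define MEM  40  /* stand-in for sizeof(struct crat_subtype_memory)      */
#define LINK 40  /* stand-in for sizeof(struct crat_subtype_iolink)      */

int main(void)
{
        for (int n = 1; n <= 4; n++) {
                /* per node: one CU + one memory subtype, plus an I/O
                 * link to each of the other (n - 1) nodes */
                unsigned long dyn = HDR + n * (CU + MEM + (n - 1) * LINK);

                printf("%d node(s) -> %lu bytes\n", n, dyn);
        }
        return 0;
}
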
index e3fc6ed..903170e 100644 (file)
@@ -583,6 +583,8 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
 
        atomic_set(&kfd->sram_ecc_flag, 0);
 
+       ida_init(&kfd->doorbell_ida);
+
        return kfd;
 }
 
@@ -716,6 +718,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
 
        kfd->unique_id = amdgpu_amdkfd_get_unique_id(kfd->kgd);
 
+       kfd->noretry = amdgpu_amdkfd_get_noretry(kfd->kgd);
+
        if (kfd_interrupt_init(kfd)) {
                dev_err(kfd_device, "Error initializing interrupts\n");
                goto kfd_interrupt_error;
@@ -798,6 +802,7 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
                kfd_interrupt_exit(kfd);
                kfd_topology_remove_device(kfd);
                kfd_doorbell_fini(kfd);
+               ida_destroy(&kfd->doorbell_ida);
                kfd_gtt_sa_fini(kfd);
                amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
                if (kfd->gws)
index ed362ab..62504d5 100644 (file)
@@ -191,9 +191,8 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
        }
 
        q->properties.doorbell_off =
-               kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
+               kfd_get_doorbell_dw_offset_in_bar(dev, qpd_to_pdd(qpd),
                                          q->doorbell_id);
-
        return 0;
 }
 
index 309f63a..eca6331 100644 (file)
@@ -61,7 +61,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
                qpd->sh_mem_config =
                                SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                                        SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
-               if (amdgpu_noretry &&
+               if (dqm->dev->noretry &&
                    !dqm->dev->use_iommu_v2)
                        qpd->sh_mem_config |=
                                1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
index 8e0c00b..768d153 100644 (file)
@@ -31,9 +31,6 @@
  * kernel queues using the first doorbell page reserved for the kernel.
  */
 
-static DEFINE_IDA(doorbell_ida);
-static unsigned int max_doorbell_slices;
-
 /*
  * Each device exposes a doorbell aperture, a PCI MMIO aperture that
  * receives 32-bit writes that are passed to queues as wptr values.
@@ -84,9 +81,9 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
        else
                return -ENOSPC;
 
-       if (!max_doorbell_slices ||
-           doorbell_process_limit < max_doorbell_slices)
-               max_doorbell_slices = doorbell_process_limit;
+       if (!kfd->max_doorbell_slices ||
+           doorbell_process_limit < kfd->max_doorbell_slices)
+               kfd->max_doorbell_slices = doorbell_process_limit;
 
        kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
                                doorbell_start_offset;
@@ -130,6 +127,7 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
                      struct vm_area_struct *vma)
 {
        phys_addr_t address;
+       struct kfd_process_device *pdd;
 
        /*
         * For simplicitly we only allow mapping of the entire doorbell
@@ -138,9 +136,12 @@ int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
        if (vma->vm_end - vma->vm_start != kfd_doorbell_process_slice(dev))
                return -EINVAL;
 
-       /* Calculate physical address of doorbell */
-       address = kfd_get_process_doorbells(dev, process);
+       pdd = kfd_get_process_device_data(dev, process);
+       if (!pdd)
+               return -EINVAL;
 
+       /* Calculate physical address of doorbell */
+       address = kfd_get_process_doorbells(pdd);
        vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
                                VM_DONTDUMP | VM_PFNMAP;
 
@@ -226,7 +227,7 @@ void write_kernel_doorbell64(void __iomem *db, u64 value)
 }
 
 unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
-                                       struct kfd_process *process,
+                                       struct kfd_process_device *pdd,
                                        unsigned int doorbell_id)
 {
        /*
@@ -236,7 +237,7 @@ unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
         * units regardless of the ASIC-dependent doorbell size.
         */
        return kfd->doorbell_base_dw_offset +
-               process->doorbell_index
+               pdd->doorbell_index
                * kfd_doorbell_process_slice(kfd) / sizeof(u32) +
                doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
 }
@@ -251,25 +252,24 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
 
 }
 
-phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
-                                       struct kfd_process *process)
+phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)
 {
-       return dev->doorbell_base +
-               process->doorbell_index * kfd_doorbell_process_slice(dev);
+       return pdd->dev->doorbell_base +
+               pdd->doorbell_index * kfd_doorbell_process_slice(pdd->dev);
 }
 
-int kfd_alloc_process_doorbells(struct kfd_process *process)
+int kfd_alloc_process_doorbells(struct kfd_dev *kfd, unsigned int *doorbell_index)
 {
-       int r = ida_simple_get(&doorbell_ida, 1, max_doorbell_slices,
+       int r = ida_simple_get(&kfd->doorbell_ida, 1, kfd->max_doorbell_slices,
                                GFP_KERNEL);
        if (r > 0)
-               process->doorbell_index = r;
+               *doorbell_index = r;
 
        return r;
 }
 
-void kfd_free_process_doorbells(struct kfd_process *process)
+void kfd_free_process_doorbells(struct kfd_dev *kfd, unsigned int doorbell_index)
 {
-       if (process->doorbell_index)
-               ida_simple_remove(&doorbell_ida, process->doorbell_index);
+       if (doorbell_index)
+               ida_simple_remove(&kfd->doorbell_ida, doorbell_index);
 }
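
Taken together, the doorbell hunks above replace the global doorbell IDA and slice limit with per-device state in struct kfd_dev, and move the allocated index from the process onto the per-device kfd_process_device. A minimal caller sketch under those assumptions (example_bind_doorbells() is a hypothetical wrapper; only the functions shown in this diff are assumed to exist):

    /* Sketch: allocate a doorbell slice on one device, derive its physical
     * base, then release it, mirroring the reworked per-device API. */
    static int example_bind_doorbells(struct kfd_dev *dev,
                                      struct kfd_process_device *pdd)
    {
            phys_addr_t base;

            if (kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0)
                    return -ENOSPC;        /* this device's IDA is exhausted */

            base = kfd_get_process_doorbells(pdd);  /* reads pdd->dev itself */
            (void)base;                    /* a real caller would map this range */

            kfd_free_process_doorbells(dev, pdd->doorbell_index);
            return 0;
    }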
index f4b7f7e..5e90fe6 100644 (file)
@@ -70,6 +70,7 @@ err_create_wq:
 err_topology:
        kfd_chardev_exit();
 err_ioctl:
+       pr_err("KFD is disabled due to module initialization failure\n");
        return err;
 }
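
The err_* labels above are the tail of the usual kernel unwind ladder, and the new pr_err() fires on every failure path that reaches it. A schematic of the pattern with hypothetical step_a()/step_b() helpers (not the real kfd init steps):

    static int example_init(void)
    {
            int err;

            err = step_a();         /* e.g. character device registration */
            if (err)
                    goto err_a;
            err = step_b();         /* e.g. topology initialization */
            if (err)
                    goto err_b;
            return 0;

    err_b:
            undo_step_a();          /* unwind completed steps in reverse order */
    err_a:
            pr_err("module initialization failed\n");
            return err;
    }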
 
index 8c2b8cc..b7be5c5 100644 (file)
@@ -314,6 +314,11 @@ struct kfd_dev {
        spinlock_t smi_lock;
 
        uint32_t reset_seq_num;
+
+       struct ida doorbell_ida;
+       unsigned int max_doorbell_slices;
+
+       int noretry;
 };
 
 enum kfd_mempool {
@@ -699,6 +704,32 @@ struct kfd_process_device {
        struct attribute attr_evict;
 
        struct kobject *kobj_stats;
+       unsigned int doorbell_index;
+
+       /*
+        * @cu_occupancy: Reports occupancy of Compute Units (CU) of a process
+        * that is associated with the device encoded by "this" struct
+        * instance. The value reflects CU usage by all of the waves launched
+        * by this process on this device. An important property of the
+        * occupancy parameter is that its value is a snapshot of current use.
+        *
+        * The following is to be noted regarding how this parameter is
+        * reported:
+        *
+        *  The number of waves that a CU can launch is limited by a couple of
+        *  parameters. These are encoded by the struct amdgpu_cu_info instance
+        *  that is part of every device definition. For GFX9 devices this
+        *  translates to 40 waves (simd_per_cu * max_waves_per_simd) when
+        *  waves do not use scratch memory and 32 waves
+        *  (max_scratch_slots_per_cu) when they do use scratch memory. This
+        *  could change for future devices, so this example should be
+        *  considered only as a guide.
+        *
+        *  All CUs of a device are assumed to be available to the process;
+        *  this may not hold under certain conditions, e.g. CU masking.
+        *
+        *  Finally, the number of CUs occupied by a process depends on both
+        *  the number of CUs the device has and the number of other competing
+        *  processes.
+        */
+       struct attribute attr_cu_occupancy;
 };
 
 #define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
@@ -736,7 +767,6 @@ struct kfd_process {
        struct mmu_notifier mmu_notifier;
 
        uint16_t pasid;
-       unsigned int doorbell_index;
 
        /*
         * List of kfd_process_device structures,
@@ -869,13 +899,13 @@ u32 read_kernel_doorbell(u32 __iomem *db);
 void write_kernel_doorbell(void __iomem *db, u32 value);
 void write_kernel_doorbell64(void __iomem *db, u64 value);
 unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
-                                       struct kfd_process *process,
+                                       struct kfd_process_device *pdd,
                                        unsigned int doorbell_id);
-phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
-                                       struct kfd_process *process);
-int kfd_alloc_process_doorbells(struct kfd_process *process);
-void kfd_free_process_doorbells(struct kfd_process *process);
-
+phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
+int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
+                               unsigned int *doorbell_index);
+void kfd_free_process_doorbells(struct kfd_dev *kfd,
+                               unsigned int doorbell_index);
 /* GTT Sub-Allocator */
 
 int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
index ad53b26..2807e1c 100644 (file)
@@ -249,6 +249,52 @@ cleanup:
        }
 }
 
+/**
+ * kfd_get_cu_occupancy - Collect the number of waves in flight on this device
+ * by the current process. Translates the acquired wave count into the number
+ * of compute units that are occupied.
+ *
+ * @attr: Handle of the attribute that allows reporting of wave count. The
+ * attribute handle encapsulates the GPU device it is associated with, thereby
+ * allowing collection of waves in flight, etc.
+ *
+ * @buffer: Handle of the user-provided buffer, updated with the wave count.
+ *
+ * Return: Number of bytes written to the user buffer, or an error value.
+ */
+static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer)
+{
+       int cu_cnt;
+       int wave_cnt;
+       int max_waves_per_cu;
+       struct kfd_dev *dev = NULL;
+       struct kfd_process *proc = NULL;
+       struct kfd_process_device *pdd = NULL;
+
+       pdd = container_of(attr, struct kfd_process_device, attr_cu_occupancy);
+       dev = pdd->dev;
+       if (dev->kfd2kgd->get_cu_occupancy == NULL)
+               return -EINVAL;
+
+       cu_cnt = 0;
+       proc = pdd->process;
+       if (pdd->qpd.queue_count == 0) {
+               pr_debug("Gpu-Id: %d has no active queues for process %d\n",
+                        dev->id, proc->pasid);
+               return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
+       }
+
+       /* Collect the wave count from the device if it supports it */
+       wave_cnt = 0;
+       max_waves_per_cu = 0;
+       dev->kfd2kgd->get_cu_occupancy(dev->kgd, proc->pasid, &wave_cnt,
+                       &max_waves_per_cu);
+
+       /* Translate wave count to number of compute units */
+       cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;
+       return snprintf(buffer, PAGE_SIZE, "%d\n", cu_cnt);
+}
+
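
The translation at the end of kfd_get_cu_occupancy() is a plain ceiling division of in-flight waves over per-CU wave capacity. A standalone illustration with assumed numbers (the 40-waves-per-CU GFX9 limit comes from the struct comment added earlier in this series):

    #include <stdio.h>

    /* cu_cnt = ceil(wave_cnt / max_waves_per_cu), written with the same
     * integer arithmetic as kfd_get_cu_occupancy(). */
    int main(void)
    {
            int wave_cnt = 95;          /* assumed waves in flight */
            int max_waves_per_cu = 40;  /* GFX9 example, no scratch in use */
            int cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu;

            printf("%d waves occupy %d CUs\n", wave_cnt, cu_cnt);  /* prints 3 */
            return 0;
    }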
 static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
                               char *buffer)
 {
@@ -344,6 +390,7 @@ static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
 
        return 0;
 }
+
 static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
                                     struct attribute *attr, char *buffer)
 {
@@ -359,8 +406,13 @@ static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
                                PAGE_SIZE,
                                "%llu\n",
                                jiffies64_to_msecs(evict_jiffies));
-       } else
+
+       /* Sysfs handle that gets CU occupancy is per device */
+       } else if (strcmp(attr->name, "cu_occupancy") == 0) {
+               return kfd_get_cu_occupancy(attr, buffer);
+       } else {
                pr_err("Invalid attribute");
+       }
 
        return 0;
 }
@@ -466,6 +518,7 @@ static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
         * Create sysfs files for each GPU:
         * - proc/<pid>/stats_<gpuid>/
         * - proc/<pid>/stats_<gpuid>/evicted_ms
+        * - proc/<pid>/stats_<gpuid>/cu_occupancy
         */
        list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
                struct kobject *kobj_stats;
@@ -496,6 +549,19 @@ static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
                if (ret)
                        pr_warn("Creating eviction stats for gpuid %d failed",
                                        (int)pdd->dev->id);
+
+               /* Add sysfs file to report compute unit occupancy */
+               if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL) {
+                       pdd->attr_cu_occupancy.name = "cu_occupancy";
+                       pdd->attr_cu_occupancy.mode = KFD_SYSFS_FILE_MODE;
+                       sysfs_attr_init(&pdd->attr_cu_occupancy);
+                       ret = sysfs_create_file(kobj_stats,
+                                               &pdd->attr_cu_occupancy);
+                       if (ret)
+                               pr_warn("Creating %s failed for gpuid: %d",
+                                       pdd->attr_cu_occupancy.name,
+                                       (int)pdd->dev->id);
+               }
        }
 err:
        return ret;
@@ -537,7 +603,6 @@ static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
        return ret;
 }
 
-
 void kfd_procfs_del_queue(struct queue *q)
 {
        if (!q)
@@ -750,11 +815,6 @@ struct kfd_process *kfd_create_process(struct file *filep)
                        pr_warn("Creating sysfs stats dir for pid %d failed",
                                (int)process->lead_thread->pid);
 
-               ret = kfd_procfs_add_sysfs_stats(process);
-               if (ret)
-                       pr_warn("Creating sysfs stats dir for pid %d failed",
-                               (int)process->lead_thread->pid);
-
                ret = kfd_procfs_add_sysfs_files(process);
                if (ret)
                        pr_warn("Creating sysfs usage file for pid %d failed",
@@ -876,6 +936,8 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
                kfree(pdd->qpd.doorbell_bitmap);
                idr_destroy(&pdd->alloc_idr);
 
+               kfd_free_process_doorbells(pdd->dev, pdd->doorbell_index);
+
                /*
                 * before destroying pdd, make sure to report availability
                 * for auto suspend
@@ -912,6 +974,8 @@ static void kfd_process_wq_release(struct work_struct *work)
                        sysfs_remove_file(p->kobj, &pdd->attr_vram);
                        sysfs_remove_file(p->kobj, &pdd->attr_sdma);
                        sysfs_remove_file(p->kobj, &pdd->attr_evict);
+                       if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL)
+                               sysfs_remove_file(p->kobj, &pdd->attr_cu_occupancy);
                        kobject_del(pdd->kobj_stats);
                        kobject_put(pdd->kobj_stats);
                        pdd->kobj_stats = NULL;
@@ -932,8 +996,6 @@ static void kfd_process_wq_release(struct work_struct *work)
        kfd_event_free_process(p);
 
        kfd_pasid_free(p->pasid);
-       kfd_free_process_doorbells(p);
-
        mutex_destroy(&p->mutex);
 
        put_task_struct(p->lead_thread);
@@ -1111,9 +1173,6 @@ static struct kfd_process *create_process(const struct task_struct *thread)
        if (process->pasid == 0)
                goto err_alloc_pasid;
 
-       if (kfd_alloc_process_doorbells(process) < 0)
-               goto err_alloc_doorbells;
-
        err = pqm_init(&process->pqm, process);
        if (err != 0)
                goto err_process_pqm_init;
@@ -1141,8 +1200,6 @@ err_register_notifier:
 err_init_apertures:
        pqm_uninit(&process->pqm);
 err_process_pqm_init:
-       kfd_free_process_doorbells(process);
-err_alloc_doorbells:
        kfd_pasid_free(process->pasid);
 err_alloc_pasid:
        mutex_destroy(&process->mutex);
@@ -1205,10 +1262,14 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
        if (!pdd)
                return NULL;
 
+       if (kfd_alloc_process_doorbells(dev, &pdd->doorbell_index) < 0) {
+               pr_err("Failed to alloc doorbell for pdd\n");
+               goto err_free_pdd;
+       }
+
        if (init_doorbell_bitmap(&pdd->qpd, dev)) {
                pr_err("Failed to init doorbell for process\n");
-               kfree(pdd);
-               return NULL;
+               goto err_free_pdd;
        }
 
        pdd->dev = dev;
@@ -1231,6 +1292,10 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
        idr_init(&pdd->alloc_idr);
 
        return pdd;
+
+err_free_pdd:
+       kfree(pdd);
+       return NULL;
 }
 
 /**
index 9c1e003..34f6369 100644 (file)
@@ -149,6 +149,8 @@ struct amdgpu_dm_backlight_caps {
  * @cached_state: Caches device atomic state for suspend/resume
  * @cached_dc_state: Cached state of content streams
  * @compressor: Frame buffer compression buffer. See &struct dm_comressor_info
+ * @force_timing_sync: set via debugfs. When set, indicates that all connected
+ *                    displays will be forced to synchronize.
  */
 struct amdgpu_display_manager {
 
index 004cd8d..8cd646e 100644 (file)
@@ -908,7 +908,7 @@ static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf,
        struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
 
        if (size < sizeof(connector->debugfs_dpcd_address))
-               return 0;
+               return -EINVAL;
 
        r = copy_from_user(&connector->debugfs_dpcd_address,
                        buf, sizeof(connector->debugfs_dpcd_address));
@@ -923,7 +923,7 @@ static ssize_t dp_dpcd_size_write(struct file *f, const char __user *buf,
        struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
 
        if (size < sizeof(connector->debugfs_dpcd_size))
-               return 0;
+               return -EINVAL;
 
        r = copy_from_user(&connector->debugfs_dpcd_size,
                        buf, sizeof(connector->debugfs_dpcd_size));
@@ -943,8 +943,8 @@ static ssize_t dp_dpcd_data_write(struct file *f, const char __user *buf,
        struct dc_link *link = connector->dc_link;
        uint32_t write_size = connector->debugfs_dpcd_size;
 
-       if (size < write_size)
-               return 0;
+       if (!write_size || size < write_size)
+               return -EINVAL;
 
        data = kzalloc(write_size, GFP_KERNEL);
        if (!data)
@@ -967,7 +967,7 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
        struct dc_link *link = connector->dc_link;
        uint32_t read_size = connector->debugfs_dpcd_size;
 
-       if (size < read_size)
+       if (!read_size || size < read_size)
                return 0;
 
        data = kzalloc(read_size, GFP_KERNEL);
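
The three write-handler fixes above share one idea: a request too short to parse (or a configured size of zero) is a caller error, so the handler should fail with -EINVAL rather than return 0, which a looping writer may treat as "try again". A hedged sketch of the pattern (hypothetical handler, not from this patch):

    /* Sketch: validate the request size before touching user memory. */
    static ssize_t example_write(struct file *f, const char __user *buf,
                                 size_t size, loff_t *pos)
    {
            u32 value;

            if (size < sizeof(value))
                    return -EINVAL;        /* too short to be a valid request */
            if (copy_from_user(&value, buf, sizeof(value)))
                    return -EFAULT;        /* user pointer fault */
            return sizeof(value);          /* bytes consumed */
    }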
index 694c5bc..c2cd184 100644 (file)
@@ -604,7 +604,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct
        int i = 0;
 
        hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
-       if (hdcp_work == NULL)
+       if (ZERO_OR_NULL_PTR(hdcp_work))
                return NULL;
 
        hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL);
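
ZERO_OR_NULL_PTR() matters here because kcalloc() with a zero count returns ZERO_SIZE_PTR, a non-NULL sentinel that must never be dereferenced; a plain NULL check would let a max_caps of 0 slip through. A short sketch of the distinction (zero count assumed for illustration):

    struct hdcp_workqueue *work = kcalloc(0, sizeof(*work), GFP_KERNEL);

    /* work is ZERO_SIZE_PTR ((void *)16) here: non-NULL, yet any dereference
     * is a bug. ZERO_OR_NULL_PTR() catches it and real allocation failure. */
    if (ZERO_OR_NULL_PTR(work))
            return NULL;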
index 9d7333a..eee19ed 100644 (file)
@@ -159,7 +159,20 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
        u8 dsc_caps[16] = { 0 };
 
        aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
+#if defined(CONFIG_HP_HOOK_WORKAROUND)
+       /*
+        * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
+        * because it only checks the dsc/fec caps of the port itself and not
+        * those of the dock.
+        *
+        * This case returns NULL: a DSC-capable MST dock connected to a
+        * non-fec/dsc-capable display.
+        *
+        * Workaround: explicitly check for the use case above and use the MST
+        * dock's aux as dsc_aux.
+        */
 
+       if (!aconnector->dsc_aux && !port->parent->port_parent)
+               aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
+#endif
        if (!aconnector->dsc_aux)
                return false;
 
@@ -634,7 +647,7 @@ static void try_disable_dsc(struct drm_atomic_state *state,
        for (i = 0; i < count; i++) {
                if (vars[i].dsc_enabled
                                && vars[i].bpp_x16 == params[i].bw_range.max_target_bpp_x16
-                               && !params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
+                               && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) {
                        kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps;
                        tried[i] = false;
                        remaining_to_try += 1;
index d031bd3..807dca8 100644 (file)
@@ -79,8 +79,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz)
        memset(&dce_clk_params, 0, sizeof(dce_clk_params));
 
        /* Make sure the requested clock isn't lower than the minimum threshold */
-       if (requested_clk_khz > 0)
-               requested_clk_khz = max(requested_clk_khz,
+       requested_clk_khz = max(requested_clk_khz,
                                clk_mgr_dce->base.dentist_vco_freq_khz / 62);
 
        dce_clk_params.target_clock_frequency = requested_clk_khz;
index 136ae6d..2f8fee0 100644 (file)
@@ -784,7 +784,6 @@ void rn_clk_mgr_construct(
        } else {
                struct clk_log_info log_info = {0};
 
-               clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr);
                clk_mgr->periodic_retraining_disabled = rn_vbios_smu_is_periodic_retraining_disabled(clk_mgr);
 
                /* SMU Version 55.51.0 and up no longer have an issue
index 83ce55e..1eb29c3 100644 (file)
@@ -735,6 +735,8 @@ static bool dc_construct(struct dc *dc,
        dc->clk_mgr->force_smu_not_present = init_params->force_smu_not_present;
 #endif
 
+       dc->debug.force_ignore_link_settings = init_params->force_ignore_link_settings;
+
        if (dc->res_pool->funcs->update_bw_bounding_box)
                dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
 
@@ -842,6 +844,60 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
        dc_release_state(current_ctx);
 }
 
+static void disable_vbios_mode_if_required(
+               struct dc *dc,
+               struct dc_state *context)
+{
+       unsigned int i, j;
+
+       /* if the timing changed, disable the stream */
+       for (i = 0; i < dc->res_pool->pipe_count; i++) {
+               struct dc_stream_state *stream = NULL;
+               struct dc_link *link = NULL;
+               struct pipe_ctx *pipe = NULL;
+
+               pipe = &context->res_ctx.pipe_ctx[i];
+               stream = pipe->stream;
+               if (stream == NULL)
+                       continue;
+
+               if (stream->link->local_sink &&
+                       stream->link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+                       link = stream->link;
+               }
+
+               if (link != NULL) {
+                       unsigned int enc_inst, tg_inst = 0;
+                       unsigned int pix_clk_100hz;
+
+                       enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
+                       if (enc_inst != ENGINE_ID_UNKNOWN) {
+                               for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+                                       if (dc->res_pool->stream_enc[j]->id == enc_inst) {
+                                               tg_inst = dc->res_pool->stream_enc[j]->funcs->dig_source_otg(
+                                                       dc->res_pool->stream_enc[j]);
+                                               break;
+                                       }
+                               }
+
+                               dc->res_pool->dp_clock_source->funcs->get_pixel_clk_frequency_100hz(
+                                       dc->res_pool->dp_clock_source,
+                                       tg_inst, &pix_clk_100hz);
+
+                               if (link->link_status.link_active) {
+                                       uint32_t requested_pix_clk_100hz =
+                                               pipe->stream_res.pix_clk_params.requested_pix_clk_100hz;
+
+                                       if (pix_clk_100hz != requested_pix_clk_100hz) {
+                                               core_link_disable_stream(pipe);
+                                               pipe->stream->dpms_off = false;
+                                       }
+                               }
+                       }
+               }
+       }
+}
+
 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
 {
        int i;
@@ -1278,15 +1334,17 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
        for (i = 0; i < context->stream_count; i++)
                dc_streams[i] =  context->streams[i];
 
-       if (!dcb->funcs->is_accelerated_mode(dcb))
+       if (!dcb->funcs->is_accelerated_mode(dcb)) {
+               disable_vbios_mode_if_required(dc, context);
                dc->hwss.enable_accelerated_mode(dc, context);
+       }
 
-       for (i = 0; i < context->stream_count; i++) {
+       for (i = 0; i < context->stream_count; i++)
                if (context->streams[i]->apply_seamless_boot_optimization)
                        dc->optimize_seamless_boot_streams++;
-       }
 
-       if (context->stream_count > dc->optimize_seamless_boot_streams)
+       if (context->stream_count > dc->optimize_seamless_boot_streams ||
+               context->stream_count == 0)
                dc->hwss.prepare_bandwidth(dc, context);
 
        disable_dangling_plane(dc, context);
@@ -1368,7 +1426,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 
        dc_enable_stereo(dc, context, dc_streams, context->stream_count);
 
-       if (context->stream_count > dc->optimize_seamless_boot_streams) {
+       if (context->stream_count > dc->optimize_seamless_boot_streams ||
+               context->stream_count == 0) {
                /* Must wait for no flips to be pending before doing optimize bw */
                wait_for_no_pipes_pending(dc, context);
                /* pplib is notified if disp_num changed */
index c026b39..2a90804 100644 (file)
@@ -177,7 +177,7 @@ static bool is_ycbcr709_limited_type(
                ret = true;
        return ret;
 }
-enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
+static enum dc_color_space_type get_color_space_type(enum dc_color_space color_space)
 {
        enum dc_color_space_type type = COLOR_SPACE_RGB_TYPE;
 
index 1871ff6..fec87a2 100644 (file)
@@ -2441,7 +2441,7 @@ enum dc_status dc_link_validate_mode_timing(
        /* A hack to avoid failing any modes for EDID override feature on
         * topology change such as lower quality cable for DP or different dongle
         */
-       if (link->remote_sinks[0])
+       if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL)
                return DC_OK;
 
        /* Passive Dongle */
@@ -2566,7 +2566,7 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
        link->psr_settings.psr_allow_active = allow_active;
 
        if (psr != NULL && link->psr_settings.psr_feature_enabled)
-               psr->funcs->psr_enable(psr, allow_active);
+               psr->funcs->psr_enable(psr, allow_active, wait);
        else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled)
                dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
        else
index b984eec..dec12de 100644 (file)
@@ -148,14 +148,6 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
        return p->payloads.count;
 }
 
-static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p)
-{
-       if (!p)
-               return;
-
-       dal_vector_destruct(&p->payloads);
-}
-
 #define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
 
 void dal_ddc_i2c_payloads_add(
@@ -582,7 +574,7 @@ bool dal_ddc_service_query_ddc_data(
                                ddc->link,
                                &command);
 
-               dal_ddc_i2c_payloads_destroy(&payloads);
+               dal_vector_destruct(&payloads.payloads);
        }
 
        return success;
index b9b66db..ff1e996 100644 (file)
@@ -49,6 +49,23 @@ static struct dc_link_settings get_common_supported_link_settings(
                struct dc_link_settings link_setting_a,
                struct dc_link_settings link_setting_b);
 
+static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link,
+               const struct dc_link_settings *link_settings)
+{
+       union training_aux_rd_interval training_rd_interval;
+       uint32_t wait_in_micro_secs = 100;
+
+       memset(&training_rd_interval, 0, sizeof(training_rd_interval));
+       core_link_read_dpcd(
+                       link,
+                       DP_TRAINING_AUX_RD_INTERVAL,
+                       (uint8_t *)&training_rd_interval,
+                       sizeof(training_rd_interval));
+       if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+               wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+       return wait_in_micro_secs;
+}
+
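
get_cr_training_aux_rd_interval() above applies the DPCD rule for TRAINING_AUX_RD_INTERVAL: a zero field keeps the 100us default between clock-recovery reads, while a nonzero field means that many multiples of 4ms. A standalone restatement (hypothetical helper; the field value is assumed to be already read from DPCD):

    /* 0 -> 100us default; N -> N * 4000us, e.g. 2 -> 8000us. */
    static unsigned int example_cr_wait_us(unsigned int rd_interval_field)
    {
            return rd_interval_field ? rd_interval_field * 4000u : 100u;
    }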
 static uint32_t get_eq_training_aux_rd_interval(
        struct dc_link *link,
        const struct dc_link_settings *link_settings)
@@ -1247,7 +1264,7 @@ static void initialize_training_settings(
        if (overrides->cr_pattern_time != NULL)
                lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
        else
-               lt_settings->cr_pattern_time = 100;
+               lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting);
 
        if (overrides->eq_pattern_time != NULL)
                lt_settings->eq_pattern_time = *overrides->eq_pattern_time;
index 81c0263..11a619b 100644 (file)
@@ -229,6 +229,8 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
                dp_receiver_power_ctrl(link, false);
 
        if (signal == SIGNAL_TYPE_EDP) {
+               if (link->dc->hwss.edp_backlight_control)
+                       link->dc->hwss.edp_backlight_control(link, false);
                link->link_enc->funcs->disable_output(link->link_enc, signal);
                link->dc->hwss.edp_power_control(link, false);
        } else {
@@ -491,13 +493,15 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
                                OPTC_DSC_DISABLED, 0, 0);
 
                /* disable DSC in stream encoder */
-               if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
-                       pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
-                                       pipe_ctx->stream_res.stream_enc,
-                                       OPTC_DSC_DISABLED, 0, 0);
-
-                       pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
-                                       pipe_ctx->stream_res.stream_enc, false, NULL);
+               if (dc_is_dp_signal(stream->signal)) {
+
+                       if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+                               pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
+                                               pipe_ctx->stream_res.stream_enc,
+                                               OPTC_DSC_DISABLED, 0, 0);
+                               pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+                                                       pipe_ctx->stream_res.stream_enc, false, NULL);
+                       }
                }
 
                /* disable DSC block */
@@ -534,7 +538,6 @@ out:
 bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
 {
        struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
-       struct dc *dc = pipe_ctx->stream->ctx->dc;
        struct dc_stream_state *stream = pipe_ctx->stream;
 
        if (!pipe_ctx->stream->timing.flags.DSC || !dsc)
@@ -557,7 +560,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
 
                DC_LOG_DSC(" ");
                dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
-               if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+               if (dc_is_dp_signal(stream->signal)) {
                        DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
                        pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
                                                                        pipe_ctx->stream_res.stream_enc,
@@ -566,7 +569,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
                }
        } else {
                /* disable DSC PPS in stream encoder */
-               if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+               if (dc_is_dp_signal(stream->signal)) {
                        pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
                                                pipe_ctx->stream_res.stream_enc, false, NULL);
                }
index 4cea934..e430148 100644 (file)
@@ -785,14 +785,15 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
        /*
         * Only the leftmost ODM pipe should be offset by a nonzero distance
         */
-       if (!pipe_ctx->prev_odm_pipe)
+       if (!pipe_ctx->prev_odm_pipe) {
                data->recout.x = stream->dst.x;
-       else
-               data->recout.x = 0;
-       if (stream->src.x < surf_clip.x)
-               data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
+               if (stream->src.x < surf_clip.x)
+                       data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
                                                / stream->src.width;
 
+       } else
+               data->recout.x = 0;
+
        data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
        if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
                data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
index d9b22d6..82fe0ab 100644 (file)
@@ -42,7 +42,7 @@
 #include "inc/hw/dmcu.h"
 #include "dml/display_mode_lib.h"
 
-#define DC_VER "3.2.102"
+#define DC_VER "3.2.104"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -503,6 +503,7 @@ struct dc_debug_options {
        bool usbc_combo_phy_reset_wa;
        bool disable_dsc;
        bool enable_dram_clock_change_one_display_vactive;
+       bool force_ignore_link_settings;
 };
 
 struct dc_debug_data {
@@ -660,6 +661,7 @@ struct dc_init_data {
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        bool force_smu_not_present;
 #endif
+       bool force_ignore_link_settings;
 };
 
 struct dc_callback_init {
index e002ef7..266b93a 100644 (file)
@@ -237,6 +237,8 @@ enum dc_detect_reason {
        DETECT_REASON_BOOT,
        DETECT_REASON_HPD,
        DETECT_REASON_HPDRX,
+       DETECT_REASON_FALLBACK,
+       DETECT_REASON_RETRAIN
 };
 
 bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
index df7f826..74f7619 100644 (file)
@@ -159,11 +159,15 @@ static uint32_t dce_panel_cntl_hw_init(struct panel_cntl *panel_cntl)
 static bool dce_is_panel_backlight_on(struct panel_cntl *panel_cntl)
 {
        struct dce_panel_cntl *dce_panel_cntl = TO_DCE_PANEL_CNTL(panel_cntl);
-       uint32_t value;
+       uint32_t blon, blon_ovrd, pwrseq_target_state;
 
-       REG_GET(PWRSEQ_CNTL, LVTMA_BLON, &value);
+       REG_GET_2(PWRSEQ_CNTL, LVTMA_BLON, &blon, LVTMA_BLON_OVRD, &blon_ovrd);
+       REG_GET(PWRSEQ_CNTL, LVTMA_PWRSEQ_TARGET_STATE, &pwrseq_target_state);
 
-       return value;
+       if (blon_ovrd)
+               return blon;
+       else
+               return pwrseq_target_state;
 }
 
 static bool dce_is_panel_powered_on(struct panel_cntl *panel_cntl)
index 99c68ca..6bd1196 100644 (file)
        SR(BL_PWM_CNTL2), \
        SR(BL_PWM_PERIOD_CNTL), \
        SR(BL_PWM_GRP1_REG_LOCK), \
-       SR(BIOS_SCRATCH_2)
+       NBIO_SR(BIOS_SCRATCH_2)
 
 #define DCE_PANEL_CNTL_SF(reg_name, field_name, post_fix)\
        .field_name = reg_name ## __ ## field_name ## post_fix
 
 #define DCE_PANEL_CNTL_MASK_SH_LIST(mask_sh) \
        DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh),\
+       DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_BLON_OVRD, mask_sh),\
        DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, mask_sh),\
        DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON_OVRD, mask_sh),\
+       DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_CNTL, LVTMA_PWRSEQ_TARGET_STATE, mask_sh), \
        DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh), \
        DCE_PANEL_CNTL_SF(LVTMA_PWRSEQ_REF_DIV, BL_PWM_REF_DIV, mask_sh), \
        DCE_PANEL_CNTL_SF(BL_PWM_PERIOD_CNTL, BL_PWM_PERIOD, mask_sh), \
 
 #define DCE_PANEL_CNTL_REG_FIELD_LIST(type) \
        type LVTMA_BLON;\
+       type LVTMA_BLON_OVRD;\
        type LVTMA_DIGON;\
        type LVTMA_DIGON_OVRD;\
+       type LVTMA_PWRSEQ_TARGET_STATE; \
        type LVTMA_PWRSEQ_TARGET_STATE_R; \
        type BL_PWM_REF_DIV; \
        type BL_PWM_EN; \
index 5167d6b..67af67e 100644 (file)
@@ -119,10 +119,11 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *
 /**
  * Enable/Disable PSR.
  */
-static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
+static void dmub_psr_enable(struct dmub_psr *dmub, bool enable, bool wait)
 {
        union dmub_rb_cmd cmd;
        struct dc_context *dc = dmub->ctx;
+       uint32_t retry_count, psr_state = 0;
 
        cmd.psr_enable.header.type = DMUB_CMD__PSR;
 
@@ -136,6 +137,30 @@ static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
        dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dc->dmub_srv);
        dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+       /* The loop below polls up to 1000 x 500us = 500 ms.
+        * Exiting PSR may need 1-2 frames for the panel to power up, so time
+        * out only after at least a few frames. The max retry assert below
+        * should never be hit.
+        */
+       if (wait) {
+               for (retry_count = 0; retry_count <= 1000; retry_count++) {
+                       dmub_psr_get_state(dmub, &psr_state);
+
+                       if (enable) {
+                               if (psr_state != 0)
+                                       break;
+                       } else {
+                               if (psr_state == 0)
+                                       break;
+                       }
+
+                       udelay(500);
+               }
+
+               /* assert if max retry hit */
+               if (retry_count >= 1000)
+                       ASSERT(0);
+       }
 }
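
With the added wait flag, dmub_psr_enable() becomes an optionally blocking call: it polls the firmware-reported PSR state every 500us for up to roughly 500ms. A hedged usage sketch (assuming a valid struct dmub_psr *dmub):

    /* Blocking enable: return only once firmware reports a nonzero PSR
     * state, or after the ~500 ms polling bound expires. */
    dmub->funcs->psr_enable(dmub, true, true);

    /* Fire-and-forget disable: queue the command without polling. */
    dmub->funcs->psr_enable(dmub, false, false);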
 
 /**
@@ -231,10 +256,11 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
        copy_settings_data->smu_optimizations_en                = psr_context->allow_smu_optimizations;
        copy_settings_data->frame_delay                         = psr_context->frame_delay;
        copy_settings_data->frame_cap_ind                       = psr_context->psrFrameCaptureIndicationReq;
+       copy_settings_data->init_sdp_deadline                   = psr_context->sdpTransmitLineNumDeadline;
+       copy_settings_data->debug.u32All = 0;
        copy_settings_data->debug.bitfields.visual_confirm      = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ?
                                                                        true : false;
-       copy_settings_data->debug.bitfields.use_hw_lock_mgr     = 1;
-       copy_settings_data->init_sdp_deadline                   = psr_context->sdpTransmitLineNumDeadline;
+       copy_settings_data->debug.bitfields.use_hw_lock_mgr             = 1;
 
        dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
        dc_dmub_srv_cmd_execute(dc->dmub_srv);
index f404fec..dc121ed 100644 (file)
@@ -36,7 +36,7 @@ struct dmub_psr {
 
 struct dmub_psr_funcs {
        bool (*psr_copy_settings)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
-       void (*psr_enable)(struct dmub_psr *dmub, bool enable);
+       void (*psr_enable)(struct dmub_psr *dmub, bool enable, bool wait);
        void (*psr_get_state)(struct dmub_psr *dmub, uint32_t *psr_state);
        void (*psr_set_level)(struct dmub_psr *dmub, uint16_t psr_level);
 };
index 1002ce9..3ac6c7b 100644 (file)
@@ -1654,7 +1654,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
                // enable fastboot if backend is enabled on eDP
                if (edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc)) {
                        /* Set optimization flag on eDP stream*/
-                       if (edp_stream) {
+                       if (edp_stream && edp_link->link_status.link_active) {
                                edp_stream->apply_edp_fast_boot_optimization = true;
                                can_apply_edp_fast_boot = true;
                        }
@@ -2737,7 +2737,7 @@ static void program_output_csc(struct dc *dc,
        }
 }
 
-void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
+static void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
 {
        struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
        struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
@@ -2782,7 +2782,7 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
                mi->funcs->set_cursor_position(mi, &pos_cpy, &param);
 }
 
-void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+static void dce110_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
 {
        struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
 
@@ -2890,6 +2890,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
        .setup_stereo = NULL,
        .set_avmute = dce110_set_avmute,
        .wait_for_mpcc_disconnect = dce110_wait_for_mpcc_disconnect,
+       .edp_backlight_control = dce110_edp_backlight_control,
        .edp_power_control = dce110_edp_power_control,
        .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
        .set_cursor_position = dce110_set_cursor_position,
index a1d1559..b24c8ae 100644 (file)
@@ -66,6 +66,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
        .get_hw_state = dcn10_get_hw_state,
        .clear_status_bits = dcn10_clear_status_bits,
        .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+       .edp_backlight_control = dce110_edp_backlight_control,
        .edp_power_control = dce110_edp_power_control,
        .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
        .set_cursor_position = dcn10_set_cursor_position,
index 2972392..800be26 100644 (file)
@@ -288,6 +288,17 @@ void optc1_program_timing(
        if (optc1_is_two_pixels_per_containter(&patched_crtc_timing) || optc1->opp_count == 2)
                h_div = H_TIMING_DIV_BY2;
 
+       if (REG(OPTC_DATA_FORMAT_CONTROL)) {
+               uint32_t data_fmt = 0;
+
+               if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+                       data_fmt = 1;
+               else if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+                       data_fmt = 2;
+
+               REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
+       }
+
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
        if (optc1->tg_mask->OTG_H_TIMING_DIV_MODE != 0) {
                if (optc1->opp_count == 4)
index 1abd81e..a78712c 100644 (file)
@@ -798,7 +798,7 @@ static const struct encoder_feature_support link_enc_feature = {
                .max_hdmi_deep_color = COLOR_DEPTH_121212,
                .max_hdmi_pixel_clock = 600000,
                .hdmi_ycbcr420_supported = true,
-               .dp_ycbcr420_supported = false,
+               .dp_ycbcr420_supported = true,
                .flags.bits.IS_HBR2_CAPABLE = true,
                .flags.bits.IS_HBR3_CAPABLE = true,
                .flags.bits.IS_TPS3_CAPABLE = true,
index 9cf139b..f70fcad 100644 (file)
@@ -896,10 +896,10 @@ void enc1_stream_encoder_dp_blank(
         */
        REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
        /* Larger delay to wait until VBLANK - use max retry of
-        * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
+        * 10us*10020=100.2ms. This covers 100.0ms of minimum 10 Hz mode +
         * a little more because we may not trust delay accuracy.
         */
-       max_retries = DP_BLANK_MAX_RETRY * 250;
+       max_retries = DP_BLANK_MAX_RETRY * 501;
 
        /* disable DP stream */
        REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
index 966e179..072193c 100644 (file)
@@ -68,6 +68,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
        .get_hw_state = dcn10_get_hw_state,
        .clear_status_bits = dcn10_clear_status_bits,
        .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+       .edp_backlight_control = dce110_edp_backlight_control,
        .edp_power_control = dce110_edp_power_control,
        .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
        .set_cursor_position = dcn10_set_cursor_position,
index 8c16967..d8b18c5 100644 (file)
@@ -239,7 +239,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
        int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
                        / opp_cnt;
        uint32_t memory_mask;
-       uint32_t data_fmt = 0;
 
        ASSERT(opp_cnt == 2);
 
@@ -262,13 +261,6 @@ void optc2_set_odm_combine(struct timing_generator *optc, int *opp_id, int opp_c
                REG_SET(OPTC_MEMORY_CONFIG, 0,
                        OPTC_MEM_SEL, memory_mask);
 
-       if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
-               data_fmt = 1;
-       else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
-               data_fmt = 2;
-
-       REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
-
        REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
                        OPTC_NUM_OF_INPUT_SEGMENT, 1,
                        OPTC_SEG0_SRC_SEL, opp_id[0],
index 18b9465..d50a9c3 100644 (file)
@@ -150,7 +150,6 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
        .dispclk_delay_subtotal = 87, //
        .dcfclk_cstate_latency = 10, // SRExitTime
        .max_inter_dcn_tile_repeaters = 8,
-
        .xfc_supported = true,
        .xfc_fill_bw_overhead_percent = 10.0,
        .xfc_fill_constant_bytes = 0,
@@ -298,8 +297,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
                        },
                },
        .num_states = 5,
-       .sr_exit_time_us = 8.6,
-       .sr_enter_plus_exit_time_us = 10.9,
+       .sr_exit_time_us = 11.6,
+       .sr_enter_plus_exit_time_us = 13.9,
        .urgent_latency_us = 4.0,
        .urgent_latency_pixel_data_only_us = 4.0,
        .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
@@ -2203,9 +2202,9 @@ int dcn20_populate_dml_pipes_from_context(
                /* todo: default max for now, until there is logic reflecting this in dc */
                pipes[pipe_cnt].dout.output_bpc = 12;
 #if defined(CONFIG_DRM_AMD_DC_DCN3_0)
-               /*fill up the audio sample rate*/
+               /* fill up the audio sample rate (unit in kHz) */
                get_audio_check(&res_ctx->pipe_ctx[i].stream->audio_info, &aud_check);
-               pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate;
+               pipes[pipe_cnt].dout.max_audio_sample_rate = aud_check.max_audiosample_rate / 1000;
 #endif
                /*
                 * For graphic plane, cursor number is 1, nv12 is 0
index 2ba880c..2b7396c 100644 (file)
@@ -69,6 +69,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
        .get_hw_state = dcn10_get_hw_state,
        .clear_status_bits = dcn10_clear_status_bits,
        .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+       .edp_backlight_control = dce110_edp_backlight_control,
        .edp_power_control = dce110_edp_power_control,
        .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
        .set_cursor_position = dcn10_set_cursor_position,
index 025637a..bd2a068 100644 (file)
@@ -31,9 +31,21 @@ DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \
        dcn30_dio_link_encoder.o dcn30_resource.o
 
 
-CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse -mpreferred-stack-boundary=4
-
+ifdef CONFIG_X86
 CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -maltivec
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -maltivec
+endif
+
+ifdef CONFIG_ARM64
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mgeneral-regs-only
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mgeneral-regs-only
+endif
+
 ifdef CONFIG_CC_IS_GCC
 ifeq ($(call cc-ifversion, -lt, 0701, y), y)
 IS_OLD_GCC = 1
@@ -45,8 +57,10 @@ ifdef IS_OLD_GCC
 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
 # (8B stack alignment).
 CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mpreferred-stack-boundary=4
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -mpreferred-stack-boundary=4
 else
 CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -msse2
+CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -msse2
 endif
 
 AMD_DAL_DCN30 = $(addprefix $(AMDDALPATH)/dc/dcn30/,$(DCN30))
index 19daa45..7c90c22 100644 (file)
@@ -69,6 +69,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
        .get_hw_state = dcn10_get_hw_state,
        .clear_status_bits = dcn10_clear_status_bits,
        .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+       .edp_backlight_control = dce110_edp_backlight_control,
        .edp_power_control = dce110_edp_power_control,
        .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
        .set_cursor_position = dcn10_set_cursor_position,
index 6d13431..b1f228f 100644 (file)
@@ -209,7 +209,6 @@ static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, in
        int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right)
                        / opp_cnt;
        uint32_t memory_mask = 0;
-       uint32_t data_fmt = 0;
 
        /* TODO: In pseudocode but does not affect maximus, delete comment if we don't need on asic
         * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1);
@@ -240,13 +239,6 @@ static void optc3_set_odm_combine(struct timing_generator *optc, int *opp_id, in
                REG_SET(OPTC_MEMORY_CONFIG, 0,
                        OPTC_MEM_SEL, memory_mask);
 
-       if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
-               data_fmt = 1;
-       else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
-               data_fmt = 2;
-
-       REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt);
-
        if (opp_cnt == 2) {
                REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0,
                                OPTC_NUM_OF_INPUT_SEGMENT, 1,
index dde87ba..24fb39a 100644 (file)
@@ -1899,6 +1899,48 @@ static bool dcn30_split_stream_for_mpc_or_odm(
        return true;
 }
 
+static struct pipe_ctx *dcn30_find_split_pipe(
+               struct dc *dc,
+               struct dc_state *context,
+               int old_index)
+{
+       struct pipe_ctx *pipe = NULL;
+       int i;
+
+       if (old_index >= 0 && context->res_ctx.pipe_ctx[old_index].stream == NULL) {
+               pipe = &context->res_ctx.pipe_ctx[old_index];
+               pipe->pipe_idx = old_index;
+       }
+
+       if (!pipe)
+               for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+                       if (dc->current_state->res_ctx.pipe_ctx[i].top_pipe == NULL
+                                       && dc->current_state->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
+                               if (context->res_ctx.pipe_ctx[i].stream == NULL) {
+                                       pipe = &context->res_ctx.pipe_ctx[i];
+                                       pipe->pipe_idx = i;
+                                       break;
+                               }
+                       }
+               }
+
+       /*
+        * We may need to fix pipes getting tossed from one OPP to another on
+        * flip. Re-enable the following ASSERT for debugging transient
+        * underflow during topology updates:
+        * ASSERT(pipe);
+        */
+       if (!pipe)
+               for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
+                       if (context->res_ctx.pipe_ctx[i].stream == NULL) {
+                               pipe = &context->res_ctx.pipe_ctx[i];
+                               pipe->pipe_idx = i;
+                               break;
+                       }
+               }
+
+       return pipe;
+}
+
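
dcn30_find_split_pipe() encodes a three-tier preference: reuse the exact pipe index the plane held in the old state, else take a pipe that is free in the new state and was not a split-secondary pipe in the old one, else accept any pipe free in the new state. A compact standalone sketch of that cascade (hypothetical boolean arrays standing in for the pipe contexts):

    /* Returns a pipe index by the same priority order as above, or -1. */
    static int example_pick_pipe(int old_index, int pipe_count,
                                 const bool was_secondary_in_old[],
                                 const bool free_in_new[])
    {
            int i;

            if (old_index >= 0 && free_in_new[old_index])
                    return old_index;      /* tier 1: slot reused from old state */
            for (i = pipe_count - 1; i >= 0; i--)
                    if (!was_secondary_in_old[i] && free_in_new[i])
                            return i;      /* tier 2: free, not an old secondary */
            for (i = pipe_count - 1; i >= 0; i--)
                    if (free_in_new[i])
                            return i;      /* tier 3: any free pipe */
            return -1;                     /* caller ASSERTs on this */
    }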
 static bool dcn30_internal_validate_bw(
                struct dc *dc,
                struct dc_state *context,
@@ -2024,6 +2066,7 @@ static bool dcn30_internal_validate_bw(
                                dcn20_release_dsc(&context->res_ctx, dc->res_pool, &pipe->stream_res.dsc);
                        memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
                        memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+                       repopulate_pipes = true;
                } else if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) {
                        struct pipe_ctx *top_pipe = pipe->top_pipe;
                        struct pipe_ctx *bottom_pipe = pipe->bottom_pipe;
@@ -2038,6 +2081,7 @@ static bool dcn30_internal_validate_bw(
                        pipe->stream = NULL;
                        memset(&pipe->plane_res, 0, sizeof(pipe->plane_res));
                        memset(&pipe->stream_res, 0, sizeof(pipe->stream_res));
+                       repopulate_pipes = true;
                } else
                        ASSERT(0); /* Should never try to merge master pipe */
 
@@ -2045,8 +2089,10 @@ static bool dcn30_internal_validate_bw(
 
        for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) {
                struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+               struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                struct pipe_ctx *hsplit_pipe = NULL;
                bool odm;
+               int old_index = -1;
 
                if (!pipe->stream || newly_split[i])
                        continue;
@@ -2058,7 +2104,20 @@ static bool dcn30_internal_validate_bw(
                        continue;
 
                if (split[i]) {
-                       hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
+                       if (odm) {
+                               if (split[i] == 4 && old_pipe->next_odm_pipe->next_odm_pipe)
+                                       old_index = old_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
+                               else if (old_pipe->next_odm_pipe)
+                                       old_index = old_pipe->next_odm_pipe->pipe_idx;
+                       } else {
+                               if (split[i] == 4 && old_pipe->bottom_pipe->bottom_pipe &&
+                                               old_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+                                       old_index = old_pipe->bottom_pipe->bottom_pipe->pipe_idx;
+                               else if (old_pipe->bottom_pipe &&
+                                               old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+                                       old_index = old_pipe->bottom_pipe->pipe_idx;
+                       }
+                       hsplit_pipe = dcn30_find_split_pipe(dc, context, old_index);
                        ASSERT(hsplit_pipe);
                        if (!hsplit_pipe)
                                goto validate_fail;
@@ -2072,8 +2131,16 @@ static bool dcn30_internal_validate_bw(
                        repopulate_pipes = true;
                }
                if (split[i] == 4) {
-                       struct pipe_ctx *pipe_4to1 = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
+                       struct pipe_ctx *pipe_4to1;
 
+                       if (odm && old_pipe->next_odm_pipe)
+                               old_index = old_pipe->next_odm_pipe->pipe_idx;
+                       else if (!odm && old_pipe->bottom_pipe &&
+                                               old_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+                               old_index = old_pipe->bottom_pipe->pipe_idx;
+                       else
+                               old_index = -1;
+                       pipe_4to1 = dcn30_find_split_pipe(dc, context, old_index);
                        ASSERT(pipe_4to1);
                        if (!pipe_4to1)
                                goto validate_fail;
@@ -2083,7 +2150,14 @@ static bool dcn30_internal_validate_bw(
                                goto validate_fail;
                        newly_split[pipe_4to1->pipe_idx] = true;
 
-                       pipe_4to1 = find_idle_secondary_pipe(&context->res_ctx, dc->res_pool, pipe);
+                       if (odm && old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe)
+                               old_index = old_pipe->next_odm_pipe->next_odm_pipe->next_odm_pipe->pipe_idx;
+                       else if (!odm && old_pipe->bottom_pipe->bottom_pipe->bottom_pipe &&
+                                               old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->plane_state == old_pipe->plane_state)
+                               old_index = old_pipe->bottom_pipe->bottom_pipe->bottom_pipe->pipe_idx;
+                       else
+                               old_index = -1;
+                       pipe_4to1 = dcn30_find_split_pipe(dc, context, old_index);
                        ASSERT(pipe_4to1);
                        if (!pipe_4to1)
                                goto validate_fail;
@@ -2127,7 +2201,7 @@ validate_out:
        return out;
 }
 
-static void dcn30_calculate_wm(
+void dcn30_calculate_wm_and_dlg(
                struct dc *dc, struct dc_state *context,
                display_e2e_pipe_params_st *pipes,
                int pipe_cnt,
@@ -2135,6 +2209,8 @@ static void dcn30_calculate_wm(
 {
        int i, pipe_idx;
        double dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
+       bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
+                       dm_dram_clock_change_unsupported;
 
        if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
                dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
@@ -2168,30 +2244,12 @@ static void dcn30_calculate_wm(
        pipes[0].clks_cfg.voltage = vlevel;
        pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
 
-       /* Set C:
-        * DCFCLK: Min Required
-        * FCLK(proportional to UCLK): 1GHz or Max
-        * pstate latency overriden to 5us
-        */
-       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
-               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us;
-               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
-               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
-       }
-       context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
        /* Set D:
         * DCFCLK: Min Required
         * FCLK(proportional to UCLK): 1GHz or Max
         * sr_enter_exit = 4, sr_exit = 2us
         */
+       /*
        if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid) {
                context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us;
                context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us;
@@ -2205,29 +2263,72 @@ static void dcn30_calculate_wm(
        context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
        context->bw_ctx.bw.dcn.watermarks.d.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       */
 
-       /* Set A:
+       /* Set C:
         * DCFCLK: Min Required
         * FCLK(proportional to UCLK): 1GHz or Max
-        *
-        * Set A calculated last so that following calculations are based on Set A
+        * pstate latency overridden to 5us
         */
-       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
-               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
-               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
-               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+       if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
+               unsigned int min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
+               unsigned int min_dram_speed_mts_margin = 160;
+
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us;
+
+               if (context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] == dm_dram_clock_change_unsupported)
+                       min_dram_speed_mts = dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz * 16;
+
+               for (i = 3; i > 0; i--) {
+                       if ((min_dram_speed_mts + min_dram_speed_mts_margin > dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts) &&
+                                       (min_dram_speed_mts - min_dram_speed_mts_margin < dc->clk_mgr->bw_params->dummy_pstate_table[i].dram_speed_mts))
+                               context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[i].dummy_pstate_latency_us;
+               }
+
+               context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us;
+               context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us;
        }
-       context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-       context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
-
-       context->perf_params.stutter_period_us =
-               context->bw_ctx.dml.vba.StutterPeriod;
+       context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       context->bw_ctx.bw.dcn.watermarks.c.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+
+       if (!pstate_en) {
+               /* The only difference between A and C is p-state latency; if p-state is
+                * not supported, we calculate DLG based on the dummy p-state latency and
+                * max out the set A p-state watermark
+                */
+               context->bw_ctx.bw.dcn.watermarks.a = context->bw_ctx.bw.dcn.watermarks.c;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 0x13FFFF;
+       } else {
+               /* Set A:
+                * DCFCLK: Min Required
+                * FCLK(proportional to UCLK): 1GHz or Max
+                *
+                * Set A calculated last so that following calculations are based on Set A
+                */
+               if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid) {
+                       context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
+                       context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us;
+                       context->bw_ctx.dml.soc.sr_exit_time_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us;
+               }
+               context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = get_wm_urgent(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+               context->bw_ctx.bw.dcn.watermarks.a.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000;
+       }
+
+       context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod;
+
+       /* Make set D = set A until set D is enabled */
+       context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
 
        for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
                if (!context->res_ctx.pipe_ctx[i].stream)
@@ -2247,6 +2348,13 @@ static void dcn30_calculate_wm(
 
                pipe_idx++;
        }
+
+       dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+
+       if (!pstate_en)
+               /* Restore full p-state latency */
+               context->bw_ctx.dml.soc.dram_clock_change_latency_us =
+                               dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us;
 }
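
Editor's note: the hunk above reworks the watermark path so that, when p-state switching is unsupported, set A is copied from set C with its p-state watermark maxed out, and the DLG parameters are derived from a dummy p-state latency chosen from the clk_mgr table by DRAM speed. A minimal standalone sketch of that table lookup follows; the struct layout and table size are illustrative, not the real clk_mgr types:

        struct dummy_pstate_entry {
                unsigned int dram_speed_mts;
                double dummy_pstate_latency_us;
        };

        static double pick_dummy_pstate_latency(const struct dummy_pstate_entry tbl[4],
                                                unsigned int min_dram_speed_mts)
        {
                const unsigned int margin_mts = 160; /* +/- window around the target speed */
                double latency_us = tbl[0].dummy_pstate_latency_us; /* fallback entry */
                int i;

                /* Walk from the fastest entry down, as the kernel loop does; the
                 * lowest-indexed in-window entry wins.
                 */
                for (i = 3; i > 0; i--) {
                        if (min_dram_speed_mts + margin_mts > tbl[i].dram_speed_mts &&
                            min_dram_speed_mts - margin_mts < tbl[i].dram_speed_mts)
                                latency_us = tbl[i].dummy_pstate_latency_us;
                }
                return latency_us;
        }
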
 
 bool dcn30_validate_bandwidth(struct dc *dc,
@@ -2279,8 +2387,7 @@ bool dcn30_validate_bandwidth(struct dc *dc,
                goto validate_out;
        }
 
-       dcn30_calculate_wm(dc, context, pipes, pipe_cnt, vlevel);
-       dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+       dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
 
        BW_VAL_TRACE_END_WATERMARKS();
 
@@ -2448,6 +2555,7 @@ static const struct resource_funcs dcn30_res_pool_funcs = {
        .link_enc_create = dcn30_link_encoder_create,
        .panel_cntl_create = dcn30_panel_cntl_create,
        .validate_bandwidth = dcn30_validate_bandwidth,
+       .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
        .populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
        .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
        .add_stream_to_ctx = dcn30_add_stream_to_ctx,
index c9d5f94..d163812 100644 (file)
@@ -55,6 +55,11 @@ unsigned int dcn30_calc_max_scaled_time(
 
 bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
                bool fast_validate);
+void dcn30_calculate_wm_and_dlg(
+               struct dc *dc, struct dc_state *context,
+               display_e2e_pipe_params_st *pipes,
+               int pipe_cnt,
+               int vlevel);
 void dcn30_populate_dml_writeback_from_context(
                struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
 
index 50b7d01..9e0ae18 100644 (file)
@@ -5558,7 +5558,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
                }
        }
 
-       if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+       if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0 && PrefetchMode == 0) {
                *DRAMClockChangeSupport = dm_dram_clock_change_vactive;
        } else if (((mode_lib->vba.SynchronizedVBlank == true || mode_lib->vba.TotalNumberOfActiveOTG == 1 || SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank > 0) && PrefetchMode == 0)) {
                *DRAMClockChangeSupport = dm_dram_clock_change_vblank;
index 1daa563..6e6bc66 100644 (file)
@@ -101,7 +101,7 @@ struct resource_funcs {
                                        struct dc *dc,
                                        struct dc_state *context,
                                        bool fast_validate);
-       void (*calculate_wm)(
+       void (*calculate_wm_and_dlg)(
                                struct dc *dc, struct dc_state *context,
                                display_e2e_pipe_params_st *pipes,
                                int pipe_cnt,
index f0a0d41..1053b16 100644 (file)
@@ -99,6 +99,12 @@ static void virtual_setup_stereo_sync(
                        bool enable)
 {}
 
+static void virtual_stream_encoder_set_dsc_pps_info_packet(
+               struct stream_encoder *enc,
+               bool enable,
+               uint8_t *dsc_packed_pps)
+{}
+
 static const struct stream_encoder_funcs virtual_str_enc_funcs = {
        .dp_set_odm_combine =
                virtual_enc_dp_set_odm_combine,
@@ -128,6 +134,7 @@ static const struct stream_encoder_funcs virtual_str_enc_funcs = {
        .hdmi_reset_stream_attribute = virtual_stream_encoder_reset_hdmi_stream_attribute,
        .dig_connect_to_otg = virtual_dig_connect_to_otg,
        .setup_stereo_sync = virtual_setup_stereo_sync,
+       .dp_set_dsc_pps_info_packet = virtual_stream_encoder_set_dsc_pps_info_packet,
 };
 
 bool virtual_stream_encoder_construct(
index f74c7fa..d103ec1 100644 (file)
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x82f998da6
+#define DMUB_FW_VERSION_GIT_HASH 0x9cf8f05fe
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 32
+#define DMUB_FW_VERSION_REVISION 35
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
@@ -57,6 +57,7 @@
 
 #define SET_ABM_PIPE_GRADUALLY_DISABLE           0
 #define SET_ABM_PIPE_IMMEDIATELY_DISABLE         255
+#define SET_ABM_PIPE_IMMEDIATE_KEEP_GAIN_DISABLE 254
 #define SET_ABM_PIPE_NORMAL                      1
 
 /* Maximum number of streams on any ASIC. */
 #define PHYSICAL_ADDRESS_LOC union large_integer
 #endif
 
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
 #ifndef dmub_memcpy
 #define dmub_memcpy(dest, source, bytes) memcpy((dest), (source), (bytes))
 #endif
@@ -81,6 +78,10 @@ extern "C" {
 #define dmub_memset(dest, val, bytes) memset((dest), (val), (bytes))
 #endif
 
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
 #ifndef dmub_udelay
 #define dmub_udelay(microseconds) udelay(microseconds)
 #endif
@@ -170,7 +171,7 @@ union dmub_fw_boot_status {
                uint32_t dal_fw : 1;
                uint32_t mailbox_rdy : 1;
                uint32_t optimized_init_done : 1;
-               uint32_t reserved : 29;
+               uint32_t restore_required : 1;
        } bits;
        uint32_t all;
 };
@@ -179,6 +180,7 @@ enum dmub_fw_boot_status_bit {
        DMUB_FW_BOOT_STATUS_BIT_DAL_FIRMWARE = (1 << 0),
        DMUB_FW_BOOT_STATUS_BIT_MAILBOX_READY = (1 << 1),
        DMUB_FW_BOOT_STATUS_BIT_OPTIMIZED_INIT_DONE = (1 << 2),
+       DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED = (1 << 3),
 };
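
Editor's note: the boot status union can be decoded either as the raw register value (.all) or through the bitfields; the new restore_required bit tells the driver that firmware state must be re-applied. A hedged sketch of checking it, where read_boot_status_reg() and restore_fw_state() are hypothetical stand-ins:

        union dmub_fw_boot_status status;

        status.all = read_boot_status_reg(); /* hypothetical register read */

        if (status.bits.restore_required)
                restore_fw_state();          /* hypothetical recovery path */

The mask form is equivalent: status.all & DMUB_FW_BOOT_STATUS_BIT_RESTORE_REQUIRED.
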
 
 /* Register bit definition for SCRATCH15 */
@@ -298,9 +300,17 @@ enum dmub_cmd_type {
        DMUB_CMD__PSR = 64,
        DMUB_CMD__ABM = 66,
        DMUB_CMD__HW_LOCK = 69,
+       DMUB_CMD__DP_AUX_ACCESS = 70,
+       DMUB_CMD__OUTBOX1_ENABLE = 71,
        DMUB_CMD__VBIOS = 128,
 };
 
+enum dmub_out_cmd_type {
+       DMUB_OUT_CMD__NULL = 0,
+       DMUB_OUT_CMD__DP_AUX_REPLY = 1,
+       DMUB_OUT_CMD__DP_HPD_NOTIFY = 2,
+};
+
 #pragma pack(push, 1)
 
 struct dmub_cmd_header {
@@ -456,6 +466,78 @@ struct dmub_rb_cmd_dpphy_init {
        uint8_t reserved[60];
 };
 
+enum dp_aux_request_action {
+       DP_AUX_REQ_ACTION_I2C_WRITE             = 0x00,
+       DP_AUX_REQ_ACTION_I2C_READ              = 0x10,
+       DP_AUX_REQ_ACTION_I2C_STATUS_REQ        = 0x20,
+       DP_AUX_REQ_ACTION_I2C_WRITE_MOT         = 0x40,
+       DP_AUX_REQ_ACTION_I2C_READ_MOT          = 0x50,
+       DP_AUX_REQ_ACTION_I2C_STATUS_REQ_MOT    = 0x60,
+       DP_AUX_REQ_ACTION_DPCD_WRITE            = 0x80,
+       DP_AUX_REQ_ACTION_DPCD_READ             = 0x90
+};
+
+/* DP AUX command */
+struct aux_transaction_parameters {
+       uint8_t is_i2c_over_aux;
+       uint8_t action;
+       uint8_t length;
+       uint8_t pad;
+       uint32_t address;
+       uint8_t data[16];
+};
+
+struct dmub_cmd_dp_aux_control_data {
+       uint32_t handle;
+       uint8_t port_index;
+       uint8_t sw_crc_enabled;
+       uint16_t timeout;
+       struct aux_transaction_parameters dpaux;
+};
+
+struct dmub_rb_cmd_dp_aux_access {
+       struct dmub_cmd_header header;
+       struct dmub_cmd_dp_aux_control_data aux_control;
+};
+
+struct dmub_rb_cmd_outbox1_enable {
+       struct dmub_cmd_header header;
+       uint32_t enable;
+};
+
+/* DP AUX Reply command - OutBox Cmd */
+struct aux_reply_data {
+       uint8_t command;
+       uint8_t length;
+       uint8_t pad[2];
+       uint8_t data[16];
+};
+
+struct aux_reply_control_data {
+       uint32_t handle;
+       uint8_t phy_port_index;
+       uint8_t result;
+       uint16_t pad;
+};
+
+struct dmub_rb_cmd_dp_aux_reply {
+       struct dmub_cmd_header header;
+       struct aux_reply_control_data control;
+       struct aux_reply_data reply_data;
+};
+
+struct dp_hpd_data {
+       uint8_t phy_port_index;
+       uint8_t hpd_type;
+       uint8_t hpd_status;
+       uint8_t pad;
+};
+
+struct dmub_rb_cmd_dp_hpd_notify {
+       struct dmub_cmd_header header;
+       struct dp_hpd_data hpd_data;
+};
+
 /*
  * Command IDs should be treated as stable ABI.
  * Do not reuse or modify IDs.
@@ -685,8 +767,15 @@ union dmub_rb_cmd {
        struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level;
        struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac;
        struct dmub_rb_cmd_abm_init_config abm_init_config;
+       struct dmub_rb_cmd_dp_aux_access dp_aux_access;
+       struct dmub_rb_cmd_outbox1_enable outbox1_enable;
 };
 
+union dmub_rb_out_cmd {
+       struct dmub_rb_cmd_common cmd_common;
+       struct dmub_rb_cmd_dp_aux_reply dp_aux_reply;
+       struct dmub_rb_cmd_dp_hpd_notify dp_hpd_notify;
+};
 #pragma pack(pop)
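
Editor's note: with DMUB_CMD__DP_AUX_ACCESS the driver can hand AUX transactions to the firmware and receive the result through the new outbox. A sketch of building a one-byte native-AUX DPCD read; queue_dmub_cmd() is a hypothetical submit helper, and the header is assumed to carry a type field:

        union dmub_rb_cmd cmd = { 0 };

        cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
        cmd.dp_aux_access.aux_control.port_index = 0;
        cmd.dp_aux_access.aux_control.timeout = 600;             /* illustrative */
        cmd.dp_aux_access.aux_control.dpaux.is_i2c_over_aux = 0; /* native AUX */
        cmd.dp_aux_access.aux_control.dpaux.action = DP_AUX_REQ_ACTION_DPCD_READ;
        cmd.dp_aux_access.aux_control.dpaux.address = 0x00000;   /* DPCD_REV */
        cmd.dp_aux_access.aux_control.dpaux.length = 1;

        queue_dmub_cmd(&cmd); /* hypothetical; the reply arrives as a DP_AUX_REPLY outbox cmd */
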
 
 
@@ -759,6 +848,25 @@ static inline bool dmub_rb_push_front(struct dmub_rb *rb,
        return true;
 }
 
+static inline bool dmub_rb_out_push_front(struct dmub_rb *rb,
+                                     const union dmub_rb_out_cmd *cmd)
+{
+       uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
+       const uint8_t *src = (uint8_t *)cmd;
+
+       if (dmub_rb_full(rb))
+               return false;
+
+       dmub_memcpy(dst, src, DMUB_RB_CMD_SIZE);
+
+       rb->wrpt += DMUB_RB_CMD_SIZE;
+
+       if (rb->wrpt >= rb->capacity)
+               rb->wrpt %= rb->capacity;
+
+       return true;
+}
+
 static inline bool dmub_rb_front(struct dmub_rb *rb,
                                 union dmub_rb_cmd  *cmd)
 {
@@ -772,6 +880,23 @@ static inline bool dmub_rb_front(struct dmub_rb *rb,
        return true;
 }
 
+static inline bool dmub_rb_out_front(struct dmub_rb *rb,
+                                union dmub_rb_out_cmd  *cmd)
+{
+       const uint64_t volatile *src = (const uint64_t volatile *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
+       uint64_t *dst = (uint64_t *)cmd;
+       int i;
+
+       if (dmub_rb_empty(rb))
+               return false;
+
+       /* copy the command out of the ring buffer in 64-bit chunks */
+       for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+               *dst++ = *src++;
+
+       return true;
+}
+
 static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
 {
        if (dmub_rb_empty(rb))
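
Editor's note: the outbox direction mirrors the inbox helpers; firmware pushes replies and notifications, and the driver peeks with dmub_rb_out_front() and advances with dmub_rb_pop_front(). A hedged consumer loop, assuming cmd_common carries the shared header; outbox_rb and both handlers are illustrative:

        union dmub_rb_out_cmd out_cmd;

        while (dmub_rb_out_front(&outbox_rb, &out_cmd)) {
                switch (out_cmd.cmd_common.header.type) {
                case DMUB_OUT_CMD__DP_AUX_REPLY:
                        handle_aux_reply(&out_cmd.dp_aux_reply);  /* hypothetical */
                        break;
                case DMUB_OUT_CMD__DP_HPD_NOTIFY:
                        handle_hpd(&out_cmd.dp_hpd_notify);       /* hypothetical */
                        break;
                }
                dmub_rb_pop_front(&outbox_rb);
        }
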
index e98c84e..10dc481 100644 (file)
@@ -47,6 +47,40 @@ enum amd_apu_flags {
        AMD_APU_IS_RENOIR = 0x00000008UL,
 };
 
+/**
+* DOC: IP Blocks
+*
+* GPUs are composed of IP (intellectual property) blocks. These
+* IP blocks provide various functionalities: display, graphics,
+* video decode, etc. The IP blocks that comprise a particular GPU
+* are listed in the GPU's respective SoC file. amdgpu_device.c
+* acquires the list of IP blocks for the GPU in use on initialization.
+* It can then operate on this list to perform standard driver operations
+* such as init, fini, suspend, and resume.
+*
+* IP block implementations are named using the following convention:
+* <functionality>_v<version> (e.g. gfx_v6_0).
+*/
+
+/**
+* enum amd_ip_block_type - Used to classify IP blocks by functionality.
+*
+* @AMD_IP_BLOCK_TYPE_COMMON: GPU Family
+* @AMD_IP_BLOCK_TYPE_GMC: Graphics Memory Controller
+* @AMD_IP_BLOCK_TYPE_IH: Interrupt Handler
+* @AMD_IP_BLOCK_TYPE_SMC: System Management Controller
+* @AMD_IP_BLOCK_TYPE_PSP: Platform Security Processor
+* @AMD_IP_BLOCK_TYPE_DCE: Display and Compositing Engine
+* @AMD_IP_BLOCK_TYPE_GFX: Graphics and Compute Engine
+* @AMD_IP_BLOCK_TYPE_SDMA: System DMA Engine
+* @AMD_IP_BLOCK_TYPE_UVD: Unified Video Decoder
+* @AMD_IP_BLOCK_TYPE_VCE: Video Compression Engine
+* @AMD_IP_BLOCK_TYPE_ACP: Audio Co-Processor
+* @AMD_IP_BLOCK_TYPE_VCN: Video Core/Codec Next
+* @AMD_IP_BLOCK_TYPE_MES: Micro-Engine Scheduler
+* @AMD_IP_BLOCK_TYPE_JPEG: JPEG Engine
+*/
 enum amd_ip_block_type {
        AMD_IP_BLOCK_TYPE_COMMON,
        AMD_IP_BLOCK_TYPE_GMC,
@@ -128,6 +162,34 @@ enum amd_powergating_state {
 #define AMD_PG_SUPPORT_ATHUB                   (1 << 16)
 #define AMD_PG_SUPPORT_JPEG                    (1 << 17)
 
+/**
+ * enum PP_FEATURE_MASK - Used to mask power play features.
+ *
+ * @PP_SCLK_DPM_MASK: Dynamic adjustment of the system (graphics) clock.
+ * @PP_MCLK_DPM_MASK: Dynamic adjustment of the memory clock.
+ * @PP_PCIE_DPM_MASK: Dynamic adjustment of PCIE clocks and lanes.
+ * @PP_SCLK_DEEP_SLEEP_MASK: System (graphics) clock deep sleep.
+ * @PP_POWER_CONTAINMENT_MASK: Power containment.
+ * @PP_UVD_HANDSHAKE_MASK: Unified video decoder handshake.
+ * @PP_SMC_VOLTAGE_CONTROL_MASK: Dynamic voltage control.
+ * @PP_VBI_TIME_SUPPORT_MASK: Vertical blank interval support.
+ * @PP_ULV_MASK: Ultra low voltage.
+ * @PP_ENABLE_GFX_CG_THRU_SMU: SMU control of GFX engine clockgating.
+ * @PP_CLOCK_STRETCH_MASK: Clock stretching.
+ * @PP_OD_FUZZY_FAN_CONTROL_MASK: Overdrive fuzzy fan control.
+ * @PP_SOCCLK_DPM_MASK: Dynamic adjustment of the SoC clock.
+ * @PP_DCEFCLK_DPM_MASK: Dynamic adjustment of the Display Controller Engine Fabric clock.
+ * @PP_OVERDRIVE_MASK: Over- and under-clocking support.
+ * @PP_GFXOFF_MASK: Dynamic graphics engine power control.
+ * @PP_ACG_MASK: Adaptive clock generator.
+ * @PP_STUTTER_MODE: Stutter mode.
+ * @PP_AVFS_MASK: Adaptive voltage and frequency scaling.
+ *
+ * To override these settings on boot, append amdgpu.ppfeaturemask=<mask> to
+ * the kernel's command line parameters. This is usually done through a system's
+ * boot loader (e.g. GRUB). If loading the driver manually, pass
+ * ppfeaturemask=<mask> as a modprobe parameter.
+ */
 enum PP_FEATURE_MASK {
        PP_SCLK_DPM_MASK = 0x1,
        PP_MCLK_DPM_MASK = 0x2,
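
Editor's note: as the kernel-doc above describes, individual power-play features can be masked at boot. A hedged example of deriving a mask that drops GFXOFF from an otherwise all-enabled mask; the actual default mask differs per kernel version and ASIC:

        /* illustrative only; pass the result as amdgpu.ppfeaturemask=0x... */
        unsigned int ppfeaturemask = 0xffffffff & ~PP_GFXOFF_MASK;
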
@@ -165,56 +227,59 @@ enum DC_DEBUG_MASK {
 };
 
 enum amd_dpm_forced_level;
+
 /**
  * struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
+ * @name: Name of IP block
+ * @early_init: sets up early driver state (pre sw_init),
+ *              does not configure hw - Optional
+ * @late_init: sets up late driver/hw state (post hw_init) - Optional
+ * @sw_init: sets up driver state, does not configure hw
+ * @sw_fini: tears down driver state, does not configure hw
+ * @hw_init: sets up the hw state
+ * @hw_fini: tears down the hw state
+ * @late_fini: final cleanup
+ * @suspend: handles IP specific hw/sw changes for suspend
+ * @resume: handles IP specific hw/sw changes for resume
+ * @is_idle: returns current IP block idle status
+ * @wait_for_idle: poll for idle
+ * @check_soft_reset: check whether the IP block requires a soft reset
+ * @pre_soft_reset: prepare the IP block for a soft reset
+ * @soft_reset: soft reset the IP block
+ * @post_soft_reset: clean up after a soft reset of the IP block
+ * @set_clockgating_state: enable/disable cg for the IP block
+ * @set_powergating_state: enable/disable pg for the IP block
+ * @get_clockgating_state: get current clockgating status
+ * @enable_umd_pstate: enable UMD powerstate
+ *
+ * These hooks provide an interface for controlling the operational state
+ * of IP blocks. After acquiring a list of IP blocks for the GPU in use,
+ * the driver can make chip-wide state changes by walking this list and
+ * making calls to hooks from each IP block. This list is ordered to ensure
+ * that the driver initializes the IP blocks in a safe sequence.
  */
 struct amd_ip_funcs {
-       /** @name: Name of IP block */
        char *name;
-       /**
-        * @early_init:
-        *
-        * sets up early driver state (pre sw_init),
-        * does not configure hw - Optional
-        */
        int (*early_init)(void *handle);
-       /** @late_init: sets up late driver/hw state (post hw_init) - Optional */
        int (*late_init)(void *handle);
-       /** @sw_init: sets up driver state, does not configure hw */
        int (*sw_init)(void *handle);
-       /** @sw_fini: tears down driver state, does not configure hw */
        int (*sw_fini)(void *handle);
-       /** @hw_init: sets up the hw state */
        int (*hw_init)(void *handle);
-       /** @hw_fini: tears down the hw state */
        int (*hw_fini)(void *handle);
-       /** @late_fini: final cleanup */
        void (*late_fini)(void *handle);
-       /** @suspend: handles IP specific hw/sw changes for suspend */
        int (*suspend)(void *handle);
-       /** @resume: handles IP specific hw/sw changes for resume */
        int (*resume)(void *handle);
-       /** @is_idle: returns current IP block idle status */
        bool (*is_idle)(void *handle);
-       /** @wait_for_idle: poll for idle */
        int (*wait_for_idle)(void *handle);
-       /** @check_soft_reset: check soft reset the IP block */
        bool (*check_soft_reset)(void *handle);
-       /** @pre_soft_reset: pre soft reset the IP block */
        int (*pre_soft_reset)(void *handle);
-       /** @soft_reset: soft reset the IP block */
        int (*soft_reset)(void *handle);
-       /** @post_soft_reset: post soft reset the IP block */
        int (*post_soft_reset)(void *handle);
-       /** @set_clockgating_state: enable/disable cg for the IP block */
        int (*set_clockgating_state)(void *handle,
                                     enum amd_clockgating_state state);
-       /** @set_powergating_state: enable/disable pg for the IP block */
        int (*set_powergating_state)(void *handle,
                                     enum amd_powergating_state state);
-       /** @get_clockgating_state: get current clockgating status */
        void (*get_clockgating_state)(void *handle, u32 *flags);
-       /** @enable_umd_pstate: enable UMD powerstate */
        int (*enable_umd_pstate)(void *handle, enum amd_dpm_forced_level *level);
 };
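
Editor's note: for illustration, a skeletal IP block built against these hooks; every name below is made up, and optional hooks are simply left unset:

        static int foo_early_init(void *handle) { return 0; }
        static int foo_sw_init(void *handle)    { return 0; }
        static int foo_hw_init(void *handle)    { return 0; }
        static bool foo_is_idle(void *handle)   { return true; }

        static const struct amd_ip_funcs foo_v1_0_ip_funcs = {
                .name       = "foo_v1_0",
                .early_init = foo_early_init,
                .sw_init    = foo_sw_init,
                .hw_init    = foo_hw_init,
                .is_idle    = foo_is_idle,
        };
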
 
index f41556a..629a8a3 100644 (file)
 #define mmGCEA_EDC_CNT2_BASE_IDX                                                                       0
 #define mmGCEA_EDC_CNT3                                                                                0x071b
 #define mmGCEA_EDC_CNT3_BASE_IDX                                                                       0
+#define mmGCEA_ERR_STATUS                                                                              0x0712
+#define mmGCEA_ERR_STATUS_BASE_IDX                                                                     0
 
 // addressBlock: gc_gfxudec
 // base address: 0x30000
 #define mmRLC_EDC_CNT2                                                                                 0x4d41
 #define mmRLC_EDC_CNT2_BASE_IDX                                                                        1
 
-#endif
\ No newline at end of file
+#endif
index fc592f6..e37b4b9 100644 (file)
@@ -212,6 +212,15 @@ struct tile_config {
  * IH ring entry. This function allows the KFD ISR to get the VMID
  * from the fault status register as early as possible.
  *
+ * @get_cu_occupancy: Function pointer that returns to the caller the number
+ * of wavefronts that are in flight for all of the queues of a process
+ * as identified by its pasid. Note that the returned value is only a
+ * snapshot of the current moment and does not guarantee any minimum for
+ * the number of waves in flight. This function is defined only for devices
+ * of the GFX9 and later GFX families; care must be taken not to call it
+ * on GFX8 and earlier devices, where it is left undefined.
+ *
  * This structure contains function pointers to services that the kgd driver
  * provides to amdkfd driver.
  *
@@ -286,6 +295,9 @@ struct kfd2kgd_calls {
        void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
                        uint32_t vmid, uint64_t page_table_base);
        uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
+
+       void (*get_cu_occupancy)(struct kgd_dev *kgd, int pasid, int *wave_cnt,
+                       int *max_waves_per_cu);
 };
 
 #endif /* KGD_KFD_INTERFACE_H_INCLUDED */
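
Editor's note: because the hook is undefined on GFX8 and earlier, callers should test the pointer before use. A minimal sketch, assuming kfd2kgd, kgd, and pasid are already in scope:

        int wave_cnt = 0, max_waves_per_cu = 0;

        if (kfd2kgd->get_cu_occupancy) {
                kfd2kgd->get_cu_occupancy(kgd, pasid, &wave_cnt, &max_waves_per_cu);
                /* wave_cnt is only a snapshot; it may change immediately after */
        }
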
index 0aec28f..94132c7 100644 (file)
@@ -281,6 +281,7 @@ struct amd_pm_funcs {
        int (*get_power_limit)(void *handle, uint32_t *limit, bool default_limit);
        int (*get_power_profile_mode)(void *handle, char *buf);
        int (*set_power_profile_mode)(void *handle, long *input, uint32_t size);
+       int (*set_fine_grain_clk_vol)(void *handle, uint32_t type, long *input, uint32_t size);
        int (*odn_edit_dpm_table)(void *handle, uint32_t type, long *input, uint32_t size);
        int (*set_mp1_state)(void *handle, enum pp_mp1_state mp1_state);
        int (*smu_i2c_bus_access)(void *handle, bool acquire);
index 2d924e8..5298166 100644 (file)
@@ -827,6 +827,18 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
                        return -EINVAL;
                }
        } else {
+
+               if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
+                       ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
+                                                               parameter,
+                                                               parameter_size);
+                       if (ret) {
+                               pm_runtime_mark_last_busy(ddev->dev);
+                               pm_runtime_put_autosuspend(ddev->dev);
+                               return -EINVAL;
+                       }
+               }
+
                if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
                        ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
                                                parameter, parameter_size);
index dff4a5f..f6e0e7d 100644 (file)
@@ -349,6 +349,10 @@ enum amdgpu_pcie_gen {
                ((adev)->powerplay.pp_funcs->set_power_profile_mode(\
                        (adev)->powerplay.pp_handle, parameter, size))
 
+#define amdgpu_dpm_set_fine_grain_clk_vol(adev, type, parameter, size) \
+               ((adev)->powerplay.pp_funcs->set_fine_grain_clk_vol(\
+                       (adev)->powerplay.pp_handle, type, parameter, size))
+
 #define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \
                ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
                        (adev)->powerplay.pp_handle, type, parameter, size))
index 85c5e86..44fd0cd 100644 (file)
@@ -453,6 +453,7 @@ struct smu_context
 
        struct work_struct throttling_logging_work;
        atomic64_t throttle_int_counter;
+       struct work_struct interrupt_work;
 
        unsigned fan_max_rpm;
        unsigned manual_fan_speed_rpm;
@@ -601,6 +602,7 @@ struct pptable_funcs {
        int (*deep_sleep_control)(struct smu_context *smu, bool enablement);
        int (*get_fan_parameters)(struct smu_context *smu);
        int (*post_init)(struct smu_context *smu);
+       void (*interrupt_work)(struct smu_context *smu);
 };
 
 typedef enum {
index 1b3529e..3898a95 100644 (file)
@@ -340,6 +340,9 @@ struct pp_hwmgr_func {
        int (*odn_edit_dpm_table)(struct pp_hwmgr *hwmgr,
                                        enum PP_OD_DPM_TABLE_COMMAND type,
                                        long *input, uint32_t size);
+       int (*set_fine_grain_clk_vol)(struct pp_hwmgr *hwmgr,
+                                     enum PP_OD_DPM_TABLE_COMMAND type,
+                                     long *input, uint32_t size);
        int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n);
        int (*powergate_mmhub)(struct pp_hwmgr *hwmgr);
        int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr);
@@ -347,6 +350,8 @@ struct pp_hwmgr_func {
        int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr);
        int (*set_hard_min_dcefclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
        int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
+       int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
+       int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock);
        int (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr, bool *cap);
        int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
        int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
index 11a6cf9..1275246 100644 (file)
@@ -27,7 +27,7 @@
 // *** IMPORTANT ***
 // SMU TEAM: Always increment the interface version if 
 // any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x37
+#define SMU11_DRIVER_IF_VERSION 0x39
 
 #define PPTABLE_Sienna_Cichlid_SMU_VERSION 6
 
@@ -962,7 +962,7 @@ typedef struct {
   uint8_t                FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
   uint8_t                FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
  uint16_t               MaxOpTemp;            // Degrees Celsius
-  uint16_t               Padding_16[1];
+  int16_t                VddGfxOffset;         // in mV
   uint8_t                FanZeroRpmEnable;
   uint8_t                FanZeroRpmStopTemp;
   uint8_t                FanMode;
index 7ae83df..2d1c3ba 100644 (file)
@@ -30,7 +30,7 @@
 #define SMU11_DRIVER_IF_VERSION_NV10 0x36
 #define SMU11_DRIVER_IF_VERSION_NV12 0x36
 #define SMU11_DRIVER_IF_VERSION_NV14 0x36
-#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x37
+#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x39
 #define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x5
 
 /* MP Apertures */
@@ -280,5 +280,7 @@ int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
 int smu_v11_0_deep_sleep_control(struct smu_context *smu,
                                 bool enablement);
 
+void smu_v11_0_interrupt_work(struct smu_context *smu);
+
 #endif
 #endif
index a6321f2..eab9768 100644 (file)
@@ -911,6 +911,19 @@ static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
        return ret;
 }
 
+static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
+{
+       struct pp_hwmgr *hwmgr = handle;
+
+       if (!hwmgr || !hwmgr->pm_en)
+               return -EINVAL;
+
+       if (hwmgr->hwmgr_func->set_fine_grain_clk_vol == NULL)
+               return 0;
+
+       return hwmgr->hwmgr_func->set_fine_grain_clk_vol(hwmgr, type, input, size);
+}
+
 static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
 {
        struct pp_hwmgr *hwmgr = handle;
@@ -920,7 +933,7 @@ static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint3
 
        if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
                pr_info_ratelimited("%s was not implemented.\n", __func__);
-               return -EINVAL;
+               return 0;
        }
 
        return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
@@ -1645,6 +1658,7 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
        .set_powergating_by_smu = pp_set_powergating_by_smu,
        .get_power_profile_mode = pp_get_power_profile_mode,
        .set_power_profile_mode = pp_set_power_profile_mode,
+       .set_fine_grain_clk_vol = pp_set_fine_grain_clk_vol,
        .odn_edit_dpm_table = pp_odn_edit_dpm_table,
        .set_mp1_state = pp_dpm_set_mp1_state,
        .set_power_limit = pp_set_power_limit,
index a5d1a32..cf60f39 100644 (file)
@@ -242,6 +242,34 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
        return 0;
 }
 
+static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+       struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+       if (clock && smu10_data->gfx_actual_soft_min_freq != clock) {
+               smu10_data->gfx_actual_soft_min_freq = clock;
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetHardMinGfxClk,
+                                       smu10_data->gfx_actual_soft_min_freq,
+                                       NULL);
+       }
+       return 0;
+}
+
+static int smu10_set_soft_max_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
+{
+       struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+
+       if (clock && smu10_data->gfx_max_freq_limit != (clock * 100))  {
+               smu10_data->gfx_max_freq_limit = clock * 100;
+               smum_send_msg_to_smc_with_parameter(hwmgr,
+                                       PPSMC_MSG_SetSoftMaxGfxClk,
+                                       clock,
+                                       NULL);
+       }
+       return 0;
+}
+
 static int smu10_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
 {
        struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
@@ -527,6 +555,9 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        hwmgr->pstate_sclk = SMU10_UMD_PSTATE_GFXCLK * 100;
        hwmgr->pstate_mclk = SMU10_UMD_PSTATE_FCLK * 100;
 
+       /* enable the pp_od_clk_voltage sysfs file */
+       hwmgr->od_enabled = 1;
+
        return result;
 }
 
@@ -563,6 +594,8 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
        struct smu10_hwmgr *data = hwmgr->backend;
        uint32_t min_sclk = hwmgr->display_config->min_core_set_clock;
        uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
+       uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
+       uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;
 
        if (hwmgr->smu_version < 0x1E3700) {
                pr_info("smu firmware version too old, can not set dpm level\n");
@@ -676,13 +709,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinFclkByFreq,
                                                hwmgr->display_config->num_display > 3 ?
-                                               SMU10_UMD_PSTATE_PEAK_FCLK :
+                                               data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk :
                                                min_mclk,
                                                NULL);
 
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinSocclkByFreq,
-                                               SMU10_UMD_PSTATE_MIN_SOCCLK,
+                                               data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetHardMinVcn,
@@ -695,11 +728,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxFclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_FCLK,
+                                               data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxSocclkByFreq,
-                                               SMU10_UMD_PSTATE_PEAK_SOCCLK,
+                                               data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk,
                                                NULL);
                smum_send_msg_to_smc_with_parameter(hwmgr,
                                                PPSMC_MSG_SetSoftMaxVcn,
@@ -947,6 +980,26 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
                                        ((mclk_table->entries[i].clk / 100)
                                         == now) ? "*" : "");
                break;
+       case OD_SCLK:
+               if (hwmgr->od_enabled) {
+                       size = sprintf(buf, "%s:\n", "OD_SCLK");
+
+                       size += sprintf(buf + size, "0: %10uMhz\n",
+                       (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : data->gfx_min_freq_limit/100);
+                       size += sprintf(buf + size, "1: %10uMhz\n", data->gfx_max_freq_limit/100);
+               }
+               break;
+       case OD_RANGE:
+               if (hwmgr->od_enabled) {
+                       uint32_t min_freq, max_freq = 0;
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+                       smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+
+                       size = sprintf(buf, "%s:\n", "OD_RANGE");
+                       size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
+                               min_freq, max_freq);
+               }
+               break;
        default:
                break;
        }
@@ -1359,6 +1412,32 @@ static int smu10_asic_reset(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mod
                                                   NULL);
 }
 
+static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
+                                       enum PP_OD_DPM_TABLE_COMMAND type,
+                                       long *input, uint32_t size)
+{
+       if (!hwmgr->od_enabled) {
+               pr_err("Fine grain not support\n");
+               return -EINVAL;
+       }
+
+       if (size != 2) {
+               pr_err("Input parameter number not correct\n");
+               return -EINVAL;
+       }
+
+       if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
+               if (input[0] == 0)
+                       smu10_set_hard_min_gfxclk_by_freq(hwmgr, input[1]);
+               else if (input[0] == 1)
+                       smu10_set_soft_max_gfxclk_by_freq(hwmgr, input[1]);
+               else
+                       return -EINVAL;
+       }
+
+       return 0;
+}
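
Editor's note: a hedged sketch of how the new fine-grain entry point interprets its two-element input; the frequencies are illustrative MHz values and hwmgr is assumed to be a valid smu10 handle in the same file:

        long set_min[2] = { 0, 400 };  /* input[0] == 0: hard minimum sclk, MHz */
        long set_max[2] = { 1, 1400 }; /* input[0] == 1: soft maximum sclk, MHz */

        smu10_set_fine_grain_clk_vol(hwmgr, PP_OD_EDIT_SCLK_VDDC_TABLE, set_min, 2);
        smu10_set_fine_grain_clk_vol(hwmgr, PP_OD_EDIT_SCLK_VDDC_TABLE, set_max, 2);

In practice these values arrive through the pp_od_clk_voltage sysfs file, which the backend_init change above now exposes by setting od_enabled.
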
+
 static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
        .backend_init = smu10_hwmgr_backend_init,
        .backend_fini = smu10_hwmgr_backend_fini,
@@ -1399,9 +1478,12 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
        .powergate_sdma = smu10_powergate_sdma,
        .set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
        .set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
+       .set_hard_min_gfxclk_by_freq = smu10_set_hard_min_gfxclk_by_freq,
+       .set_soft_max_gfxclk_by_freq = smu10_set_soft_max_gfxclk_by_freq,
        .get_power_profile_mode = smu10_get_power_profile_mode,
        .set_power_profile_mode = smu10_set_power_profile_mode,
        .asic_reset = smu10_asic_reset,
+       .set_fine_grain_clk_vol = smu10_set_fine_grain_clk_vol,
 };
 
 int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
index ee0c959..6c9b5f0 100644 (file)
@@ -284,7 +284,7 @@ struct smu10_hwmgr {
        uint32_t                        dclk_soft_min;
        uint32_t                        gfx_actual_soft_min_freq;
        uint32_t                        gfx_min_freq_limit;
-       uint32_t                        gfx_max_freq_limit;
+       uint32_t                        gfx_max_freq_limit; /* in 10KHz */
 
        bool                           vcn_power_gated;
        bool                           vcn_dpg_mode;
index 4a3b64a..1e8919b 100644 (file)
@@ -1585,9 +1585,19 @@ static void smu7_init_dpm_defaults(struct pp_hwmgr *hwmgr)
        data->current_profile_setting.sclk_down_hyst = 100;
        data->current_profile_setting.sclk_activity = SMU7_SCLK_TARGETACTIVITY_DFLT;
        data->current_profile_setting.bupdate_mclk = 1;
-       data->current_profile_setting.mclk_up_hyst = 0;
-       data->current_profile_setting.mclk_down_hyst = 100;
-       data->current_profile_setting.mclk_activity = SMU7_MCLK_TARGETACTIVITY_DFLT;
+       if (adev->gmc.vram_width == 256) {
+               data->current_profile_setting.mclk_up_hyst = 10;
+               data->current_profile_setting.mclk_down_hyst = 60;
+               data->current_profile_setting.mclk_activity = 25;
+       } else if (adev->gmc.vram_width == 128) {
+               data->current_profile_setting.mclk_up_hyst = 5;
+               data->current_profile_setting.mclk_down_hyst = 16;
+               data->current_profile_setting.mclk_activity = 20;
+       } else if (adev->gmc.vram_width == 64) {
+               data->current_profile_setting.mclk_up_hyst = 3;
+               data->current_profile_setting.mclk_down_hyst = 16;
+               data->current_profile_setting.mclk_activity = 20;
+       }
        hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];
        hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
        hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
@@ -2873,7 +2883,7 @@ static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
                if (hwmgr->is_kicker)
                        switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
                else
-                       switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+                       switch_limit_us = data->is_memory_gddr5 ? 200 : 150;
                break;
        case CHIP_VEGAM:
                switch_limit_us = 30;
index adfbcbe..8a9aee8 100644 (file)
@@ -61,9 +61,6 @@ static uint32_t smu9_wait_for_response(struct pp_hwmgr *hwmgr)
        uint32_t reg;
        uint32_t ret;
 
-       /* Due to the L1 policy problem under SRIOV, we have to use
-        * mmMP1_SMN_C2PMSG_103 as the driver response register
-        */
        if (hwmgr->pp_one_vf) {
                reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_103);
 
@@ -148,10 +145,6 @@ int smu9_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
 
        smu9_wait_for_response(hwmgr);
 
-       /* Due to the L1 policy problem under SRIOV, we have to use
-        * mmMP1_SMN_C2PMSG_101 as the driver message register and
-        * mmMP1_SMN_C2PMSG_102 as the driver parameter register.
-        */
        if (hwmgr->pp_one_vf) {
                WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_103, 0);
                WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_102, parameter);
index 1e222c5..daf122f 100644 (file)
@@ -209,11 +209,13 @@ static int vega10_smu_init(struct pp_hwmgr *hwmgr)
        int ret;
        struct cgs_firmware_info info = {0};
 
-       ret = cgs_get_firmware_info(hwmgr->device,
-                                   CGS_UCODE_ID_SMU,
-                                   &info);
-       if (ret || !info.kptr)
-               return -EINVAL;
+       if (!amdgpu_sriov_vf((struct amdgpu_device *)hwmgr->adev)) {
+               ret = cgs_get_firmware_info(hwmgr->device,
+                                               CGS_UCODE_ID_SMU,
+                                               &info);
+               if (ret || !info.kptr)
+                       return -EINVAL;
+       }
 
        priv = kzalloc(sizeof(struct vega10_smumgr), GFP_KERNEL);
 
index 5c4b74f..b1e5ec0 100644 (file)
@@ -417,6 +417,9 @@ static int smu_early_init(void *handle)
        smu->pm_enabled = !!amdgpu_dpm;
        smu->is_apu = false;
        mutex_init(&smu->mutex);
+       mutex_init(&smu->smu_baco.mutex);
+       smu->smu_baco.state = SMU_BACO_STATE_EXIT;
+       smu->smu_baco.platform_support = false;
 
        return smu_set_funcs(adev);
 }
@@ -481,17 +484,6 @@ static int smu_late_init(void *handle)
                return ret;
        }
 
-       /*
-        * Set initialized values (get from vbios) to dpm tables context such as
-        * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
-        * type of clks.
-        */
-       ret = smu_set_default_dpm_table(smu);
-       if (ret) {
-               dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
-               return ret;
-       }
-
        ret = smu_populate_umd_state_clk(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to populate UMD state clocks!\n");
@@ -780,6 +772,19 @@ static void smu_throttling_logging_work_fn(struct work_struct *work)
        smu_log_thermal_throttling(smu);
 }
 
+static void smu_interrupt_work_fn(struct work_struct *work)
+{
+       struct smu_context *smu = container_of(work, struct smu_context,
+                                              interrupt_work);
+
+       mutex_lock(&smu->mutex);
+
+       if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
+               smu->ppt_funcs->interrupt_work(smu);
+
+       mutex_unlock(&smu->mutex);
+}
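
Editor's note: the work item decouples heavy SMU handling from the interrupt path; an IRQ handler only queues the work and the mutex-protected body above runs in process context. A minimal sketch of the producer side, with an illustrative function name:

        static void example_irq_path(struct smu_context *smu)
        {
                /* cheap, non-sleeping work only; the rest runs in the worker */
                schedule_work(&smu->interrupt_work);
        }
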
+
 static int smu_sw_init(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -793,15 +798,12 @@ static int smu_sw_init(void *handle)
        bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
        bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
 
-       mutex_init(&smu->smu_baco.mutex);
-       smu->smu_baco.state = SMU_BACO_STATE_EXIT;
-       smu->smu_baco.platform_support = false;
-
        mutex_init(&smu->sensor_lock);
        mutex_init(&smu->metrics_lock);
        mutex_init(&smu->message_lock);
 
        INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
+       INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
        atomic64_set(&smu->throttle_int_counter, 0);
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
@@ -832,10 +834,13 @@ static int smu_sw_init(void *handle)
 
        smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
        smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
-       ret = smu_init_microcode(smu);
-       if (ret) {
-               dev_err(adev->dev, "Failed to load smu firmware!\n");
-               return ret;
+
+       if (!amdgpu_sriov_vf(adev)) {
+               ret = smu_init_microcode(smu);
+               if (ret) {
+                       dev_err(adev->dev, "Failed to load smu firmware!\n");
+                       return ret;
+               }
        }
 
        ret = smu_smc_table_sw_init(smu);
@@ -1013,6 +1018,17 @@ static int smu_smc_hw_setup(struct smu_context *smu)
                return ret;
        }
 
+       /*
+        * Set initialized values (get from vbios) to dpm tables context such as
+        * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
+        * type of clks.
+        */
+       ret = smu_set_default_dpm_table(smu);
+       if (ret) {
+               dev_err(adev->dev, "Failed to setup default dpm clock tables!\n");
+               return ret;
+       }
+
        ret = smu_notify_display_change(smu);
        if (ret)
                return ret;
@@ -1194,6 +1210,7 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
        int ret = 0;
 
        cancel_work_sync(&smu->throttling_logging_work);
+       cancel_work_sync(&smu->interrupt_work);
 
        ret = smu_disable_thermal_alert(smu);
        if (ret) {
@@ -1214,7 +1231,6 @@ static int smu_hw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
-       int ret = 0;
 
        if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;
@@ -1230,11 +1246,7 @@ static int smu_hw_fini(void *handle)
 
        adev->pm.dpm_enabled = false;
 
-       ret = smu_smc_hw_cleanup(smu);
-       if (ret)
-               return ret;
-
-       return 0;
+       return smu_smc_hw_cleanup(smu);
 }
 
 int smu_reset(struct smu_context *smu)
@@ -1823,18 +1835,12 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
 
-       mutex_lock(&smu->mutex);
+       if (smu->disable_watermark)
+               return 0;
 
-       if (!smu->disable_watermark &&
-                       smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
-                       smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
-               ret = smu_set_watermarks_table(smu, clock_ranges);
+       mutex_lock(&smu->mutex);
 
-               if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
-                       smu->watermarks_bitmap |= WATERMARKS_EXIST;
-                       smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
-               }
-       }
+       ret = smu_set_watermarks_table(smu, clock_ranges);
 
        mutex_unlock(&smu->mutex);
 
index d298fa6..fc37628 100644 (file)
@@ -2388,6 +2388,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
        .gfx_ulv_control = smu_v11_0_gfx_ulv_control,
        .deep_sleep_control = smu_v11_0_deep_sleep_control,
        .get_fan_parameters = arcturus_get_fan_parameters,
+       .interrupt_work = smu_v11_0_interrupt_work,
 };
 
 void arcturus_set_ppt_funcs(struct smu_context *smu)
index 985c706..8d8081c 100644 (file)
@@ -316,6 +316,18 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
        if (smu->dc_controlled_by_gpio)
                *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
 
+       if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
+               *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
+
+       /* DPM UCLK enablement should be skipped for navi10 A0 secure board */
+       if (!(is_asic_secure(smu) &&
+            (adev->asic_type == CHIP_NAVI10) &&
+            (adev->rev_id == 0)) &&
+           (adev->pm.pp_feature & PP_MCLK_DPM_MASK))
+               *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
+                               | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
+                               | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
+
        /* DS SOCCLK enablement should be skipped for navi10 A0 secure board */
        if (is_asic_secure(smu) &&
            (adev->asic_type == CHIP_NAVI10) &&
@@ -2279,13 +2291,14 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
        }
 
        /*
-        * The messages below are only supported by 42.53.0 and later
-        * PMFWs.
+        * The messages below are only supported by Navi10 42.53.0 and later
+        * PMFWs and Navi14 53.29.0 and later PMFWs.
         * - PPSMC_MSG_SetDriverDummyTableDramAddrHigh
         * - PPSMC_MSG_SetDriverDummyTableDramAddrLow
         * - PPSMC_MSG_GetUMCFWWA
         */
-       if (pmfw_version >= 0x2a3500) {
+       if (((adev->asic_type == CHIP_NAVI10) && (pmfw_version >= 0x2a3500)) ||
+           ((adev->asic_type == CHIP_NAVI14) && (pmfw_version >= 0x351D00))) {
                ret = smu_cmn_send_smc_msg_with_param(smu,
                                                      SMU_MSG_GET_UMC_FW_WA,
                                                      0,
@@ -2323,8 +2336,6 @@ static void navi10_fill_i2c_req(SwI2cRequest_t  *req, bool write,
 {
        int i;
 
-       BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
-
        req->I2CcontrollerPort = 0;
        req->I2CSpeed = 2;
        req->SlaveAddress = address;
@@ -2362,6 +2373,12 @@ static int navi10_i2c_read_data(struct i2c_adapter *control,
        struct smu_table_context *smu_table = &adev->smu.smu_table;
        struct smu_table *table = &smu_table->driver_table;
 
+       if (numbytes > MAX_SW_I2C_COMMANDS) {
+               dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+                       numbytes, MAX_SW_I2C_COMMANDS);
+               return -EINVAL;
+       }
+
        memset(&req, 0, sizeof(req));
        navi10_fill_i2c_req(&req, false, address, numbytes, data);
 
@@ -2398,6 +2415,12 @@ static int navi10_i2c_write_data(struct i2c_adapter *control,
        SwI2cRequest_t req;
        struct amdgpu_device *adev = to_amdgpu_device(control);
 
+       if (numbytes > MAX_SW_I2C_COMMANDS) {
+               dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+                       numbytes, MAX_SW_I2C_COMMANDS);
+               return -EINVAL;
+       }
+
        memset(&req, 0, sizeof(req));
        navi10_fill_i2c_req(&req, true, address, numbytes, data);
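
Editor's note: with the BUG_ON replaced by a graceful -EINVAL, a caller that may exceed MAX_SW_I2C_COMMANDS should split the transfer itself. A hedged sketch, with the navi10_i2c_write_data() signature assumed from the surrounding hunks:

        static int example_i2c_write_all(struct i2c_adapter *control, uint8_t address,
                                         uint8_t *data, uint32_t numbytes)
        {
                uint32_t off = 0;

                while (off < numbytes) {
                        /* clamp each chunk so the bounds check never trips */
                        uint32_t chunk = numbytes - off > MAX_SW_I2C_COMMANDS ?
                                         MAX_SW_I2C_COMMANDS : numbytes - off;
                        int ret = navi10_i2c_write_data(control, address,
                                                        data + off, chunk);
                        if (ret)
                                return ret;
                        off += chunk;
                }
                return 0;
        }
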
 
@@ -2628,43 +2651,12 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
 
 static int navi10_post_smu_init(struct smu_context *smu)
 {
-       struct smu_feature *feature = &smu->smu_feature;
        struct amdgpu_device *adev = smu->adev;
-       uint64_t feature_mask = 0;
        int ret = 0;
 
        if (amdgpu_sriov_vf(adev))
                return 0;
 
-       /* For Naiv1x, enable these features only after DAL initialization */
-       if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK)
-               feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);
-
-       /* DPM UCLK enablement should be skipped for navi10 A0 secure board */
-       if (!(is_asic_secure(smu) &&
-            (adev->asic_type == CHIP_NAVI10) &&
-            (adev->rev_id == 0)) &&
-           (adev->pm.pp_feature & PP_MCLK_DPM_MASK))
-               feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
-                               | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT)
-                               | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT);
-
-       if (!feature_mask)
-               return 0;
-
-       bitmap_or(feature->allowed,
-                 feature->allowed,
-                 (unsigned long *)(&feature_mask),
-                 SMU_FEATURE_MAX);
-
-       ret = smu_cmn_feature_update_enable_state(smu,
-                                                 feature_mask,
-                                                 true);
-       if (ret) {
-               dev_err(adev->dev, "Failed to post uclk/socclk dpm enablement!\n");
-               return ret;
-       }
-
        ret = navi10_run_umc_cdr_workaround(smu);
        if (ret) {
                dev_err(adev->dev, "Failed to apply umc cdr workaround!\n");
@@ -2773,6 +2765,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
        .deep_sleep_control = smu_v11_0_deep_sleep_control,
        .get_fan_parameters = navi10_get_fan_parameters,
        .post_init = navi10_post_smu_init,
+       .interrupt_work = smu_v11_0_interrupt_work,
 };
 
 void navi10_set_ppt_funcs(struct smu_context *smu)
index a2cb831..c27806f 100644
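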
@@ -2422,8 +2422,6 @@ static void sienna_cichlid_fill_i2c_req(SwI2cRequest_t  *req, bool write,
 {
        int i;
 
-       BUG_ON(numbytes > MAX_SW_I2C_COMMANDS);
-
        req->I2CcontrollerPort = 0;
        req->I2CSpeed = 2;
        req->SlaveAddress = address;
@@ -2461,6 +2459,12 @@ static int sienna_cichlid_i2c_read_data(struct i2c_adapter *control,
        struct smu_table_context *smu_table = &adev->smu.smu_table;
        struct smu_table *table = &smu_table->driver_table;
 
+       if (numbytes > MAX_SW_I2C_COMMANDS) {
+               dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+                       numbytes, MAX_SW_I2C_COMMANDS);
+               return -EINVAL;
+       }
+
        memset(&req, 0, sizeof(req));
        sienna_cichlid_fill_i2c_req(&req, false, address, numbytes, data);
 
@@ -2497,6 +2501,12 @@ static int sienna_cichlid_i2c_write_data(struct i2c_adapter *control,
        SwI2cRequest_t req;
        struct amdgpu_device *adev = to_amdgpu_device(control);
 
+       if (numbytes > MAX_SW_I2C_COMMANDS) {
+               dev_err(adev->dev, "numbytes requested %d is over max allowed %d\n",
+                       numbytes, MAX_SW_I2C_COMMANDS);
+               return -EINVAL;
+       }
+
        memset(&req, 0, sizeof(req));
        sienna_cichlid_fill_i2c_req(&req, true, address, numbytes, data);
 
@@ -2784,6 +2794,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
        .gfx_ulv_control = smu_v11_0_gfx_ulv_control,
        .deep_sleep_control = smu_v11_0_deep_sleep_control,
        .get_fan_parameters = sienna_cichlid_get_fan_parameters,
+       .interrupt_work = smu_v11_0_interrupt_work,
 };
 
 void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
index b53872e..2380759 100644
@@ -322,39 +322,42 @@ int smu_v11_0_setup_pptable(struct smu_context *smu)
        void *table;
        uint16_t version_major, version_minor;
 
-       hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
-       version_major = le16_to_cpu(hdr->header.header_version_major);
-       version_minor = le16_to_cpu(hdr->header.header_version_minor);
-       if ((version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) ||
-           adev->asic_type == CHIP_NAVY_FLOUNDER) {
-               dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
-               switch (version_minor) {
-               case 0:
-                       ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
-                       break;
-               case 1:
-                       ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
-                                                        smu->smu_table.boot_values.pp_table_id);
-                       break;
-               default:
-                       ret = -EINVAL;
-                       break;
+       if (!amdgpu_sriov_vf(adev)) {
+               hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+               version_major = le16_to_cpu(hdr->header.header_version_major);
+               version_minor = le16_to_cpu(hdr->header.header_version_minor);
+               if ((version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) ||
+                       adev->asic_type == CHIP_NAVY_FLOUNDER) {
+                       dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
+                       switch (version_minor) {
+                       case 0:
+                               ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
+                               break;
+                       case 1:
+                               ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
+                                                               smu->smu_table.boot_values.pp_table_id);
+                               break;
+                       default:
+                               ret = -EINVAL;
+                               break;
+                       }
+                       if (ret)
+                               return ret;
+                       goto out;
                }
-               if (ret)
-                       return ret;
+       }
 
-       } else {
-               dev_info(adev->dev, "use vbios provided pptable\n");
-               index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
-                                                   powerplayinfo);
+       dev_info(adev->dev, "use vbios provided pptable\n");
+       index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+                                               powerplayinfo);
 
-               ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
-                                             (uint8_t **)&table);
-               if (ret)
-                       return ret;
-               size = atom_table_size;
-       }
+       ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
+                                               (uint8_t **)&table);
+       if (ret)
+               return ret;
+       size = atom_table_size;
 
+out:
        if (!smu->smu_table.power_play_table)
                smu->smu_table.power_play_table = table;
        if (!smu->smu_table.power_play_table_size)
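
The restructuring above is easier to read as control flow than as a diff. The net effect is that under SR-IOV a virtual function never dereferences adev->pm.fw (the SMC firmware image, presumably unavailable to VFs) and always falls through to the vbios-provided pptable. In outline, as a sketch rather than verbatim code:

	/*
	 * smu_v11_0_setup_pptable() after this change:
	 *
	 *	if (!amdgpu_sriov_vf(adev) && a driver pptable is requested)
	 *		load the driver-provided pptable (v2.0/v2.1), goto out;
	 *	load the vbios powerplayinfo data table;  // VFs always land here
	 * out:
	 *	cache the table pointer and size in smu->smu_table;
	 */
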
@@ -952,6 +955,12 @@ static int smu_v11_0_process_pending_interrupt(struct smu_context *smu)
        return ret;
 }
 
+void smu_v11_0_interrupt_work(struct smu_context *smu)
+{
+       if (smu_v11_0_ack_ac_dc_interrupt(smu))
+               dev_err(smu->adev->dev, "Ack AC/DC interrupt Failed!\n");
+}
+
 int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
 {
        int ret = 0;
@@ -1317,11 +1326,11 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
                        switch (ctxid) {
                        case 0x3:
                                dev_dbg(adev->dev, "Switched to AC mode!\n");
-                               smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+                               schedule_work(&smu->interrupt_work);
                                break;
                        case 0x4:
                                dev_dbg(adev->dev, "Switched to DC mode!\n");
-                               smu_v11_0_ack_ac_dc_interrupt(&adev->smu);
+                               schedule_work(&smu->interrupt_work);
                                break;
                        case 0x7:
                                /*
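
The ISR change pairs with the new smu_v11_0_interrupt_work() above: acknowledging the AC/DC switch means sending an SMU message, which waits on a firmware response and so cannot run in interrupt context, hence the deferral to a workqueue. A sketch of the wiring, assuming interrupt_work is a work_struct embedded in smu_context and initialized at sw-init time (the handler name here is hypothetical):

	/* sw-init (sketch) */
	INIT_WORK(&smu->interrupt_work, smu_interrupt_work_handler);

	/* ISR: never talk to the SMU here, just defer */
	schedule_work(&smu->interrupt_work);

	/* worker: dispatches to .interrupt_work in pptable_funcs, i.e.
	 * smu_v11_0_interrupt_work() for navi10 and sienna_cichlid */
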
index 55a254b..66c1026 100644
@@ -222,14 +222,16 @@ static int renoir_get_profiling_clk_mask(struct smu_context *smu,
                        *sclk_mask = 0;
        } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
                if (mclk_mask)
-                       *mclk_mask = 0;
+                       /* mclk levels are in reverse order */
+                       *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
        } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
                if(sclk_mask)
                        /* The sclk is used as gfxclk and has three levels: max/min/current */
                        *sclk_mask = 3 - 1;
 
                if(mclk_mask)
-                       *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1;
+                       /* mclk levels are in reverse order */
+                       *mclk_mask = 0;
 
                if(soc_mask)
                        *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1;
@@ -323,7 +325,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu,
                case SMU_UCLK:
                case SMU_FCLK:
                case SMU_MCLK:
-                       ret = renoir_get_dpm_clk_limited(smu, clk_type, 0, min);
+                       ret = renoir_get_dpm_clk_limited(smu, clk_type, NUM_MEMCLK_DPM_LEVELS - 1, min);
                        if (ret)
                                goto failed;
                        break;
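
All three renoir hunks encode the same hardware fact: the memory-clock DPM table is ordered from highest to lowest, so index 0 is the fastest level and NUM_MEMCLK_DPM_LEVELS - 1 the slowest. The min-mclk profile, the peak profile, and the minimum-frequency lookup are all flipped to agree with that ordering:

	/*
	 * Renoir mclk DPM levels (reverse order):
	 *   index 0                          -> highest mclk  (profile peak)
	 *   index NUM_MEMCLK_DPM_LEVELS - 1  -> lowest mclk   (profile min)
	 */
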
index 621ebdb..d0c6561 100644
@@ -748,7 +748,7 @@ static int cdns_mhdp_fw_activate(const struct firmware *fw,
         * bridge should already be detached.
         */
        if (mhdp->bridge_attached)
-               writel(~CDNS_APB_INT_MASK_SW_EVENT_INT,
+               writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
                       mhdp->regs + CDNS_APB_INT_MASK);
 
        spin_unlock(&mhdp->start_lock);
@@ -1689,7 +1689,7 @@ static int cdns_mhdp_attach(struct drm_bridge *bridge,
 
        /* Enable SW event interrupts */
        if (hw_ready)
-               writel(~CDNS_APB_INT_MASK_SW_EVENT_INT,
+               writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
                       mhdp->regs + CDNS_APB_INT_MASK);
 
        return 0;
@@ -2122,7 +2122,7 @@ static void cdns_mhdp_bridge_hpd_enable(struct drm_bridge *bridge)
 
        /* Enable SW event interrupts */
        if (mhdp->bridge_attached)
-               writel(~CDNS_APB_INT_MASK_SW_EVENT_INT,
+               writel(~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT,
                       mhdp->regs + CDNS_APB_INT_MASK);
 }
 
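
The three identical one-character fixes above are about integer width: if CDNS_APB_INT_MASK_SW_EVENT_INT is defined via BIT() (an assumption here), it expands to an unsigned long, so on 64-bit builds the complement is a 64-bit value that writel() silently truncates, which the compiler warns about. Casting to u32 before taking the complement keeps the arithmetic in register width:

	u32 val = ~(u32)CDNS_APB_INT_MASK_SW_EVENT_INT;	/* all bits set except the SW-event bit */

	writel(val, mhdp->regs + CDNS_APB_INT_MASK);
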
index fac4657..a1fba7e 100644
@@ -8161,7 +8161,7 @@ static void compute_m_n(unsigned int m, unsigned int n,
         * which the devices expect also in synchronous clock mode.
         */
        if (constant_n)
-               *ret_n = 0x8000;
+               *ret_n = DP_LINK_CONSTANT_N_VALUE;
        else
                *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
 
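
No functional change in compute_m_n(): DP_LINK_CONSTANT_N_VALUE is the drm_dp_helper name for the fixed N of 0x8000 that the DisplayPort spec prescribes in synchronous clock mode, so the bare magic number becomes self-documenting:

	/* include/drm/drm_dp_helper.h (for reference) */
	#define DP_LINK_CONSTANT_N_VALUE	0x8000
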
index 937d080..a3d1617 100644
@@ -9,8 +9,6 @@
 #include <linux/component.h>
 #include <linux/clk.h>
 #include <linux/dma-mapping.h>
-#include <linux/dma-noncoherent.h>
-#include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
@@ -21,7 +19,6 @@
 #include <drm/drm_bridge.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/drm_damage_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_gem_cma_helper.h>
 #include <drm/drm_fb_cma_helper.h>
@@ -79,11 +76,6 @@ static const u32 ingenic_drm_primary_formats[] = {
        DRM_FORMAT_XRGB8888,
 };
 
-static bool ingenic_drm_cached_gem_buf;
-module_param_named(cached_gem_buffers, ingenic_drm_cached_gem_buf, bool, 0400);
-MODULE_PARM_DESC(cached_gem_buffers,
-                "Enable fully cached GEM buffers [default=false]");
-
 static bool ingenic_drm_writeable_reg(struct device *dev, unsigned int reg)
 {
        switch (reg) {
@@ -346,8 +338,6 @@ static int ingenic_drm_plane_atomic_check(struct drm_plane *plane,
             plane->state->fb->format->format != state->fb->format->format))
                crtc_state->mode_changed = true;
 
-       drm_atomic_helper_check_plane_damage(state->state, state);
-
        return 0;
 }
 
@@ -450,38 +440,6 @@ void ingenic_drm_plane_config(struct device *dev,
        }
 }
 
-void ingenic_drm_sync_data(struct device *dev,
-                          struct drm_plane_state *old_state,
-                          struct drm_plane_state *state)
-{
-       const struct drm_format_info *finfo = state->fb->format;
-       struct ingenic_drm *priv = dev_get_drvdata(dev);
-       struct drm_atomic_helper_damage_iter iter;
-       unsigned int offset, i;
-       struct drm_rect clip;
-       dma_addr_t paddr;
-       void *addr;
-
-       if (!ingenic_drm_cached_gem_buf)
-               return;
-
-       drm_atomic_helper_damage_iter_init(&iter, old_state, state);
-
-       drm_atomic_for_each_plane_damage(&iter, &clip) {
-               for (i = 0; i < finfo->num_planes; i++) {
-                       paddr = drm_fb_cma_get_gem_addr(state->fb, state, i);
-                       addr = phys_to_virt(paddr);
-
-                       /* Ignore x1/x2 values, invalidate complete lines */
-                       offset = clip.y1 * state->fb->pitches[i];
-
-                       dma_cache_sync(priv->dev, addr + offset,
-                                      (clip.y2 - clip.y1) * state->fb->pitches[i],
-                                      DMA_TO_DEVICE);
-               }
-       }
-}
-
 static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
                                            struct drm_plane_state *oldstate)
 {
@@ -492,8 +450,6 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
        dma_addr_t addr;
 
        if (state && state->fb) {
-               ingenic_drm_sync_data(priv->dev, oldstate, state);
-
                addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
                width = state->src_w >> 16;
                height = state->src_h >> 16;
@@ -649,69 +605,7 @@ static void ingenic_drm_disable_vblank(struct drm_crtc *crtc)
        regmap_update_bits(priv->map, JZ_REG_LCD_CTRL, JZ_LCD_CTRL_EOF_IRQ, 0);
 }
 
-static struct drm_framebuffer *
-ingenic_drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
-                         const struct drm_mode_fb_cmd2 *mode_cmd)
-{
-       if (ingenic_drm_cached_gem_buf)
-               return drm_gem_fb_create_with_dirty(dev, file, mode_cmd);
-
-       return drm_gem_fb_create(dev, file, mode_cmd);
-}
-
-static int ingenic_drm_gem_mmap(struct drm_gem_object *obj,
-                               struct vm_area_struct *vma)
-{
-       struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
-       struct device *dev = cma_obj->base.dev->dev;
-       unsigned long attrs;
-       int ret;
-
-       if (ingenic_drm_cached_gem_buf)
-               attrs = DMA_ATTR_NON_CONSISTENT;
-       else
-               attrs = DMA_ATTR_WRITE_COMBINE;
-
-       /*
-        * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
-        * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
-        * the whole buffer.
-        */
-       vma->vm_flags &= ~VM_PFNMAP;
-       vma->vm_pgoff = 0;
-       vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
-
-       ret = dma_mmap_attrs(dev, vma, cma_obj->vaddr, cma_obj->paddr,
-                            vma->vm_end - vma->vm_start, attrs);
-       if (ret)
-               drm_gem_vm_close(vma);
-
-       return ret;
-}
-
-static int ingenic_drm_gem_cma_mmap(struct file *filp,
-                                   struct vm_area_struct *vma)
-{
-       int ret;
-
-       ret = drm_gem_mmap(filp, vma);
-       if (ret)
-               return ret;
-
-       return ingenic_drm_gem_mmap(vma->vm_private_data, vma);
-}
-
-static const struct file_operations ingenic_drm_fops = {
-       .owner          = THIS_MODULE,
-       .open           = drm_open,
-       .release        = drm_release,
-       .unlocked_ioctl = drm_ioctl,
-       .compat_ioctl   = drm_compat_ioctl,
-       .poll           = drm_poll,
-       .read           = drm_read,
-       .llseek         = noop_llseek,
-       .mmap           = ingenic_drm_gem_cma_mmap,
-};
+DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops);
 
 static struct drm_driver ingenic_drm_driver_data = {
        .driver_features        = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
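
With the cached-GEM-buffer support removed, the hand-rolled file_operations collapses into the stock CMA helper. DEFINE_DRM_GEM_CMA_FOPS() expands to roughly the block deleted above, except that .mmap is the generic drm_gem_cma_mmap() (write-combined mappings) rather than the driver's cached variant:

	/* rough expansion of DEFINE_DRM_GEM_CMA_FOPS(ingenic_drm_fops) */
	static const struct file_operations ingenic_drm_fops = {
		.owner		= THIS_MODULE,
		.open		= drm_open,
		.release	= drm_release,
		.unlocked_ioctl	= drm_ioctl,
		.compat_ioctl	= drm_compat_ioctl,
		.poll		= drm_poll,
		.read		= drm_read,
		.llseek		= noop_llseek,
		.mmap		= drm_gem_cma_mmap,
	};
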
@@ -775,7 +669,7 @@ static const struct drm_encoder_helper_funcs ingenic_drm_encoder_helper_funcs =
 };
 
 static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = {
-       .fb_create              = ingenic_drm_gem_fb_create,
+       .fb_create              = drm_gem_fb_create,
        .output_poll_changed    = drm_fb_helper_output_poll_changed,
        .atomic_check           = drm_atomic_helper_check,
        .atomic_commit          = drm_atomic_helper_commit,
@@ -902,8 +796,6 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
                return ret;
        }
 
-       drm_plane_enable_fb_damage_clips(&priv->f1);
-
        drm_crtc_helper_add(&priv->crtc, &ingenic_drm_crtc_helper_funcs);
 
        ret = drm_crtc_init_with_planes(drm, &priv->crtc, &priv->f1,
@@ -929,8 +821,6 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
                        return ret;
                }
 
-               drm_plane_enable_fb_damage_clips(&priv->f0);
-
                if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && has_components) {
                        ret = component_bind_all(dev, drm);
                        if (ret) {
index df99f0f..43f7d95 100644
@@ -168,10 +168,6 @@ void ingenic_drm_plane_config(struct device *dev,
                              struct drm_plane *plane, u32 fourcc);
 void ingenic_drm_plane_disable(struct device *dev, struct drm_plane *plane);
 
-void ingenic_drm_sync_data(struct device *dev,
-                          struct drm_plane_state *old_state,
-                          struct drm_plane_state *state);
-
 extern struct platform_driver *ingenic_ipu_driver_ptr;
 
 #endif /* DRIVERS_GPU_DRM_INGENIC_INGENIC_DRM_H */
index 38c83e8..fc8c6e9 100644
@@ -20,7 +20,6 @@
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
-#include <drm/drm_damage_helper.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_fb_cma_helper.h>
 #include <drm/drm_fourcc.h>
@@ -317,8 +316,6 @@ static void ingenic_ipu_plane_atomic_update(struct drm_plane *plane,
                                JZ_IPU_CTRL_CHIP_EN | JZ_IPU_CTRL_LCDC_SEL);
        }
 
-       ingenic_drm_sync_data(ipu->master, oldstate, state);
-
        /* New addresses will be committed in vblank handler... */
        ipu->addr_y = drm_fb_cma_get_gem_addr(state->fb, state, 0);
        if (finfo->num_planes > 1)
@@ -537,7 +534,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
 
        if (!state->crtc ||
            !crtc_state->mode.hdisplay || !crtc_state->mode.vdisplay)
-               goto out_check_damage;
+               return 0;
 
        /* Plane must be fully visible */
        if (state->crtc_x < 0 || state->crtc_y < 0 ||
@@ -554,7 +551,7 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
                return -EINVAL;
 
        if (!osd_changed(state, plane->state))
-               goto out_check_damage;
+               return 0;
 
        crtc_state->mode_changed = true;
 
@@ -581,9 +578,6 @@ static int ingenic_ipu_plane_atomic_check(struct drm_plane *plane,
        ipu->denom_w = denom_w;
        ipu->denom_h = denom_h;
 
-out_check_damage:
-       drm_atomic_helper_check_plane_damage(state->state, state);
-
        return 0;
 }
 
@@ -765,8 +759,6 @@ static int ingenic_ipu_bind(struct device *dev, struct device *master, void *d)
                return err;
        }
 
-       drm_plane_enable_fb_damage_clips(plane);
-
        /*
         * Sharpness settings range is [0,32]
         * 0       : nearest-neighbor
index aa74aac..65cd03a 100644
@@ -24,6 +24,6 @@ config DRM_MEDIATEK_HDMI
        tristate "DRM HDMI Support for Mediatek SoCs"
        depends on DRM_MEDIATEK
        select SND_SOC_HDMI_CODEC if SND_SOC
-       select GENERIC_PHY
+       select PHY_MTK_HDMI
        help
          DRM/KMS HDMI driver for Mediatek SoCs
index b7a82ed..77b0fd8 100644
@@ -19,9 +19,6 @@ obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
 
 mediatek-drm-hdmi-objs := mtk_cec.o \
                          mtk_hdmi.o \
-                         mtk_hdmi_ddc.o \
-                         mtk_mt2701_hdmi_phy.o \
-                         mtk_mt8173_hdmi_phy.o \
-                         mtk_hdmi_phy.o
+                         mtk_hdmi_ddc.o
 
 obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
index d4f0fb7..cf11c48 100644
@@ -64,7 +64,8 @@ enum mtk_dpi_out_color_format {
 struct mtk_dpi {
        struct mtk_ddp_comp ddp_comp;
        struct drm_encoder encoder;
-       struct drm_bridge *bridge;
+       struct drm_bridge bridge;
+       struct drm_bridge *next_bridge;
        void __iomem *regs;
        struct device *dev;
        struct clk *engine_clk;
@@ -83,9 +84,9 @@ struct mtk_dpi {
        int refcount;
 };
 
-static inline struct mtk_dpi *mtk_dpi_from_encoder(struct drm_encoder *e)
+static inline struct mtk_dpi *bridge_to_dpi(struct drm_bridge *b)
 {
-       return container_of(e, struct mtk_dpi, encoder);
+       return container_of(b, struct mtk_dpi, bridge);
 }
 
 enum mtk_dpi_polarity {
@@ -521,50 +522,53 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi,
        return 0;
 }
 
-static bool mtk_dpi_encoder_mode_fixup(struct drm_encoder *encoder,
-                                      const struct drm_display_mode *mode,
-                                      struct drm_display_mode *adjusted_mode)
+static void mtk_dpi_encoder_destroy(struct drm_encoder *encoder)
 {
-       return true;
+       drm_encoder_cleanup(encoder);
 }
 
-static void mtk_dpi_encoder_mode_set(struct drm_encoder *encoder,
-                                    struct drm_display_mode *mode,
-                                    struct drm_display_mode *adjusted_mode)
+static const struct drm_encoder_funcs mtk_dpi_encoder_funcs = {
+       .destroy = mtk_dpi_encoder_destroy,
+};
+
+static int mtk_dpi_bridge_attach(struct drm_bridge *bridge,
+                                enum drm_bridge_attach_flags flags)
 {
-       struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+       struct mtk_dpi *dpi = bridge_to_dpi(bridge);
+
+       return drm_bridge_attach(bridge->encoder, dpi->next_bridge,
+                                &dpi->bridge, flags);
+}
+
+static void mtk_dpi_bridge_mode_set(struct drm_bridge *bridge,
+                               const struct drm_display_mode *mode,
+                               const struct drm_display_mode *adjusted_mode)
+{
+       struct mtk_dpi *dpi = bridge_to_dpi(bridge);
 
        drm_mode_copy(&dpi->mode, adjusted_mode);
 }
 
-static void mtk_dpi_encoder_disable(struct drm_encoder *encoder)
+static void mtk_dpi_bridge_disable(struct drm_bridge *bridge)
 {
-       struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+       struct mtk_dpi *dpi = bridge_to_dpi(bridge);
 
        mtk_dpi_power_off(dpi);
 }
 
-static void mtk_dpi_encoder_enable(struct drm_encoder *encoder)
+static void mtk_dpi_bridge_enable(struct drm_bridge *bridge)
 {
-       struct mtk_dpi *dpi = mtk_dpi_from_encoder(encoder);
+       struct mtk_dpi *dpi = bridge_to_dpi(bridge);
 
        mtk_dpi_power_on(dpi);
        mtk_dpi_set_display_mode(dpi, &dpi->mode);
 }
 
-static int mtk_dpi_atomic_check(struct drm_encoder *encoder,
-                               struct drm_crtc_state *crtc_state,
-                               struct drm_connector_state *conn_state)
-{
-       return 0;
-}
-
-static const struct drm_encoder_helper_funcs mtk_dpi_encoder_helper_funcs = {
-       .mode_fixup = mtk_dpi_encoder_mode_fixup,
-       .mode_set = mtk_dpi_encoder_mode_set,
-       .disable = mtk_dpi_encoder_disable,
-       .enable = mtk_dpi_encoder_enable,
-       .atomic_check = mtk_dpi_atomic_check,
+static const struct drm_bridge_funcs mtk_dpi_bridge_funcs = {
+       .attach = mtk_dpi_bridge_attach,
+       .mode_set = mtk_dpi_bridge_mode_set,
+       .disable = mtk_dpi_bridge_disable,
+       .enable = mtk_dpi_bridge_enable,
 };
 
 static void mtk_dpi_start(struct mtk_ddp_comp *comp)
@@ -605,12 +609,10 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
                dev_err(dev, "Failed to initialize decoder: %d\n", ret);
                goto err_unregister;
        }
-       drm_encoder_helper_add(&dpi->encoder, &mtk_dpi_encoder_helper_funcs);
 
-       /* Currently DPI0 is fixed to be driven by OVL1 */
-       dpi->encoder.possible_crtcs = BIT(1);
+       dpi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm_dev, dpi->ddp_comp);
 
-       ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL, 0);
+       ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL, 0);
        if (ret) {
                dev_err(dev, "Failed to attach bridge: %d\n", ret);
                goto err_cleanup;
@@ -770,11 +772,11 @@ static int mtk_dpi_probe(struct platform_device *pdev)
        }
 
        ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
-                                         NULL, &dpi->bridge);
+                                         NULL, &dpi->next_bridge);
        if (ret)
                return ret;
 
-       dev_info(dev, "Found bridge node: %pOF\n", dpi->bridge->of_node);
+       dev_info(dev, "Found bridge node: %pOF\n", dpi->next_bridge->of_node);
 
        comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DPI);
        if (comp_id < 0) {
@@ -791,8 +793,15 @@ static int mtk_dpi_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, dpi);
 
+       dpi->bridge.funcs = &mtk_dpi_bridge_funcs;
+       dpi->bridge.of_node = dev->of_node;
+       dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
+
+       drm_bridge_add(&dpi->bridge);
+
        ret = component_add(dev, &mtk_dpi_component_ops);
        if (ret) {
+               drm_bridge_remove(&dpi->bridge);
                dev_err(dev, "Failed to add component: %d\n", ret);
                return ret;
        }
@@ -802,7 +811,10 @@ static int mtk_dpi_probe(struct platform_device *pdev)
 
 static int mtk_dpi_remove(struct platform_device *pdev)
 {
+       struct mtk_dpi *dpi = platform_get_drvdata(pdev);
+
        component_del(&pdev->dev, &mtk_dpi_component_ops);
+       drm_bridge_remove(&dpi->bridge);
 
        return 0;
 }
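
Taken together, the mtk_dpi hunks convert the driver from a set of encoder helper callbacks into a drm_bridge: the no-op mode_fixup and atomic_check implementations simply disappear (omitting these optional ops has the same effect), the bridge is registered at probe time with drm_bridge_add(), and bind time only attaches the encoder to it. The resulting chain, as a sketch built from the calls in the diff:

	/* encoder -> dpi->bridge -> dpi->next_bridge (panel/bridge from DT) */
	drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL, 0);

	/* ...which reaches mtk_dpi_bridge_attach(), which chains onward: */
	drm_bridge_attach(bridge->encoder, dpi->next_bridge, &dpi->bridge, flags);
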
index 57c88de..bfd42ae 100644
@@ -13,6 +13,8 @@
 #include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/soc/mediatek/mtk-cmdq.h>
+#include <drm/drm_print.h>
+
 #include "mtk_drm_drv.h"
 #include "mtk_drm_plane.h"
 #include "mtk_drm_ddp_comp.h"
@@ -412,6 +414,22 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = {
        [DDP_COMPONENT_WDMA1]   = { MTK_DISP_WDMA,      1, NULL },
 };
 
+static bool mtk_drm_find_comp_in_ddp(struct mtk_ddp_comp ddp_comp,
+                                    const enum mtk_ddp_comp_id *path,
+                                    unsigned int path_len)
+{
+       unsigned int i;
+
+       if (path == NULL)
+               return false;
+
+       for (i = 0U; i < path_len; i++)
+               if (ddp_comp.id == path[i])
+                       return true;
+
+       return false;
+}
+
 int mtk_ddp_comp_get_id(struct device_node *node,
                        enum mtk_ddp_comp_type comp_type)
 {
@@ -427,6 +445,26 @@ int mtk_ddp_comp_get_id(struct device_node *node,
        return -EINVAL;
 }
 
+unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
+                                               struct mtk_ddp_comp ddp_comp)
+{
+       struct mtk_drm_private *private = drm->dev_private;
+       unsigned int ret = 0;
+
+       if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->main_path, private->data->main_len))
+               ret = BIT(0);
+       else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->ext_path,
+                                         private->data->ext_len))
+               ret = BIT(1);
+       else if (mtk_drm_find_comp_in_ddp(ddp_comp, private->data->third_path,
+                                         private->data->third_len))
+               ret = BIT(2);
+       else
+               DRM_INFO("Failed to find comp in ddp table\n");
+
+       return ret;
+}
+
 int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
                      struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
                      const struct mtk_ddp_comp_funcs *funcs)
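
The new helper derives an encoder's CRTC from the hardware description: it checks which pipeline (main, external, or third) the component sits in and returns BIT(0), BIT(1), or BIT(2) accordingly. The DPI hunk above and the DSI hunk below then replace their hard-coded masks with it:

	/* as used in mtk_dpi_bind() and mtk_dsi_encoder_init() */
	encoder->possible_crtcs =
		mtk_drm_find_possible_crtc_by_comp(drm_dev, comp);
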
index debe363..1d9e00b 100644
@@ -202,6 +202,8 @@ static inline void mtk_ddp_ctm_set(struct mtk_ddp_comp *comp,
 
 int mtk_ddp_comp_get_id(struct device_node *node,
                        enum mtk_ddp_comp_type comp_type);
+unsigned int mtk_drm_find_possible_crtc_by_comp(struct drm_device *drm,
+                                               struct mtk_ddp_comp ddp_comp);
 int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node,
                      struct mtk_ddp_comp *comp, enum mtk_ddp_comp_id comp_id,
                      const struct mtk_ddp_comp_funcs *funcs);
index 040a8f3..2350e32 100644
@@ -74,6 +74,19 @@ static const enum mtk_ddp_comp_id mt2701_mtk_ddp_ext[] = {
        DDP_COMPONENT_DPI0,
 };
 
+static const enum mtk_ddp_comp_id mt7623_mtk_ddp_main[] = {
+       DDP_COMPONENT_OVL0,
+       DDP_COMPONENT_RDMA0,
+       DDP_COMPONENT_COLOR0,
+       DDP_COMPONENT_BLS,
+       DDP_COMPONENT_DPI0,
+};
+
+static const enum mtk_ddp_comp_id mt7623_mtk_ddp_ext[] = {
+       DDP_COMPONENT_RDMA1,
+       DDP_COMPONENT_DSI0,
+};
+
 static const enum mtk_ddp_comp_id mt2712_mtk_ddp_main[] = {
        DDP_COMPONENT_OVL0,
        DDP_COMPONENT_COLOR0,
@@ -127,6 +140,14 @@ static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
        .shadow_register = true,
 };
 
+static const struct mtk_mmsys_driver_data mt7623_mmsys_driver_data = {
+       .main_path = mt7623_mtk_ddp_main,
+       .main_len = ARRAY_SIZE(mt7623_mtk_ddp_main),
+       .ext_path = mt7623_mtk_ddp_ext,
+       .ext_len = ARRAY_SIZE(mt7623_mtk_ddp_ext),
+       .shadow_register = true,
+};
+
 static const struct mtk_mmsys_driver_data mt2712_mmsys_driver_data = {
        .main_path = mt2712_mtk_ddp_main,
        .main_len = ARRAY_SIZE(mt2712_mtk_ddp_main),
@@ -422,6 +443,8 @@ static const struct of_device_id mtk_ddp_comp_dt_ids[] = {
 static const struct of_device_id mtk_drm_of_ids[] = {
        { .compatible = "mediatek,mt2701-mmsys",
          .data = &mt2701_mmsys_driver_data},
+       { .compatible = "mediatek,mt7623-mmsys",
+         .data = &mt7623_mmsys_driver_data},
        { .compatible = "mediatek,mt2712-mmsys",
          .data = &mt2712_mmsys_driver_data},
        { .compatible = "mediatek,mt8173-mmsys",
index 16fd99d..20f3489 100644
@@ -970,11 +970,7 @@ static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
                return ret;
        }
 
-       /*
-        * Currently display data paths are statically assigned to a crtc each.
-        * crtc 0 is OVL0 -> COLOR0 -> AAL -> OD -> RDMA0 -> UFOE -> DSI0
-        */
-       dsi->encoder.possible_crtcs = 1;
+       dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->ddp_comp);
 
        ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
                                DRM_BRIDGE_ATTACH_NO_CONNECTOR);
index f2e9b42..0ed7b0b 100644
@@ -12,6 +12,7 @@
 #include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/mfd/syscon.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/of_platform.h>
 #include <linux/of.h>
@@ -145,11 +146,16 @@ struct hdmi_audio_param {
        struct hdmi_codec_params codec_params;
 };
 
+struct mtk_hdmi_conf {
+       bool tz_disabled;
+};
+
 struct mtk_hdmi {
        struct drm_bridge bridge;
        struct drm_bridge *next_bridge;
        struct drm_connector conn;
        struct device *dev;
+       const struct mtk_hdmi_conf *conf;
        struct phy *phy;
        struct device *cec_dev;
        struct i2c_adapter *ddc_adpt;
@@ -234,7 +240,6 @@ static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
 static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
 {
        struct arm_smccc_res res;
-       struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(hdmi->phy);
 
        /*
         * MT8173 HDMI hardware has an output control bit to enable/disable HDMI
@@ -242,7 +247,7 @@ static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
         * The ARM trusted firmware provides an API for the HDMI driver to set
         * this control bit to enable HDMI output in supervisor mode.
         */
-       if (hdmi_phy->conf && hdmi_phy->conf->tz_disabled)
+       if (hdmi->conf && hdmi->conf->tz_disabled)
                regmap_update_bits(hdmi->sys_regmap,
                                   hdmi->sys_offset + HDMI_SYS_CFG20,
                                   0x80008005, enable ? 0x80000005 : 0x8000);
@@ -1723,6 +1728,7 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        hdmi->dev = dev;
+       hdmi->conf = of_device_get_match_data(dev);
 
        ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev);
        if (ret)
@@ -1803,8 +1809,16 @@ static int mtk_hdmi_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops,
                         mtk_hdmi_suspend, mtk_hdmi_resume);
 
+static const struct mtk_hdmi_conf mtk_hdmi_conf_mt2701 = {
+       .tz_disabled = true,
+};
+
 static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
-       { .compatible = "mediatek,mt8173-hdmi", },
+       { .compatible = "mediatek,mt2701-hdmi",
+         .data = &mtk_hdmi_conf_mt2701,
+       },
+       { .compatible = "mediatek,mt8173-hdmi",
+       },
        {}
 };
 
@@ -1819,7 +1833,6 @@ static struct platform_driver mtk_hdmi_driver = {
 };
 
 static struct platform_driver * const mtk_hdmi_drivers[] = {
-       &mtk_hdmi_phy_driver,
        &mtk_hdmi_ddc_driver,
        &mtk_cec_driver,
        &mtk_hdmi_driver,
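
These hunks finish moving the MediaTek HDMI PHY out of the DRM driver: the PHY code now lives under drivers/phy and is pulled in through the PHY_MTK_HDMI select in the Kconfig hunk above, which is why the three PHY source files below are deleted outright. The one PHY-adjacent quirk the HDMI controller still needs, tz_disabled, moves into a per-SoC mtk_hdmi_conf resolved from OF match data:

	hdmi->conf = of_device_get_match_data(dev);	/* NULL for mt8173 */

	/* later, instead of reaching into the PHY's conf: */
	if (hdmi->conf && hdmi->conf->tz_disabled)
		regmap_update_bits(hdmi->sys_regmap,
				   hdmi->sys_offset + HDMI_SYS_CFG20,
				   0x80008005, enable ? 0x80000005 : 0x8000);
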
index bb3653d..472bf14 100644
@@ -5,7 +5,6 @@
  */
 #ifndef _MTK_HDMI_CTRL_H
 #define _MTK_HDMI_CTRL_H
-#include "mtk_hdmi_phy.h"
 
 struct platform_driver;
 
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.c
deleted file mode 100644
index 5223498..0000000
+++ /dev/null
@@ -1,210 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2018 MediaTek Inc.
- * Author: Jie Qiu <jie.qiu@mediatek.com>
- */
-
-#include "mtk_hdmi_phy.h"
-
-static int mtk_hdmi_phy_power_on(struct phy *phy);
-static int mtk_hdmi_phy_power_off(struct phy *phy);
-
-static const struct phy_ops mtk_hdmi_phy_dev_ops = {
-       .power_on = mtk_hdmi_phy_power_on,
-       .power_off = mtk_hdmi_phy_power_off,
-       .owner = THIS_MODULE,
-};
-
-void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
-                            u32 bits)
-{
-       void __iomem *reg = hdmi_phy->regs + offset;
-       u32 tmp;
-
-       tmp = readl(reg);
-       tmp &= ~bits;
-       writel(tmp, reg);
-}
-
-void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
-                          u32 bits)
-{
-       void __iomem *reg = hdmi_phy->regs + offset;
-       u32 tmp;
-
-       tmp = readl(reg);
-       tmp |= bits;
-       writel(tmp, reg);
-}
-
-void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
-                      u32 val, u32 mask)
-{
-       void __iomem *reg = hdmi_phy->regs + offset;
-       u32 tmp;
-
-       tmp = readl(reg);
-       tmp = (tmp & ~mask) | (val & mask);
-       writel(tmp, reg);
-}
-
-inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw)
-{
-       return container_of(hw, struct mtk_hdmi_phy, pll_hw);
-}
-
-static int mtk_hdmi_phy_power_on(struct phy *phy)
-{
-       struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
-       int ret;
-
-       ret = clk_prepare_enable(hdmi_phy->pll);
-       if (ret < 0)
-               return ret;
-
-       hdmi_phy->conf->hdmi_phy_enable_tmds(hdmi_phy);
-       return 0;
-}
-
-static int mtk_hdmi_phy_power_off(struct phy *phy)
-{
-       struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
-
-       hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy);
-       clk_disable_unprepare(hdmi_phy->pll);
-
-       return 0;
-}
-
-static const struct phy_ops *
-mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
-{
-       if (hdmi_phy && hdmi_phy->conf &&
-           hdmi_phy->conf->hdmi_phy_enable_tmds &&
-           hdmi_phy->conf->hdmi_phy_disable_tmds)
-               return &mtk_hdmi_phy_dev_ops;
-
-       dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n");
-               return NULL;
-}
-
-static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
-                                     struct clk_init_data *clk_init)
-{
-       clk_init->flags = hdmi_phy->conf->flags;
-       clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops;
-}
-
-static int mtk_hdmi_phy_probe(struct platform_device *pdev)
-{
-       struct device *dev = &pdev->dev;
-       struct mtk_hdmi_phy *hdmi_phy;
-       struct resource *mem;
-       struct clk *ref_clk;
-       const char *ref_clk_name;
-       struct clk_init_data clk_init = {
-               .num_parents = 1,
-               .parent_names = (const char * const *)&ref_clk_name,
-       };
-
-       struct phy *phy;
-       struct phy_provider *phy_provider;
-       int ret;
-
-       hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL);
-       if (!hdmi_phy)
-               return -ENOMEM;
-
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       hdmi_phy->regs = devm_ioremap_resource(dev, mem);
-       if (IS_ERR(hdmi_phy->regs)) {
-               ret = PTR_ERR(hdmi_phy->regs);
-               dev_err(dev, "Failed to get memory resource: %d\n", ret);
-               return ret;
-       }
-
-       ref_clk = devm_clk_get(dev, "pll_ref");
-       if (IS_ERR(ref_clk)) {
-               ret = PTR_ERR(ref_clk);
-               dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
-                       ret);
-               return ret;
-       }
-       ref_clk_name = __clk_get_name(ref_clk);
-
-       ret = of_property_read_string(dev->of_node, "clock-output-names",
-                                     &clk_init.name);
-       if (ret < 0) {
-               dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
-               return ret;
-       }
-
-       hdmi_phy->dev = dev;
-       hdmi_phy->conf =
-               (struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev);
-       mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init);
-       hdmi_phy->pll_hw.init = &clk_init;
-       hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
-       if (IS_ERR(hdmi_phy->pll)) {
-               ret = PTR_ERR(hdmi_phy->pll);
-               dev_err(dev, "Failed to register PLL: %d\n", ret);
-               return ret;
-       }
-
-       ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
-                                  &hdmi_phy->ibias);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
-               return ret;
-       }
-
-       ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
-                                  &hdmi_phy->ibias_up);
-       if (ret < 0) {
-               dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
-               return ret;
-       }
-
-       dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
-       hdmi_phy->drv_imp_clk = 0x30;
-       hdmi_phy->drv_imp_d2 = 0x30;
-       hdmi_phy->drv_imp_d1 = 0x30;
-       hdmi_phy->drv_imp_d0 = 0x30;
-
-       phy = devm_phy_create(dev, NULL, mtk_hdmi_phy_dev_get_ops(hdmi_phy));
-       if (IS_ERR(phy)) {
-               dev_err(dev, "Failed to create HDMI PHY\n");
-               return PTR_ERR(phy);
-       }
-       phy_set_drvdata(phy, hdmi_phy);
-
-       phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-       if (IS_ERR(phy_provider)) {
-               dev_err(dev, "Failed to register HDMI PHY\n");
-               return PTR_ERR(phy_provider);
-       }
-
-       return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
-                                  hdmi_phy->pll);
-}
-
-static const struct of_device_id mtk_hdmi_phy_match[] = {
-       { .compatible = "mediatek,mt2701-hdmi-phy",
-         .data = &mtk_hdmi_phy_2701_conf,
-       },
-       { .compatible = "mediatek,mt8173-hdmi-phy",
-         .data = &mtk_hdmi_phy_8173_conf,
-       },
-       {},
-};
-
-struct platform_driver mtk_hdmi_phy_driver = {
-       .probe = mtk_hdmi_phy_probe,
-       .driver = {
-               .name = "mediatek-hdmi-phy",
-               .of_match_table = mtk_hdmi_phy_match,
-       },
-};
-
-MODULE_DESCRIPTION("MediaTek HDMI PHY Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h b/drivers/gpu/drm/mediatek/mtk_hdmi_phy.h
deleted file mode 100644
index 2d8b318..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (c) 2018 MediaTek Inc.
- * Author: Chunhui Dai <chunhui.dai@mediatek.com>
- */
-
-#ifndef _MTK_HDMI_PHY_H
-#define _MTK_HDMI_PHY_H
-#include <linux/clk.h>
-#include <linux/clk-provider.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/phy/phy.h>
-#include <linux/platform_device.h>
-#include <linux/types.h>
-
-struct mtk_hdmi_phy;
-
-struct mtk_hdmi_phy_conf {
-       bool tz_disabled;
-       unsigned long flags;
-       const struct clk_ops *hdmi_phy_clk_ops;
-       void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
-       void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
-};
-
-struct mtk_hdmi_phy {
-       void __iomem *regs;
-       struct device *dev;
-       struct mtk_hdmi_phy_conf *conf;
-       struct clk *pll;
-       struct clk_hw pll_hw;
-       unsigned long pll_rate;
-       unsigned char drv_imp_clk;
-       unsigned char drv_imp_d2;
-       unsigned char drv_imp_d1;
-       unsigned char drv_imp_d0;
-       unsigned int ibias;
-       unsigned int ibias_up;
-};
-
-void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
-                            u32 bits);
-void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
-                          u32 bits);
-void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
-                      u32 val, u32 mask);
-struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
-
-extern struct platform_driver mtk_hdmi_phy_driver;
-extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
-extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf;
-
-#endif /* _MTK_HDMI_PHY_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt2701_hdmi_phy.c
deleted file mode 100644
index d3cc402..0000000
+++ /dev/null
@@ -1,249 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2018 MediaTek Inc.
- * Author: Chunhui Dai <chunhui.dai@mediatek.com>
- */
-
-#include "mtk_hdmi_phy.h"
-
-#define HDMI_CON0      0x00
-#define RG_HDMITX_DRV_IBIAS            0
-#define RG_HDMITX_DRV_IBIAS_MASK       (0x3f << 0)
-#define RG_HDMITX_EN_SER               12
-#define RG_HDMITX_EN_SER_MASK          (0x0f << 12)
-#define RG_HDMITX_EN_SLDO              16
-#define RG_HDMITX_EN_SLDO_MASK         (0x0f << 16)
-#define RG_HDMITX_EN_PRED              20
-#define RG_HDMITX_EN_PRED_MASK         (0x0f << 20)
-#define RG_HDMITX_EN_IMP               24
-#define RG_HDMITX_EN_IMP_MASK          (0x0f << 24)
-#define RG_HDMITX_EN_DRV               28
-#define RG_HDMITX_EN_DRV_MASK          (0x0f << 28)
-
-#define HDMI_CON1      0x04
-#define RG_HDMITX_PRED_IBIAS           18
-#define RG_HDMITX_PRED_IBIAS_MASK      (0x0f << 18)
-#define RG_HDMITX_PRED_IMP             (0x01 << 22)
-#define RG_HDMITX_DRV_IMP              26
-#define RG_HDMITX_DRV_IMP_MASK         (0x3f << 26)
-
-#define HDMI_CON2      0x08
-#define RG_HDMITX_EN_TX_CKLDO          (0x01 << 0)
-#define RG_HDMITX_EN_TX_POSDIV         (0x01 << 1)
-#define RG_HDMITX_TX_POSDIV            3
-#define RG_HDMITX_TX_POSDIV_MASK       (0x03 << 3)
-#define RG_HDMITX_EN_MBIAS             (0x01 << 6)
-#define RG_HDMITX_MBIAS_LPF_EN         (0x01 << 7)
-
-#define HDMI_CON4      0x10
-#define RG_HDMITX_RESERVE_MASK         (0xffffffff << 0)
-
-#define HDMI_CON6      0x18
-#define RG_HTPLL_BR                    0
-#define RG_HTPLL_BR_MASK               (0x03 << 0)
-#define RG_HTPLL_BC                    2
-#define RG_HTPLL_BC_MASK               (0x03 << 2)
-#define RG_HTPLL_BP                    4
-#define RG_HTPLL_BP_MASK               (0x0f << 4)
-#define RG_HTPLL_IR                    8
-#define RG_HTPLL_IR_MASK               (0x0f << 8)
-#define RG_HTPLL_IC                    12
-#define RG_HTPLL_IC_MASK               (0x0f << 12)
-#define RG_HTPLL_POSDIV                        16
-#define RG_HTPLL_POSDIV_MASK           (0x03 << 16)
-#define RG_HTPLL_PREDIV                        18
-#define RG_HTPLL_PREDIV_MASK           (0x03 << 18)
-#define RG_HTPLL_FBKSEL                        20
-#define RG_HTPLL_FBKSEL_MASK           (0x03 << 20)
-#define RG_HTPLL_RLH_EN                        (0x01 << 22)
-#define RG_HTPLL_FBKDIV                        24
-#define RG_HTPLL_FBKDIV_MASK           (0x7f << 24)
-#define RG_HTPLL_EN                    (0x01 << 31)
-
-#define HDMI_CON7      0x1c
-#define RG_HTPLL_AUTOK_EN              (0x01 << 23)
-#define RG_HTPLL_DIVEN                 28
-#define RG_HTPLL_DIVEN_MASK            (0x07 << 28)
-
-static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
-       usleep_range(80, 100);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
-       usleep_range(80, 100);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
-       usleep_range(80, 100);
-       return 0;
-}
-
-static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
-       usleep_range(80, 100);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
-       usleep_range(80, 100);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
-       usleep_range(80, 100);
-}
-
-static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-                                   unsigned long *parent_rate)
-{
-       return rate;
-}
-
-static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
-                                unsigned long parent_rate)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-       u32 pos_div;
-
-       if (rate <= 64000000)
-               pos_div = 3;
-       else if (rate <= 128000000)
-               pos_div = 2;
-       else
-               pos_div = 1;
-
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC),
-                         RG_HTPLL_IC_MASK);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR),
-                         RG_HTPLL_IR_MASK);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON2, (pos_div << RG_HDMITX_TX_POSDIV),
-                         RG_HDMITX_TX_POSDIV_MASK);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (1 << RG_HTPLL_FBKSEL),
-                         RG_HTPLL_FBKSEL_MASK);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (19 << RG_HTPLL_FBKDIV),
-                         RG_HTPLL_FBKDIV_MASK);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON7, (0x2 << RG_HTPLL_DIVEN),
-                         RG_HTPLL_DIVEN_MASK);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0xc << RG_HTPLL_BP),
-                         RG_HTPLL_BP_MASK);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x2 << RG_HTPLL_BC),
-                         RG_HTPLL_BC_MASK);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_BR),
-                         RG_HTPLL_BR_MASK);
-
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PRED_IMP);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x3 << RG_HDMITX_PRED_IBIAS),
-                         RG_HDMITX_PRED_IBIAS_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_IMP_MASK);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x28 << RG_HDMITX_DRV_IMP),
-                         RG_HDMITX_DRV_IMP_MASK);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, 0x28, RG_HDMITX_RESERVE_MASK);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, (0xa << RG_HDMITX_DRV_IBIAS),
-                         RG_HDMITX_DRV_IBIAS_MASK);
-       return 0;
-}
-
-static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
-                                             unsigned long parent_rate)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-       unsigned long out_rate, val;
-
-       val = (readl(hdmi_phy->regs + HDMI_CON6)
-              & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV;
-       switch (val) {
-       case 0x00:
-               out_rate = parent_rate;
-               break;
-       case 0x01:
-               out_rate = parent_rate / 2;
-               break;
-       default:
-               out_rate = parent_rate / 4;
-               break;
-       }
-
-       val = (readl(hdmi_phy->regs + HDMI_CON6)
-              & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV;
-       out_rate *= (val + 1) * 2;
-       val = (readl(hdmi_phy->regs + HDMI_CON2)
-              & RG_HDMITX_TX_POSDIV_MASK);
-       out_rate >>= (val >> RG_HDMITX_TX_POSDIV);
-
-       if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV)
-               out_rate /= 5;
-
-       return out_rate;
-}
-
-static const struct clk_ops mtk_hdmi_phy_pll_ops = {
-       .prepare = mtk_hdmi_pll_prepare,
-       .unprepare = mtk_hdmi_pll_unprepare,
-       .set_rate = mtk_hdmi_pll_set_rate,
-       .round_rate = mtk_hdmi_pll_round_rate,
-       .recalc_rate = mtk_hdmi_pll_recalc_rate,
-};
-
-static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
-{
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
-       usleep_range(80, 100);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
-       usleep_range(80, 100);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
-       usleep_range(80, 100);
-}
-
-static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
-{
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
-       usleep_range(80, 100);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
-       usleep_range(80, 100);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
-       usleep_range(80, 100);
-}
-
-struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = {
-       .tz_disabled = true,
-       .flags = CLK_SET_RATE_GATE,
-       .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
-       .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
-       .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
-};
-
-MODULE_AUTHOR("Chunhui Dai <chunhui.dai@mediatek.com>");
-MODULE_DESCRIPTION("MediaTek HDMI PHY Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
deleted file mode 100644
index 827b937..0000000
+++ /dev/null
@@ -1,282 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014 MediaTek Inc.
- * Author: Jie Qiu <jie.qiu@mediatek.com>
- */
-
-#include "mtk_hdmi_phy.h"
-
-#define HDMI_CON0              0x00
-#define RG_HDMITX_PLL_EN               BIT(31)
-#define RG_HDMITX_PLL_FBKDIV           (0x7f << 24)
-#define PLL_FBKDIV_SHIFT               24
-#define RG_HDMITX_PLL_FBKSEL           (0x3 << 22)
-#define PLL_FBKSEL_SHIFT               22
-#define RG_HDMITX_PLL_PREDIV           (0x3 << 20)
-#define PREDIV_SHIFT                   20
-#define RG_HDMITX_PLL_POSDIV           (0x3 << 18)
-#define POSDIV_SHIFT                   18
-#define RG_HDMITX_PLL_RST_DLY          (0x3 << 16)
-#define RG_HDMITX_PLL_IR               (0xf << 12)
-#define PLL_IR_SHIFT                   12
-#define RG_HDMITX_PLL_IC               (0xf << 8)
-#define PLL_IC_SHIFT                   8
-#define RG_HDMITX_PLL_BP               (0xf << 4)
-#define PLL_BP_SHIFT                   4
-#define RG_HDMITX_PLL_BR               (0x3 << 2)
-#define PLL_BR_SHIFT                   2
-#define RG_HDMITX_PLL_BC               (0x3 << 0)
-#define PLL_BC_SHIFT                   0
-#define HDMI_CON1              0x04
-#define RG_HDMITX_PLL_DIVEN            (0x7 << 29)
-#define PLL_DIVEN_SHIFT                        29
-#define RG_HDMITX_PLL_AUTOK_EN         BIT(28)
-#define RG_HDMITX_PLL_AUTOK_KF         (0x3 << 26)
-#define RG_HDMITX_PLL_AUTOK_KS         (0x3 << 24)
-#define RG_HDMITX_PLL_AUTOK_LOAD       BIT(23)
-#define RG_HDMITX_PLL_BAND             (0x3f << 16)
-#define RG_HDMITX_PLL_REF_SEL          BIT(15)
-#define RG_HDMITX_PLL_BIAS_EN          BIT(14)
-#define RG_HDMITX_PLL_BIAS_LPF_EN      BIT(13)
-#define RG_HDMITX_PLL_TXDIV_EN         BIT(12)
-#define RG_HDMITX_PLL_TXDIV            (0x3 << 10)
-#define PLL_TXDIV_SHIFT                        10
-#define RG_HDMITX_PLL_LVROD_EN         BIT(9)
-#define RG_HDMITX_PLL_MONVC_EN         BIT(8)
-#define RG_HDMITX_PLL_MONCK_EN         BIT(7)
-#define RG_HDMITX_PLL_MONREF_EN                BIT(6)
-#define RG_HDMITX_PLL_TST_EN           BIT(5)
-#define RG_HDMITX_PLL_TST_CK_EN                BIT(4)
-#define RG_HDMITX_PLL_TST_SEL          (0xf << 0)
-#define HDMI_CON2              0x08
-#define RGS_HDMITX_PLL_AUTOK_BAND      (0x7f << 8)
-#define RGS_HDMITX_PLL_AUTOK_FAIL      BIT(1)
-#define RG_HDMITX_EN_TX_CKLDO          BIT(0)
-#define HDMI_CON3              0x0c
-#define RG_HDMITX_SER_EN               (0xf << 28)
-#define RG_HDMITX_PRD_EN               (0xf << 24)
-#define RG_HDMITX_PRD_IMP_EN           (0xf << 20)
-#define RG_HDMITX_DRV_EN               (0xf << 16)
-#define RG_HDMITX_DRV_IMP_EN           (0xf << 12)
-#define DRV_IMP_EN_SHIFT               12
-#define RG_HDMITX_MHLCK_FORCE          BIT(10)
-#define RG_HDMITX_MHLCK_PPIX_EN                BIT(9)
-#define RG_HDMITX_MHLCK_EN             BIT(8)
-#define RG_HDMITX_SER_DIN_SEL          (0xf << 4)
-#define RG_HDMITX_SER_5T1_BIST_EN      BIT(3)
-#define RG_HDMITX_SER_BIST_TOG         BIT(2)
-#define RG_HDMITX_SER_DIN_TOG          BIT(1)
-#define RG_HDMITX_SER_CLKDIG_INV       BIT(0)
-#define HDMI_CON4              0x10
-#define RG_HDMITX_PRD_IBIAS_CLK                (0xf << 24)
-#define RG_HDMITX_PRD_IBIAS_D2         (0xf << 16)
-#define RG_HDMITX_PRD_IBIAS_D1         (0xf << 8)
-#define RG_HDMITX_PRD_IBIAS_D0         (0xf << 0)
-#define PRD_IBIAS_CLK_SHIFT            24
-#define PRD_IBIAS_D2_SHIFT             16
-#define PRD_IBIAS_D1_SHIFT             8
-#define PRD_IBIAS_D0_SHIFT             0
-#define HDMI_CON5              0x14
-#define RG_HDMITX_DRV_IBIAS_CLK                (0x3f << 24)
-#define RG_HDMITX_DRV_IBIAS_D2         (0x3f << 16)
-#define RG_HDMITX_DRV_IBIAS_D1         (0x3f << 8)
-#define RG_HDMITX_DRV_IBIAS_D0         (0x3f << 0)
-#define DRV_IBIAS_CLK_SHIFT            24
-#define DRV_IBIAS_D2_SHIFT             16
-#define DRV_IBIAS_D1_SHIFT             8
-#define DRV_IBIAS_D0_SHIFT             0
-#define HDMI_CON6              0x18
-#define RG_HDMITX_DRV_IMP_CLK          (0x3f << 24)
-#define RG_HDMITX_DRV_IMP_D2           (0x3f << 16)
-#define RG_HDMITX_DRV_IMP_D1           (0x3f << 8)
-#define RG_HDMITX_DRV_IMP_D0           (0x3f << 0)
-#define DRV_IMP_CLK_SHIFT              24
-#define DRV_IMP_D2_SHIFT               16
-#define DRV_IMP_D1_SHIFT               8
-#define DRV_IMP_D0_SHIFT               0
-#define HDMI_CON7              0x1c
-#define RG_HDMITX_MHLCK_DRV_IBIAS      (0x1f << 27)
-#define RG_HDMITX_SER_DIN              (0x3ff << 16)
-#define RG_HDMITX_CHLDC_TST            (0xf << 12)
-#define RG_HDMITX_CHLCK_TST            (0xf << 8)
-#define RG_HDMITX_RESERVE              (0xff << 0)
-#define HDMI_CON8              0x20
-#define RGS_HDMITX_2T1_LEV             (0xf << 16)
-#define RGS_HDMITX_2T1_EDG             (0xf << 12)
-#define RGS_HDMITX_5T1_LEV             (0xf << 8)
-#define RGS_HDMITX_5T1_EDG             (0xf << 4)
-#define RGS_HDMITX_PLUG_TST            BIT(0)
-
-static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
-       usleep_range(100, 150);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
-       usleep_range(100, 150);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
-
-       return 0;
-}
-
-static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
-       usleep_range(100, 150);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
-       usleep_range(100, 150);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
-       usleep_range(100, 150);
-}
-
-static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
-                                   unsigned long *parent_rate)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-       hdmi_phy->pll_rate = rate;
-       if (rate <= 74250000)
-               *parent_rate = rate;
-       else
-               *parent_rate = rate / 2;
-
-       return rate;
-}
-
-static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
-                                unsigned long parent_rate)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-       unsigned int pre_div;
-       unsigned int div;
-       unsigned int pre_ibias;
-       unsigned int hdmi_ibias;
-       unsigned int imp_en;
-
-       dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__,
-               rate, parent_rate);
-
-       if (rate <= 27000000) {
-               pre_div = 0;
-               div = 3;
-       } else if (rate <= 74250000) {
-               pre_div = 1;
-               div = 2;
-       } else {
-               pre_div = 1;
-               div = 1;
-       }
-
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
-                         (pre_div << PREDIV_SHIFT), RG_HDMITX_PLL_PREDIV);
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
-                         (0x1 << PLL_IC_SHIFT) | (0x1 << PLL_IR_SHIFT),
-                         RG_HDMITX_PLL_IC | RG_HDMITX_PLL_IR);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
-                         (div << PLL_TXDIV_SHIFT), RG_HDMITX_PLL_TXDIV);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
-                         (0x1 << PLL_FBKSEL_SHIFT) | (19 << PLL_FBKDIV_SHIFT),
-                         RG_HDMITX_PLL_FBKSEL | RG_HDMITX_PLL_FBKDIV);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
-                         (0x2 << PLL_DIVEN_SHIFT), RG_HDMITX_PLL_DIVEN);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
-                         (0xc << PLL_BP_SHIFT) | (0x2 << PLL_BC_SHIFT) |
-                         (0x1 << PLL_BR_SHIFT),
-                         RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
-                         RG_HDMITX_PLL_BR);
-       if (rate < 165000000) {
-               mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
-                                       RG_HDMITX_PRD_IMP_EN);
-               pre_ibias = 0x3;
-               imp_en = 0x0;
-               hdmi_ibias = hdmi_phy->ibias;
-       } else {
-               mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
-                                     RG_HDMITX_PRD_IMP_EN);
-               pre_ibias = 0x6;
-               imp_en = 0xf;
-               hdmi_ibias = hdmi_phy->ibias_up;
-       }
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
-                         (pre_ibias << PRD_IBIAS_CLK_SHIFT) |
-                         (pre_ibias << PRD_IBIAS_D2_SHIFT) |
-                         (pre_ibias << PRD_IBIAS_D1_SHIFT) |
-                         (pre_ibias << PRD_IBIAS_D0_SHIFT),
-                         RG_HDMITX_PRD_IBIAS_CLK |
-                         RG_HDMITX_PRD_IBIAS_D2 |
-                         RG_HDMITX_PRD_IBIAS_D1 |
-                         RG_HDMITX_PRD_IBIAS_D0);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
-                         (imp_en << DRV_IMP_EN_SHIFT),
-                         RG_HDMITX_DRV_IMP_EN);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
-                         (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
-                         (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
-                         (hdmi_phy->drv_imp_d1 << DRV_IMP_D1_SHIFT) |
-                         (hdmi_phy->drv_imp_d0 << DRV_IMP_D0_SHIFT),
-                         RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
-                         RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
-       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
-                         (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) |
-                         (hdmi_ibias << DRV_IBIAS_D2_SHIFT) |
-                         (hdmi_ibias << DRV_IBIAS_D1_SHIFT) |
-                         (hdmi_ibias << DRV_IBIAS_D0_SHIFT),
-                         RG_HDMITX_DRV_IBIAS_CLK |
-                         RG_HDMITX_DRV_IBIAS_D2 |
-                         RG_HDMITX_DRV_IBIAS_D1 |
-                         RG_HDMITX_DRV_IBIAS_D0);
-       return 0;
-}
-
-static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
-                                             unsigned long parent_rate)
-{
-       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
-
-       return hdmi_phy->pll_rate;
-}
-
-static const struct clk_ops mtk_hdmi_phy_pll_ops = {
-       .prepare = mtk_hdmi_pll_prepare,
-       .unprepare = mtk_hdmi_pll_unprepare,
-       .set_rate = mtk_hdmi_pll_set_rate,
-       .round_rate = mtk_hdmi_pll_round_rate,
-       .recalc_rate = mtk_hdmi_pll_recalc_rate,
-};
-
-static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
-{
-       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
-                             RG_HDMITX_SER_EN | RG_HDMITX_PRD_EN |
-                             RG_HDMITX_DRV_EN);
-       usleep_range(100, 150);
-}
-
-static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
-{
-       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
-                               RG_HDMITX_DRV_EN | RG_HDMITX_PRD_EN |
-                               RG_HDMITX_SER_EN);
-}
-
-struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = {
-       .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
-       .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
-       .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
-       .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
-};
-
-MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
-MODULE_DESCRIPTION("MediaTek MT8173 HDMI PHY Driver");
-MODULE_LICENSE("GPL v2");
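For reference, the divider selection that the deleted mtk_hdmi_pll_set_rate()/mtk_hdmi_pll_round_rate() pair implemented reduces to two rate thresholds. A minimal userspace sketch of that logic (thresholds and divider values are taken from the code above; the helper name and sample rates are ours):

#include <stdio.h>

/* Pick PREDIV/TXDIV and the parent rate for a target TMDS rate, per the
 * thresholds in the deleted MT8173 PHY driver: <= 27 MHz, <= 74.25 MHz,
 * and everything above.
 */
static void mt8173_pll_pick_dividers(unsigned long rate)
{
	unsigned int pre_div, div;
	unsigned long parent;

	if (rate <= 27000000UL) {
		pre_div = 0;
		div = 3;
	} else if (rate <= 74250000UL) {
		pre_div = 1;
		div = 2;
	} else {
		pre_div = 1;
		div = 1;
	}

	/* round_rate(): parent runs at rate for <= 74.25 MHz, rate/2 above */
	parent = (rate <= 74250000UL) ? rate : rate / 2;

	printf("%lu Hz: pre_div=%u div=%u parent=%lu Hz\n",
	       rate, pre_div, div, parent);
}

int main(void)
{
	mt8173_pll_pick_dividers(27000000);   /* SD pixel clocks */
	mt8173_pll_pick_dividers(74250000);   /* 720p/1080i */
	mt8173_pll_pick_dividers(148500000);  /* 1080p */
	return 0;
}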
index 6deaa7d..e5816b4 100644 (file)
@@ -6,8 +6,8 @@ config DRM_MSM
        depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
        depends on OF && COMMON_CLK
        depends on MMU
-       depends on INTERCONNECT || !INTERCONNECT
        depends on QCOM_OCMEM || QCOM_OCMEM=n
+       select IOMMU_IO_PGTABLE
        select QCOM_MDT_LOADER if ARCH_QCOM
        select REGULATOR
        select DRM_KMS_HELPER
@@ -57,6 +57,15 @@ config DRM_MSM_HDMI_HDCP
        help
          Choose this option to enable HDCP state machine
 
+config DRM_MSM_DP
+       bool "Enable DisplayPort support in MSM DRM driver"
+       depends on DRM_MSM
+       default y
+       help
+         Compile in support for the DisplayPort driver in the MSM DRM
+         driver. This option enables external DisplayPort display
+         support; the DP output can act as the primary or a secondary
+         display on the device.
+
 config DRM_MSM_DSI
        bool "Enable DSI support in MSM DRM driver"
        depends on DRM_MSM
@@ -110,3 +119,11 @@ config DRM_MSM_DSI_10NM_PHY
        default y
        help
          Choose this option if DSI PHY on SDM845 is used on the platform.
+
+config DRM_MSM_DSI_7NM_PHY
+       bool "Enable DSI 7nm PHY driver in MSM DRM (used by SM8150/SM8250)"
+       depends on DRM_MSM_DSI
+       default y
+       help
+         Choose this option if DSI PHY on SM8150/SM8250 is used on the
+         platform.
index 42f8aae..340682c 100644 (file)
@@ -2,6 +2,7 @@
 ccflags-y := -I $(srctree)/$(src)
 ccflags-y += -I $(srctree)/$(src)/disp/dpu1
 ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi
+ccflags-$(CONFIG_DRM_MSM_DP) += -I $(srctree)/$(src)/dp
 
 msm-y := \
        adreno/adreno_device.o \
@@ -95,10 +96,23 @@ msm-y := \
        msm_gpu_tracepoints.o \
        msm_gpummu.o
 
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
+       dp/dp_debug.o
 
 msm-$(CONFIG_DRM_MSM_GPU_STATE)        += adreno/a6xx_gpu_state.o
 
+msm-$(CONFIG_DRM_MSM_DP) += dp/dp_aux.o \
+       dp/dp_catalog.o \
+       dp/dp_ctrl.o \
+       dp/dp_display.o \
+       dp/dp_drm.o \
+       dp/dp_hpd.o \
+       dp/dp_link.o \
+       dp/dp_panel.o \
+       dp/dp_parser.o \
+       dp/dp_power.o \
+       dp/dp_audio.o
+
 msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
 msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
 msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
@@ -119,6 +133,7 @@ msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
 msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/phy/dsi_phy_28nm_8960.o
 msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/phy/dsi_phy_14nm.o
 msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/phy/dsi_phy_10nm.o
+msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/phy/dsi_phy_7nm.o
 
 ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
 msm-y += dsi/pll/dsi_pll.o
@@ -126,6 +141,7 @@ msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
 msm-$(CONFIG_DRM_MSM_DSI_28NM_8960_PHY) += dsi/pll/dsi_pll_28nm_8960.o
 msm-$(CONFIG_DRM_MSM_DSI_14NM_PHY) += dsi/pll/dsi_pll_14nm.o
 msm-$(CONFIG_DRM_MSM_DSI_10NM_PHY) += dsi/pll/dsi_pll_10nm.o
+msm-$(CONFIG_DRM_MSM_DSI_7NM_PHY) += dsi/pll/dsi_pll_7nm.o
 endif
 
 obj-$(CONFIG_DRM_MSM)  += msm.o
index 48fa49f..7e82c41 100644 (file)
@@ -10,6 +10,48 @@ extern bool hang_debug;
 static void a2xx_dump(struct msm_gpu *gpu);
 static bool a2xx_idle(struct msm_gpu *gpu);
 
+static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+       struct msm_drm_private *priv = gpu->dev->dev_private;
+       struct msm_ringbuffer *ring = submit->ring;
+       unsigned int i;
+
+       for (i = 0; i < submit->nr_cmds; i++) {
+               switch (submit->cmd[i].type) {
+               case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+                       /* ignore IB-targets */
+                       break;
+               case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+                       /* ignore if there has not been a ctx switch: */
+                       if (priv->lastctx == submit->queue->ctx)
+                               break;
+                       fallthrough;
+               case MSM_SUBMIT_CMD_BUF:
+                       OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+                       OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+                       OUT_RING(ring, submit->cmd[i].size);
+                       OUT_PKT2(ring);
+                       break;
+               }
+       }
+
+       OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+       OUT_RING(ring, submit->seqno);
+
+       /* wait for idle before cache flush/interrupt */
+       OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+       OUT_RING(ring, 0x00000000);
+
+       OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+       OUT_RING(ring, CACHE_FLUSH_TS);
+       OUT_RING(ring, rbmemptr(ring, fence));
+       OUT_RING(ring, submit->seqno);
+       OUT_PKT3(ring, CP_INTERRUPT, 1);
+       OUT_RING(ring, 0x80000000);
+
+       adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+}
+
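The per-generation submit functions added in this series all share the same shape: walk the command list, skip the context-restore buffer when the last context to touch the ring is the one submitting, and emit an indirect-buffer packet for everything else. A minimal sketch of just that filtering, with hypothetical plain-C stand-ins for the ring and submit types (none of these names are driver API):

#include <stdio.h>
#include <stdint.h>

enum cmd_type { CMD_IB_TARGET, CMD_CTX_RESTORE, CMD_BUF };

struct cmd { enum cmd_type type; uint64_t iova; uint32_t size; };

/* Emit one submit's worth of indirect buffers. 'lastctx' is whatever
 * context last ran on this ring; a matching context lets us skip the
 * context-restore buffer entirely.
 */
static void emit_cmds(const struct cmd *cmds, int n,
		      const void *lastctx, const void *ctx)
{
	for (int i = 0; i < n; i++) {
		switch (cmds[i].type) {
		case CMD_IB_TARGET:
			break;			/* ignore IB-targets */
		case CMD_CTX_RESTORE:
			if (lastctx == ctx)
				break;		/* no ctx switch: skip */
			/* fall through */
		case CMD_BUF:
			printf("IB: iova=%#llx size=%u\n",
			       (unsigned long long)cmds[i].iova,
			       cmds[i].size);
			break;
		}
	}
}

int main(void)
{
	struct cmd cmds[] = {
		{ CMD_CTX_RESTORE, 0x1000, 8 },
		{ CMD_BUF,         0x2000, 64 },
	};
	int ctx_a = 0, ctx_b = 0;

	emit_cmds(cmds, 2, &ctx_a, &ctx_a);	/* same ctx: restore skipped */
	emit_cmds(cmds, 2, &ctx_a, &ctx_b);	/* ctx switch: restore emitted */
	return 0;
}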
 static bool a2xx_me_init(struct msm_gpu *gpu)
 {
        struct msm_ringbuffer *ring = gpu->rb[0];
@@ -53,7 +95,7 @@ static bool a2xx_me_init(struct msm_gpu *gpu)
        OUT_PKT3(ring, CP_SET_PROTECTED_MODE, 1);
        OUT_RING(ring, 1);
 
-       gpu->funcs->flush(gpu, ring);
+       adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
        return a2xx_idle(gpu);
 }
 
@@ -421,16 +463,11 @@ a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
        return aspace;
 }
 
-/* Register offset defines for A2XX - copy of A3XX */
-static const unsigned int a2xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
-       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
-       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
-};
+static u32 a2xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+       ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
+       return ring->memptrs->rptr;
+}
 
 static const struct adreno_gpu_funcs funcs = {
        .base = {
@@ -439,8 +476,7 @@ static const struct adreno_gpu_funcs funcs = {
                .pm_suspend = msm_gpu_pm_suspend,
                .pm_resume = msm_gpu_pm_resume,
                .recover = a2xx_recover,
-               .submit = adreno_submit,
-               .flush = adreno_flush,
+               .submit = a2xx_submit,
                .active_ring = adreno_active_ring,
                .irq = a2xx_irq,
                .destroy = a2xx_destroy,
@@ -450,6 +486,7 @@ static const struct adreno_gpu_funcs funcs = {
                .gpu_state_get = a2xx_gpu_state_get,
                .gpu_state_put = adreno_gpu_state_put,
                .create_address_space = a2xx_create_address_space,
+               .get_rptr = a2xx_get_rptr,
        },
 };
 
@@ -491,8 +528,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
        else
                adreno_gpu->registers = a220_registers;
 
-       adreno_gpu->reg_offsets = a2xx_register_offsets;
-
        ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
        if (ret)
                goto fail;
index f647114..f29c77d 100644 (file)
@@ -28,6 +28,61 @@ extern bool hang_debug;
 static void a3xx_dump(struct msm_gpu *gpu);
 static bool a3xx_idle(struct msm_gpu *gpu);
 
+static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+       struct msm_drm_private *priv = gpu->dev->dev_private;
+       struct msm_ringbuffer *ring = submit->ring;
+       unsigned int i;
+
+       for (i = 0; i < submit->nr_cmds; i++) {
+               switch (submit->cmd[i].type) {
+               case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+                       /* ignore IB-targets */
+                       break;
+               case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+                       /* ignore if there has not been a ctx switch: */
+                       if (priv->lastctx == submit->queue->ctx)
+                               break;
+                       fallthrough;
+               case MSM_SUBMIT_CMD_BUF:
+                       OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+                       OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+                       OUT_RING(ring, submit->cmd[i].size);
+                       OUT_PKT2(ring);
+                       break;
+               }
+       }
+
+       OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+       OUT_RING(ring, submit->seqno);
+
+       /* Flush HLSQ lazy updates to make sure there is nothing
+        * pending for indirect loads after the timestamp has
+        * passed:
+        */
+       OUT_PKT3(ring, CP_EVENT_WRITE, 1);
+       OUT_RING(ring, HLSQ_FLUSH);
+
+       /* wait for idle before cache flush/interrupt */
+       OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+       OUT_RING(ring, 0x00000000);
+
+       /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+       OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+       OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+       OUT_RING(ring, rbmemptr(ring, fence));
+       OUT_RING(ring, submit->seqno);
+
+#if 0
+       /* Dummy set-constant to trigger context rollover */
+       OUT_PKT3(ring, CP_SET_CONSTANT, 2);
+       OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
+       OUT_RING(ring, 0x00000000);
+#endif
+
+       adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
+}
+
 static bool a3xx_me_init(struct msm_gpu *gpu)
 {
        struct msm_ringbuffer *ring = gpu->rb[0];
@@ -51,7 +106,7 @@ static bool a3xx_me_init(struct msm_gpu *gpu)
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);
 
-       gpu->funcs->flush(gpu, ring);
+       adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
        return a3xx_idle(gpu);
 }
 
@@ -423,16 +478,11 @@ static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
        return state;
 }
 
-/* Register offset defines for A3XX */
-static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
-       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
-       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
-};
+static u32 a3xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+       ring->memptrs->rptr = gpu_read(gpu, REG_AXXX_CP_RB_RPTR);
+       return ring->memptrs->rptr;
+}
 
 static const struct adreno_gpu_funcs funcs = {
        .base = {
@@ -441,8 +491,7 @@ static const struct adreno_gpu_funcs funcs = {
                .pm_suspend = msm_gpu_pm_suspend,
                .pm_resume = msm_gpu_pm_resume,
                .recover = a3xx_recover,
-               .submit = adreno_submit,
-               .flush = adreno_flush,
+               .submit = a3xx_submit,
                .active_ring = adreno_active_ring,
                .irq = a3xx_irq,
                .destroy = a3xx_destroy,
@@ -452,6 +501,7 @@ static const struct adreno_gpu_funcs funcs = {
                .gpu_state_get = a3xx_gpu_state_get,
                .gpu_state_put = adreno_gpu_state_put,
                .create_address_space = adreno_iommu_create_address_space,
+               .get_rptr = a3xx_get_rptr,
        },
 };
 
@@ -490,7 +540,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
        gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
 
        adreno_gpu->registers = a3xx_registers;
-       adreno_gpu->reg_offsets = a3xx_register_offsets;
 
        ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
        if (ret)
index 9547536..2b93b33 100644 (file)
@@ -22,6 +22,54 @@ extern bool hang_debug;
 static void a4xx_dump(struct msm_gpu *gpu);
 static bool a4xx_idle(struct msm_gpu *gpu);
 
+static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+       struct msm_drm_private *priv = gpu->dev->dev_private;
+       struct msm_ringbuffer *ring = submit->ring;
+       unsigned int i;
+
+       for (i = 0; i < submit->nr_cmds; i++) {
+               switch (submit->cmd[i].type) {
+               case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+                       /* ignore IB-targets */
+                       break;
+               case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+                       /* ignore if there has not been a ctx switch: */
+                       if (priv->lastctx == submit->queue->ctx)
+                               break;
+                       fallthrough;
+               case MSM_SUBMIT_CMD_BUF:
+                       OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFE, 2);
+                       OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+                       OUT_RING(ring, submit->cmd[i].size);
+                       OUT_PKT2(ring);
+                       break;
+               }
+       }
+
+       OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+       OUT_RING(ring, submit->seqno);
+
+       /* Flush HLSQ lazy updates to make sure there is nothing
+        * pending for indirect loads after the timestamp has
+        * passed:
+        */
+       OUT_PKT3(ring, CP_EVENT_WRITE, 1);
+       OUT_RING(ring, HLSQ_FLUSH);
+
+       /* wait for idle before cache flush/interrupt */
+       OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+       OUT_RING(ring, 0x00000000);
+
+       /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
+       OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+       OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
+       OUT_RING(ring, rbmemptr(ring, fence));
+       OUT_RING(ring, submit->seqno);
+
+       adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
+}
+
 /*
  * a4xx_enable_hwcg() - Program the clock control registers
  * @device: The adreno device pointer
@@ -129,7 +177,7 @@ static bool a4xx_me_init(struct msm_gpu *gpu)
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);
 
-       gpu->funcs->flush(gpu, ring);
+       adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
        return a4xx_idle(gpu);
 }
 
@@ -515,17 +563,6 @@ static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
        return state;
 }
 
-/* Register offset defines for A4XX, in order of enum adreno_regs */
-static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
-       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
-       REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
-};
-
 static void a4xx_dump(struct msm_gpu *gpu)
 {
        printk("status:   %08x\n",
@@ -576,6 +613,12 @@ static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
        return 0;
 }
 
+static u32 a4xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+       ring->memptrs->rptr = gpu_read(gpu, REG_A4XX_CP_RB_RPTR);
+       return ring->memptrs->rptr;
+}
+
 static const struct adreno_gpu_funcs funcs = {
        .base = {
                .get_param = adreno_get_param,
@@ -583,8 +626,7 @@ static const struct adreno_gpu_funcs funcs = {
                .pm_suspend = a4xx_pm_suspend,
                .pm_resume = a4xx_pm_resume,
                .recover = a4xx_recover,
-               .submit = adreno_submit,
-               .flush = adreno_flush,
+               .submit = a4xx_submit,
                .active_ring = adreno_active_ring,
                .irq = a4xx_irq,
                .destroy = a4xx_destroy,
@@ -594,6 +636,7 @@ static const struct adreno_gpu_funcs funcs = {
                .gpu_state_get = a4xx_gpu_state_get,
                .gpu_state_put = adreno_gpu_state_put,
                .create_address_space = adreno_iommu_create_address_space,
+               .get_rptr = a4xx_get_rptr,
        },
        .get_timestamp = a4xx_get_timestamp,
 };
@@ -631,15 +674,12 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
 
        adreno_gpu->registers = adreno_is_a405(adreno_gpu) ? a405_registers :
                                                             a4xx_registers;
-       adreno_gpu->reg_offsets = a4xx_register_offsets;
 
        /* if needed, allocate gmem: */
-       if (adreno_is_a4xx(adreno_gpu)) {
-               ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
-                                           &a4xx_gpu->ocmem);
-               if (ret)
-                       goto fail;
-       }
+       ret = adreno_gpu_ocmem_init(dev->dev, adreno_gpu,
+                                   &a4xx_gpu->ocmem);
+       if (ret)
+               goto fail;
 
        if (!gpu->aspace) {
                /* TODO we think it is possible to configure the GPU to
index 68eddac..fc2c905 100644 (file)
@@ -11,7 +11,7 @@
 
 #include "a5xx_gpu.h"
 
-static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
+static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
 {
        int i;
 
@@ -22,11 +22,9 @@ static int pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
                drm_printf(p, "  %02x: %08x\n", i,
                        gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));
        }
-
-       return 0;
 }
 
-static int me_print(struct msm_gpu *gpu, struct drm_printer *p)
+static void me_print(struct msm_gpu *gpu, struct drm_printer *p)
 {
        int i;
 
@@ -37,11 +35,9 @@ static int me_print(struct msm_gpu *gpu, struct drm_printer *p)
                drm_printf(p, "  %02x: %08x\n", i,
                        gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));
        }
-
-       return 0;
 }
 
-static int meq_print(struct msm_gpu *gpu, struct drm_printer *p)
+static void meq_print(struct msm_gpu *gpu, struct drm_printer *p)
 {
        int i;
 
@@ -52,11 +48,9 @@ static int meq_print(struct msm_gpu *gpu, struct drm_printer *p)
                drm_printf(p, "  %02x: %08x\n", i,
                        gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
        }
-
-       return 0;
 }
 
-static int roq_print(struct msm_gpu *gpu, struct drm_printer *p)
+static void roq_print(struct msm_gpu *gpu, struct drm_printer *p)
 {
        int i;
 
@@ -71,8 +65,6 @@ static int roq_print(struct msm_gpu *gpu, struct drm_printer *p)
                drm_printf(p, "  %02x: %08x %08x %08x %08x\n", i,
                        val[0], val[1], val[2], val[3]);
        }
-
-       return 0;
 }
 
 static int show(struct seq_file *m, void *arg)
@@ -81,10 +73,11 @@ static int show(struct seq_file *m, void *arg)
        struct drm_device *dev = node->minor->dev;
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_printer p = drm_seq_file_printer(m);
-       int (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
+       void (*show)(struct msm_gpu *gpu, struct drm_printer *p) =
                node->info_ent->data;
 
-       return show(priv->gpu, &p);
+       show(priv->gpu, &p);
+       return 0;
 }
 
 #define ENT(n) { .name = #n, .show = show, .data = n ##_print }
index 91726da..d6804a8 100644 (file)
@@ -18,13 +18,24 @@ static void a5xx_dump(struct msm_gpu *gpu);
 
 #define GPU_PAS_ID 13
 
-static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+               bool sync)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        uint32_t wptr;
        unsigned long flags;
 
+       /*
+        * Most flush operations need to issue a WHERE_AM_I opcode to sync up
+        * the rptr shadow
+        */
+       if (a5xx_gpu->has_whereami && sync) {
+               OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+               OUT_RING(ring, lower_32_bits(shadowptr(a5xx_gpu, ring)));
+               OUT_RING(ring, upper_32_bits(shadowptr(a5xx_gpu, ring)));
+       }
+
        spin_lock_irqsave(&ring->lock, flags);
 
        /* Copy the shadow to the actual register */
@@ -43,8 +54,7 @@ static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
                gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
 }
 
-static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-       struct msm_file_private *ctx)
+static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
        struct msm_drm_private *priv = gpu->dev->dev_private;
        struct msm_ringbuffer *ring = submit->ring;
@@ -57,7 +67,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
@@ -91,7 +101,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
                }
        }
 
-       a5xx_flush(gpu, ring);
+       a5xx_flush(gpu, ring, true);
        a5xx_preempt_trigger(gpu);
 
        /* we might not necessarily have a cmd from userspace to
@@ -103,8 +113,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
        msm_gpu_retire(gpu);
 }
 
-static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-       struct msm_file_private *ctx)
+static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
@@ -114,7 +123,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
        if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
                priv->lastctx = NULL;
-               a5xx_submit_in_rb(gpu, submit, ctx);
+               a5xx_submit_in_rb(gpu, submit);
                return;
        }
 
@@ -148,7 +157,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
@@ -206,7 +215,8 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        /* Set bit 0 to trigger an interrupt on preempt complete */
        OUT_RING(ring, 0x01);
 
-       a5xx_flush(gpu, ring);
+       /* A WHERE_AM_I packet is not needed after a YIELD */
+       a5xx_flush(gpu, ring, false);
 
        /* Check to see if we need to start preemption */
        a5xx_preempt_trigger(gpu);
@@ -365,7 +375,7 @@ static int a5xx_me_init(struct msm_gpu *gpu)
        OUT_RING(ring, 0x00000000);
        OUT_RING(ring, 0x00000000);
 
-       gpu->funcs->flush(gpu, ring);
+       a5xx_flush(gpu, ring, true);
        return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
 }
 
@@ -407,11 +417,31 @@ static int a5xx_preempt_start(struct msm_gpu *gpu)
        OUT_RING(ring, 0x01);
        OUT_RING(ring, 0x01);
 
-       gpu->funcs->flush(gpu, ring);
+       /* The WHERE_AM_I packet is not needed after a YIELD is issued */
+       a5xx_flush(gpu, ring, false);
 
        return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
 }
 
+static void a5xx_ucode_check_version(struct a5xx_gpu *a5xx_gpu,
+               struct drm_gem_object *obj)
+{
+       u32 *buf = msm_gem_get_vaddr_active(obj);
+
+       if (IS_ERR(buf))
+               return;
+
+       /*
+        * If the lowest nibble is 0xa, that is an indication that this
+        * microcode has been patched. The actual version is in dword [3],
+        * but we only care about the patchlevel, which is the lowest
+        * nibble of dword [3].
+        */
+       if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
+               a5xx_gpu->has_whereami = true;
+
+       msm_gem_put_vaddr(obj);
+}
+
 static int a5xx_ucode_init(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -447,6 +477,7 @@ static int a5xx_ucode_init(struct msm_gpu *gpu)
                }
 
                msm_gem_object_set_name(a5xx_gpu->pfp_bo, "pfpfw");
+               a5xx_ucode_check_version(a5xx_gpu, a5xx_gpu->pfp_bo);
        }
 
        gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
@@ -506,6 +537,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
 static int a5xx_hw_init(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
        int ret;
 
        gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
@@ -714,9 +746,36 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
        gpu_write64(gpu, REG_A5XX_CP_RB_BASE, REG_A5XX_CP_RB_BASE_HI,
                gpu->rb[0]->iova);
 
+       /*
+        * If the microcode supports the WHERE_AM_I opcode then we can use that
+        * in lieu of the RPTR shadow and enable preemption. Otherwise, we
+        * can't safely use the RPTR shadow or preemption. In either case, the
+        * RPTR shadow should be disabled in hardware.
+        */
        gpu_write(gpu, REG_A5XX_CP_RB_CNTL,
                MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
 
+       /* Disable preemption if WHERE_AM_I isn't available */
+       if (!a5xx_gpu->has_whereami && gpu->nr_rings > 1) {
+               a5xx_preempt_fini(gpu);
+               gpu->nr_rings = 1;
+       } else {
+               /* Create a privileged buffer for the RPTR shadow */
+               if (!a5xx_gpu->shadow_bo) {
+                       a5xx_gpu->shadow = msm_gem_kernel_new(gpu->dev,
+                               sizeof(u32) * gpu->nr_rings,
+                               MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
+                               gpu->aspace, &a5xx_gpu->shadow_bo,
+                               &a5xx_gpu->shadow_iova);
+
+                       if (IS_ERR(a5xx_gpu->shadow))
+                               return PTR_ERR(a5xx_gpu->shadow);
+               }
+
+               gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
+                       REG_A5XX_CP_RB_RPTR_ADDR_HI, shadowptr(a5xx_gpu, gpu->rb[0]));
+       }
+
        a5xx_preempt_hw_init(gpu);
 
        /* Disable the interrupts through the initial bringup stage */
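The decision above is small but easy to misread: the hardware RPTR shadow is always disabled in the ring-control register, and then either preemption is torn down (old microcode with more than one ring) or a privileged shadow buffer is allocated and pointed at by CP_RB_RPTR_ADDR. A hedged userspace condensation of just that branch, with invented stand-ins for the driver state:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical distillation of the a5xx_hw_init() branch above */
struct gpu_state {
	bool has_whereami;	/* microcode supports CP_WHERE_AM_I */
	int  nr_rings;
	bool shadow_allocated;
};

static void setup_rptr_tracking(struct gpu_state *gpu)
{
	if (!gpu->has_whereami && gpu->nr_rings > 1) {
		/* Can't track rptr safely across rings: drop preemption */
		gpu->nr_rings = 1;
		printf("preemption disabled, nr_rings=1\n");
		return;
	}

	/* One u32 shadow slot per ring, in a privileged buffer */
	if (!gpu->shadow_allocated) {
		gpu->shadow_allocated = true;
		printf("shadow buffer: %zu bytes\n",
		       sizeof(unsigned int) * gpu->nr_rings);
	}
}

int main(void)
{
	struct gpu_state old_fw = { .has_whereami = false, .nr_rings = 4 };
	struct gpu_state new_fw = { .has_whereami = true,  .nr_rings = 4 };

	setup_rptr_tracking(&old_fw);	/* falls back to a single ring */
	setup_rptr_tracking(&new_fw);	/* keeps 4 rings plus shadow */
	return 0;
}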
@@ -740,7 +799,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
                OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
                OUT_RING(gpu->rb[0], CP_EVENT_WRITE_0_EVENT(STAT_EVENT));
 
-               gpu->funcs->flush(gpu, gpu->rb[0]);
+               a5xx_flush(gpu, gpu->rb[0], true);
                if (!a5xx_idle(gpu, gpu->rb[0]))
                        return -EINVAL;
        }
@@ -758,7 +817,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
                OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
                OUT_RING(gpu->rb[0], 0x00000000);
 
-               gpu->funcs->flush(gpu, gpu->rb[0]);
+               a5xx_flush(gpu, gpu->rb[0], true);
                if (!a5xx_idle(gpu, gpu->rb[0]))
                        return -EINVAL;
        } else if (ret == -ENODEV) {
@@ -825,6 +884,11 @@ static void a5xx_destroy(struct msm_gpu *gpu)
                drm_gem_object_put(a5xx_gpu->gpmu_bo);
        }
 
+       if (a5xx_gpu->shadow_bo) {
+               msm_gem_unpin_iova(a5xx_gpu->shadow_bo, gpu->aspace);
+               drm_gem_object_put(a5xx_gpu->shadow_bo);
+       }
+
        adreno_gpu_cleanup(adreno_gpu);
        kfree(a5xx_gpu);
 }
@@ -1057,17 +1121,6 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
        return IRQ_HANDLED;
 }
 
-static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
-               REG_A5XX_CP_RB_RPTR_ADDR_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
-};
-
 static const u32 a5xx_registers[] = {
        0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
        0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
@@ -1432,6 +1485,17 @@ static unsigned long a5xx_gpu_busy(struct msm_gpu *gpu)
        return (unsigned long)busy_time;
 }
 
+static uint32_t a5xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+       if (a5xx_gpu->has_whereami)
+               return a5xx_gpu->shadow[ring->id];
+
+       return ring->memptrs->rptr = gpu_read(gpu, REG_A5XX_CP_RB_RPTR);
+}
+
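Reading the rptr now has two paths: microcode with WHERE_AM_I keeps the per-ring slot in the shadow buffer current, and everything else falls back to an MMIO read. The per-ring slot address is just shadow_iova + ring_id * sizeof(u32), which is what the shadowptr() macro added to a5xx_gpu.h computes. A small self-contained illustration of the offset math, with a made-up iova:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the shadowptr() macro: one u32 slot per ring */
static uint64_t shadowptr(uint64_t shadow_iova, int ring_id)
{
	return shadow_iova + ring_id * sizeof(uint32_t);
}

int main(void)
{
	uint64_t iova = 0x10001000;	/* made-up shadow buffer iova */

	for (int id = 0; id < 4; id++)
		printf("ring %d rptr slot at %#llx\n", id,
		       (unsigned long long)shadowptr(iova, id));
	return 0;
}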
 static const struct adreno_gpu_funcs funcs = {
        .base = {
                .get_param = adreno_get_param,
@@ -1440,7 +1504,6 @@ static const struct adreno_gpu_funcs funcs = {
                .pm_resume = a5xx_pm_resume,
                .recover = a5xx_recover,
                .submit = a5xx_submit,
-               .flush = a5xx_flush,
                .active_ring = a5xx_active_ring,
                .irq = a5xx_irq,
                .destroy = a5xx_destroy,
@@ -1454,6 +1517,7 @@ static const struct adreno_gpu_funcs funcs = {
                .gpu_state_get = a5xx_gpu_state_get,
                .gpu_state_put = a5xx_gpu_state_put,
                .create_address_space = adreno_iommu_create_address_space,
+               .get_rptr = a5xx_get_rptr,
        },
        .get_timestamp = a5xx_get_timestamp,
 };
@@ -1512,14 +1576,12 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
        gpu = &adreno_gpu->base;
 
        adreno_gpu->registers = a5xx_registers;
-       adreno_gpu->reg_offsets = a5xx_register_offsets;
 
        a5xx_gpu->lm_leakage = 0x4E001A;
 
        check_speed_bin(&pdev->dev);
 
-       /* Restricting nr_rings to 1 to temporarily disable preemption */
-       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+       ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
        if (ret) {
                a5xx_destroy(&(a5xx_gpu->base.base));
                return ERR_PTR(ret);
index 1e5b1a1..c7187bc 100644 (file)
@@ -37,6 +37,13 @@ struct a5xx_gpu {
 
        atomic_t preempt_state;
        struct timer_list preempt_timer;
+
+       struct drm_gem_object *shadow_bo;
+       uint64_t shadow_iova;
+       uint32_t *shadow;
+
+       /* True if the microcode supports the WHERE_AM_I opcode */
+       bool has_whereami;
 };
 
 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -141,6 +148,9 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
        return -ETIMEDOUT;
 }
 
+#define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \
+               ((ring)->id * sizeof(uint32_t)))
+
 bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
 
@@ -150,6 +160,8 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu);
 void a5xx_preempt_irq(struct msm_gpu *gpu);
 void a5xx_preempt_fini(struct msm_gpu *gpu);
 
+void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, bool sync);
+
 /* Return true if we are in a preempt state */
 static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
 {
index 321a806..f176a6f 100644 (file)
@@ -240,7 +240,7 @@ static int a5xx_gpmu_init(struct msm_gpu *gpu)
        OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
        OUT_RING(ring, 1);
 
-       gpu->funcs->flush(gpu, ring);
+       a5xx_flush(gpu, ring, true);
 
        if (!a5xx_idle(gpu, ring)) {
                DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
index 9f3fe17..7e04509 100644 (file)
@@ -259,8 +259,9 @@ static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
        ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
        ptr->info = 0;
        ptr->data = 0;
-       ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
-       ptr->rptr_addr = rbmemptr(ring, rptr);
+       ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE;
+
+       ptr->rptr_addr = shadowptr(a5xx_gpu, ring);
        ptr->counter = counters_iova;
 
        return 0;
index e1c7bcd..491fee4 100644 (file)
@@ -11,6 +11,7 @@
 #include "a6xx_gpu.h"
 #include "a6xx_gmu.xml.h"
 #include "msm_gem.h"
+#include "msm_gpu_trace.h"
 #include "msm_mmu.h"
 
 static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
@@ -124,6 +125,8 @@ void a6xx_gmu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
        gmu->current_perf_index = perf_index;
        gmu->freq = gmu->gpu_freqs[perf_index];
 
+       trace_msm_gmu_freq_change(gmu->freq, perf_index);
+
        /*
         * This can get called from devfreq while the hardware is idle. Don't
         * bring up the power if it isn't already active
index 66a95e2..948f365 100644 (file)
@@ -51,9 +51,20 @@ bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 
 static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
 {
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
        uint32_t wptr;
        unsigned long flags;
 
+       /* Expanded APRIV doesn't need to issue the WHERE_AM_I opcode */
+       if (a6xx_gpu->has_whereami && !adreno_gpu->base.hw_apriv) {
+               OUT_PKT7(ring, CP_WHERE_AM_I, 2);
+               OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
+               OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));
+       }
+
        spin_lock_irqsave(&ring->lock, flags);
 
        /* Copy the shadow to the actual register */
@@ -81,8 +92,50 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
        OUT_RING(ring, upper_32_bits(iova));
 }
 
-static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-       struct msm_file_private *ctx)
+static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
+               struct msm_ringbuffer *ring, struct msm_file_private *ctx)
+{
+       phys_addr_t ttbr;
+       u32 asid;
+       u64 memptr = rbmemptr(ring, ttbr0);
+
+       if (ctx == a6xx_gpu->cur_ctx)
+               return;
+
+       if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
+               return;
+
+       /* Execute the table update */
+       OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 4);
+       OUT_RING(ring, CP_SMMU_TABLE_UPDATE_0_TTBR0_LO(lower_32_bits(ttbr)));
+
+       OUT_RING(ring,
+               CP_SMMU_TABLE_UPDATE_1_TTBR0_HI(upper_32_bits(ttbr)) |
+               CP_SMMU_TABLE_UPDATE_1_ASID(asid));
+       OUT_RING(ring, CP_SMMU_TABLE_UPDATE_2_CONTEXTIDR(0));
+       OUT_RING(ring, CP_SMMU_TABLE_UPDATE_3_CONTEXTBANK(0));
+
+       /*
+        * Write the new TTBR0 to the memstore. This is good for debugging.
+        */
+       OUT_PKT7(ring, CP_MEM_WRITE, 4);
+       OUT_RING(ring, CP_MEM_WRITE_0_ADDR_LO(lower_32_bits(memptr)));
+       OUT_RING(ring, CP_MEM_WRITE_1_ADDR_HI(upper_32_bits(memptr)));
+       OUT_RING(ring, lower_32_bits(ttbr));
+       OUT_RING(ring, (asid << 16) | upper_32_bits(ttbr));
+
+       /*
+        * And finally, trigger a UCHE flush to be sure there isn't anything
+        * lingering in that part of the GPU.
+        */
+
+       OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+       OUT_RING(ring, 0x31);
+
+       a6xx_gpu->cur_ctx = ctx;
+}
+
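The per-context switch above is a fixed sequence: bail out if the incoming context is already current, look up the context's TTBR0/ASID, emit CP_SMMU_TABLE_UPDATE so the CP reprograms the SMMU, mirror the new TTBR0 into the memstore for debugging, and flush the UCHE so no stale translations linger. A sketch of that ordering with stubbed-out packet emitters (the emit_* helpers are ours, not driver API):

#include <stdint.h>
#include <stdio.h>

static void emit_smmu_table_update(uint64_t ttbr, uint32_t asid)
{
	printf("CP_SMMU_TABLE_UPDATE ttbr0=%#llx asid=%u\n",
	       (unsigned long long)ttbr, asid);
}

static void emit_mem_write(uint64_t addr)
{
	printf("CP_MEM_WRITE @%#llx (debug copy of ttbr0)\n",
	       (unsigned long long)addr);
}

static void emit_uche_flush(void)
{
	printf("CP_EVENT_WRITE 0x31 (UCHE flush)\n");
}

/* Hypothetical condensation of a6xx_set_pagetable() above */
static void set_pagetable(const void **cur_ctx, const void *ctx,
			  uint64_t ttbr, uint32_t asid, uint64_t memptr)
{
	if (*cur_ctx == ctx)
		return;			/* already current: nothing to do */

	emit_smmu_table_update(ttbr, asid);
	emit_mem_write(memptr);
	emit_uche_flush();

	*cur_ctx = ctx;
}

int main(void)
{
	const void *cur = NULL;
	int ctx = 0;

	set_pagetable(&cur, &ctx, 0x80001000, 5, 0xdead0000);
	set_pagetable(&cur, &ctx, 0x80001000, 5, 0xdead0000); /* no-op */
	return 0;
}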
+static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
        unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
        struct msm_drm_private *priv = gpu->dev->dev_private;
@@ -91,6 +144,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
        struct msm_ringbuffer *ring = submit->ring;
        unsigned int i;
 
+       a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+
        get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
                rbmemptr_stats(ring, index, cpcycles_start));
 
@@ -115,7 +170,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                case MSM_SUBMIT_CMD_IB_TARGET_BUF:
                        break;
                case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       if (priv->lastctx == ctx)
+                       if (priv->lastctx == submit->queue->ctx)
                                break;
                        fallthrough;
                case MSM_SUBMIT_CMD_BUF:
@@ -464,6 +519,30 @@ static int a6xx_cp_init(struct msm_gpu *gpu)
        return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
 }
 
+static void a6xx_ucode_check_version(struct a6xx_gpu *a6xx_gpu,
+               struct drm_gem_object *obj)
+{
+       u32 *buf = msm_gem_get_vaddr_active(obj);
+
+       if (IS_ERR(buf))
+               return;
+
+       /*
+        * If the lowest nibble is 0xa, that is an indication that this
+        * microcode has been patched. The actual version is in dword [3],
+        * but we only care about the patchlevel, which is the lowest
+        * nibble of dword [3].
+        *
+        * Otherwise, check that the firmware is greater than or equal to
+        * 1.90, the first version that had this fix built in.
+        */
+       if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
+               a6xx_gpu->has_whereami = true;
+       else if ((buf[0] & 0xfff) > 0x190)
+               a6xx_gpu->has_whereami = true;
+
+       msm_gem_put_vaddr(obj);
+}
+
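The version test reduces to two independent accept conditions on the first words of the SQE firmware image: a patched image (low nibble of word 0 is 0xa, patchlevel in word 2 at least 1), or a stock image whose packed version in the low 12 bits of word 0 is above 0x190. A standalone restatement of just that predicate, with made-up image headers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same predicate as a6xx_ucode_check_version() above */
static bool fw_has_whereami(const uint32_t *buf)
{
	if (((buf[0] & 0xf) == 0xa) && (buf[2] & 0xf) >= 1)
		return true;		/* patched microcode */
	if ((buf[0] & 0xfff) > 0x190)
		return true;		/* new enough stock microcode */
	return false;
}

int main(void)
{
	uint32_t patched[] = { 0x0000003a, 0, 0x00000001 };
	uint32_t v191[]    = { 0x00000191, 0, 0 };
	uint32_t v180[]    = { 0x00000180, 0, 0 };

	printf("patched: %d\n", fw_has_whereami(patched));
	printf("1.91:    %d\n", fw_has_whereami(v191));
	printf("1.80:    %d\n", fw_has_whereami(v180));
	return 0;
}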
 static int a6xx_ucode_init(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -484,6 +563,7 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
                }
 
                msm_gem_object_set_name(a6xx_gpu->sqe_bo, "sqefw");
+               a6xx_ucode_check_version(a6xx_gpu, a6xx_gpu->sqe_bo);
        }
 
        gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
@@ -699,12 +779,43 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
        gpu_write64(gpu, REG_A6XX_CP_RB_BASE, REG_A6XX_CP_RB_BASE_HI,
                gpu->rb[0]->iova);
 
-       gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
-               MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+       /*
+        * Targets that support extended APRIV can use the RPTR shadow from
+        * hardware, but all other targets need to disable the feature.
+        * Targets that support the WHERE_AM_I opcode can use that instead.
+        */
+       if (adreno_gpu->base.hw_apriv)
+               gpu_write(gpu, REG_A6XX_CP_RB_CNTL, MSM_GPU_RB_CNTL_DEFAULT);
+       else
+               gpu_write(gpu, REG_A6XX_CP_RB_CNTL,
+                       MSM_GPU_RB_CNTL_DEFAULT | AXXX_CP_RB_CNTL_NO_UPDATE);
+
+       /*
+        * Expanded APRIV and targets that support WHERE_AM_I both need a
+        * privileged buffer to store the RPTR shadow
+        */
+
+       if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami) {
+               if (!a6xx_gpu->shadow_bo) {
+                       a6xx_gpu->shadow = msm_gem_kernel_new_locked(gpu->dev,
+                               sizeof(u32) * gpu->nr_rings,
+                               MSM_BO_UNCACHED | MSM_BO_MAP_PRIV,
+                               gpu->aspace, &a6xx_gpu->shadow_bo,
+                               &a6xx_gpu->shadow_iova);
+
+                       if (IS_ERR(a6xx_gpu->shadow))
+                               return PTR_ERR(a6xx_gpu->shadow);
+               }
+
+               gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
+                       REG_A6XX_CP_RB_RPTR_ADDR_HI,
+                       shadowptr(a6xx_gpu, gpu->rb[0]));
+       }
 
        /* Always come up on rb 0 */
        a6xx_gpu->cur_ring = gpu->rb[0];
 
+       a6xx_gpu->cur_ctx = NULL;
+
        /* Enable the SQE to start the CP engine */
        gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
 
@@ -911,18 +1022,6 @@ static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
        return IRQ_HANDLED;
 }
 
-static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR,
-               REG_A6XX_CP_RB_RPTR_ADDR_LO),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
-               REG_A6XX_CP_RB_RPTR_ADDR_HI),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR),
-       REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
-};
-
 static int a6xx_pm_resume(struct msm_gpu *gpu)
 {
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -931,6 +1030,8 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
 
        gpu->needs_hw_init = true;
 
+       trace_msm_gpu_resume(0);
+
        ret = a6xx_gmu_resume(a6xx_gpu);
        if (ret)
                return ret;
@@ -945,6 +1046,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
        struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 
+       trace_msm_gpu_suspend(0);
+
        devfreq_suspend_device(gpu->devfreq.devfreq);
 
        return a6xx_gmu_stop(a6xx_gpu);
@@ -983,6 +1086,11 @@ static void a6xx_destroy(struct msm_gpu *gpu)
                drm_gem_object_put(a6xx_gpu->sqe_bo);
        }
 
+       if (a6xx_gpu->shadow_bo) {
+               msm_gem_unpin_iova(a6xx_gpu->shadow_bo, gpu->aspace);
+               drm_gem_object_put(a6xx_gpu->shadow_bo);
+       }
+
        a6xx_gmu_remove(a6xx_gpu);
 
        adreno_gpu_cleanup(adreno_gpu);
@@ -1017,6 +1125,31 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
        return (unsigned long)busy_time;
 }
 
+static struct msm_gem_address_space *
+a6xx_create_private_address_space(struct msm_gpu *gpu)
+{
+       struct msm_mmu *mmu;
+
+       mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
+
+       if (IS_ERR(mmu))
+               return ERR_CAST(mmu);
+
+       return msm_gem_address_space_create(mmu,
+               "gpu", 0x100000000ULL, 0x1ffffffffULL);
+}
+
+static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+       struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+       if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+               return a6xx_gpu->shadow[ring->id];
+
+       return ring->memptrs->rptr = gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
+}
+
 static const struct adreno_gpu_funcs funcs = {
        .base = {
                .get_param = adreno_get_param,
@@ -1025,7 +1158,6 @@ static const struct adreno_gpu_funcs funcs = {
                .pm_resume = a6xx_pm_resume,
                .recover = a6xx_recover,
                .submit = a6xx_submit,
-               .flush = a6xx_flush,
                .active_ring = a6xx_active_ring,
                .irq = a6xx_irq,
                .destroy = a6xx_destroy,
@@ -1040,6 +1172,8 @@ static const struct adreno_gpu_funcs funcs = {
                .gpu_state_put = a6xx_gpu_state_put,
 #endif
                .create_address_space = adreno_iommu_create_address_space,
+               .create_private_address_space = a6xx_create_private_address_space,
+               .get_rptr = a6xx_get_rptr,
        },
        .get_timestamp = a6xx_get_timestamp,
 };
@@ -1048,6 +1182,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
 {
        struct msm_drm_private *priv = dev->dev_private;
        struct platform_device *pdev = priv->gpu_pdev;
+       struct adreno_platform_config *config = pdev->dev.platform_data;
+       const struct adreno_info *info;
        struct device_node *node;
        struct a6xx_gpu *a6xx_gpu;
        struct adreno_gpu *adreno_gpu;
@@ -1062,9 +1198,15 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
        gpu = &adreno_gpu->base;
 
        adreno_gpu->registers = NULL;
-       adreno_gpu->reg_offsets = a6xx_register_offsets;
 
-       if (adreno_is_a650(adreno_gpu))
+       /*
+        * We need to know the platform type before calling into adreno_gpu_init
+        * so that the hw_apriv flag can be set correctly. Snoop into the
+        * info and grab the revision number.
+        */
+       info = adreno_info(config->rev);
+
+       if (info && info->revn == 650)
                adreno_gpu->base.hw_apriv = true;
 
        ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
index 03ba60d..3eeebf6 100644 (file)
@@ -19,8 +19,15 @@ struct a6xx_gpu {
        uint64_t sqe_iova;
 
        struct msm_ringbuffer *cur_ring;
+       struct msm_file_private *cur_ctx;
 
        struct a6xx_gmu gmu;
+
+       struct drm_gem_object *shadow_bo;
+       uint64_t shadow_iova;
+       uint32_t *shadow;
+
+       bool has_whereami;
 };
 
 #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
@@ -50,6 +57,9 @@ static inline bool a6xx_has_gbif(struct adreno_gpu *gpu)
        return true;
 }
 
+#define shadowptr(_a6xx_gpu, _ring) ((_a6xx_gpu)->shadow_iova + \
+               ((_ring)->id * sizeof(uint32_t)))
+
 int a6xx_gmu_resume(struct a6xx_gpu *gpu);
 int a6xx_gmu_stop(struct a6xx_gpu *gpu);
 
index b12f5b4..e9ede19 100644 (file)
@@ -875,7 +875,7 @@ static void a6xx_get_indexed_registers(struct msm_gpu *gpu,
        int i;
 
        a6xx_state->indexed_regs = state_kcalloc(a6xx_state, count,
-               sizeof(a6xx_state->indexed_regs));
+               sizeof(*a6xx_state->indexed_regs));
        if (!a6xx_state->indexed_regs)
                return;
 
index 9eeb46b..58e03b2 100644 (file)
@@ -282,7 +282,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
        int ret;
 
        if (pdev)
-               gpu = platform_get_drvdata(pdev);
+               gpu = dev_to_gpu(&pdev->dev);
 
        if (!gpu) {
                dev_err_once(dev->dev, "no GPU device was found\n");
@@ -417,15 +417,13 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
                return PTR_ERR(gpu);
        }
 
-       dev_set_drvdata(dev, gpu);
-
        return 0;
 }
 
 static void adreno_unbind(struct device *dev, struct device *master,
                void *data)
 {
-       struct msm_gpu *gpu = dev_get_drvdata(dev);
+       struct msm_gpu *gpu = dev_to_gpu(dev);
 
        pm_runtime_force_suspend(dev);
        gpu->funcs->destroy(gpu);
@@ -490,16 +488,14 @@ static const struct of_device_id dt_match[] = {
 #ifdef CONFIG_PM
 static int adreno_resume(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct msm_gpu *gpu = platform_get_drvdata(pdev);
+       struct msm_gpu *gpu = dev_to_gpu(dev);
 
        return gpu->funcs->pm_resume(gpu);
 }
 
 static int adreno_suspend(struct device *dev)
 {
-       struct platform_device *pdev = to_platform_device(dev);
-       struct msm_gpu *gpu = platform_get_drvdata(pdev);
+       struct msm_gpu *gpu = dev_to_gpu(dev);
 
        return gpu->funcs->pm_suspend(gpu);
 }
index 862dd35..458b5b2 100644 (file)
@@ -189,12 +189,27 @@ struct msm_gem_address_space *
 adreno_iommu_create_address_space(struct msm_gpu *gpu,
                struct platform_device *pdev)
 {
-       struct iommu_domain *iommu = iommu_domain_alloc(&platform_bus_type);
-       struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, iommu);
+       struct iommu_domain *iommu;
+       struct msm_mmu *mmu;
        struct msm_gem_address_space *aspace;
+       u64 start, size;
 
-       aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
-               0xffffffff - SZ_16M);
+       iommu = iommu_domain_alloc(&platform_bus_type);
+       if (!iommu)
+               return NULL;
+
+       mmu = msm_iommu_new(&pdev->dev, iommu);
+
+       /*
+        * Use the aperture start or SZ_16M, whichever is greater. This will
+        * ensure that we align with the allocated pagetable range while still
+        * allowing room in the lower 32 bits for GMEM and whatnot
+        */
+       start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
+       size = iommu->geometry.aperture_end - start + 1;
+
+       aspace = msm_gem_address_space_create(mmu, "gpu",
+               start & GENMASK_ULL(48, 0), size);
 
        if (IS_ERR(aspace) && !IS_ERR(mmu))
                mmu->funcs->destroy(mmu);
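Worked through with illustrative numbers (no particular SoC implied): an IOMMU reporting an aperture of [0x100000000, 0x1ffffffffff] yields start = max(SZ_16M, 0x100000000) = 0x100000000 and size = 0x1ffffffffff - 0x100000000 + 1; the GENMASK_ULL(48, 0) mask then keeps bits 0..48, i.e. the 49 address bits the mask implies the GPU can use. The same arithmetic as a standalone helper:

        static void aspace_bounds(struct iommu_domain *iommu,
                                  u64 *start, u64 *size)
        {
                /* never start below 16M so GMEM etc. keep low-32-bit room */
                *start = max_t(u64, SZ_16M, iommu->geometry.aperture_start);
                *size = iommu->geometry.aperture_end - *start + 1;
                *start &= GENMASK_ULL(48, 0);
        }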
@@ -407,8 +422,9 @@ int adreno_hw_init(struct msm_gpu *gpu)
 static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
                struct msm_ringbuffer *ring)
 {
-       return ring->memptrs->rptr = adreno_gpu_read(
-               adreno_gpu, REG_ADRENO_CP_RB_RPTR);
+       struct msm_gpu *gpu = &adreno_gpu->base;
+
+       return gpu->funcs->get_rptr(gpu, ring);
 }
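get_rptr() is now a trampoline into the per-target funcs table instead of the REG_ADRENO_* offset indirection removed later in this series. A sketch of the a6xx-style callback wired up above (illustrative; the shadow path only applies when CP_WHERE_AM_I is usable):

        static uint32_t a6xx_get_rptr(struct msm_gpu *gpu,
                                      struct msm_ringbuffer *ring)
        {
                struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
                struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

                /* prefer the CP-maintained shadow copy over an MMIO read */
                if (a6xx_gpu->has_whereami)
                        return a6xx_gpu->shadow[ring->id];

                return gpu_read(gpu, REG_A6XX_CP_RB_RPTR);
        }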
 
 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
@@ -434,81 +450,8 @@ void adreno_recover(struct msm_gpu *gpu)
        }
 }
 
-void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-               struct msm_file_private *ctx)
-{
-       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-       struct msm_drm_private *priv = gpu->dev->dev_private;
-       struct msm_ringbuffer *ring = submit->ring;
-       unsigned i;
-
-       for (i = 0; i < submit->nr_cmds; i++) {
-               switch (submit->cmd[i].type) {
-               case MSM_SUBMIT_CMD_IB_TARGET_BUF:
-                       /* ignore IB-targets */
-                       break;
-               case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
-                       /* ignore if there has not been a ctx switch: */
-                       if (priv->lastctx == ctx)
-                               break;
-                       fallthrough;
-               case MSM_SUBMIT_CMD_BUF:
-                       OUT_PKT3(ring, adreno_is_a4xx(adreno_gpu) ?
-                               CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
-                       OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
-                       OUT_RING(ring, submit->cmd[i].size);
-                       OUT_PKT2(ring);
-                       break;
-               }
-       }
-
-       OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
-       OUT_RING(ring, submit->seqno);
-
-       if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
-               /* Flush HLSQ lazy updates to make sure there is nothing
-                * pending for indirect loads after the timestamp has
-                * passed:
-                */
-               OUT_PKT3(ring, CP_EVENT_WRITE, 1);
-               OUT_RING(ring, HLSQ_FLUSH);
-       }
-
-       /* wait for idle before cache flush/interrupt */
-       OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
-       OUT_RING(ring, 0x00000000);
-
-       if (!adreno_is_a2xx(adreno_gpu)) {
-               /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
-               OUT_PKT3(ring, CP_EVENT_WRITE, 3);
-               OUT_RING(ring, CACHE_FLUSH_TS | BIT(31));
-               OUT_RING(ring, rbmemptr(ring, fence));
-               OUT_RING(ring, submit->seqno);
-       } else {
-               /* BIT(31) means something else on a2xx */
-               OUT_PKT3(ring, CP_EVENT_WRITE, 3);
-               OUT_RING(ring, CACHE_FLUSH_TS);
-               OUT_RING(ring, rbmemptr(ring, fence));
-               OUT_RING(ring, submit->seqno);
-               OUT_PKT3(ring, CP_INTERRUPT, 1);
-               OUT_RING(ring, 0x80000000);
-       }
-
-#if 0
-       if (adreno_is_a3xx(adreno_gpu)) {
-               /* Dummy set-constant to trigger context rollover */
-               OUT_PKT3(ring, CP_SET_CONSTANT, 2);
-               OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
-               OUT_RING(ring, 0x00000000);
-       }
-#endif
-
-       gpu->funcs->flush(gpu, ring);
-}
-
-void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg)
 {
-       struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
        uint32_t wptr;
 
        /* Copy the shadow to the actual register */
@@ -524,7 +467,7 @@ void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
        /* ensure writes to ringbuffer have hit system memory: */
        mb();
 
-       adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
+       gpu_write(gpu, reg, wptr);
 }
 
 bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
index e55abae..c3775f7 100644 (file)
 #include "adreno_common.xml.h"
 #include "adreno_pm4.xml.h"
 
-#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
-#define REG_SKIP ~0
-#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
-
 extern bool snapshot_debugbus;
 
-/**
- * adreno_regs: List of registers that are used in across all
- * 3D devices. Each device type has different offset value for the same
- * register, so an array of register offsets are declared for every device
- * and are indexed by the enumeration values defined in this enum
- */
-enum adreno_regs {
-       REG_ADRENO_CP_RB_BASE,
-       REG_ADRENO_CP_RB_BASE_HI,
-       REG_ADRENO_CP_RB_RPTR_ADDR,
-       REG_ADRENO_CP_RB_RPTR_ADDR_HI,
-       REG_ADRENO_CP_RB_RPTR,
-       REG_ADRENO_CP_RB_WPTR,
-       REG_ADRENO_CP_RB_CNTL,
-       REG_ADRENO_REGISTER_MAX,
-};
-
 enum {
        ADRENO_FW_PM4 = 0,
        ADRENO_FW_SQE = 0, /* a6xx */
@@ -176,11 +155,6 @@ static inline bool adreno_is_a225(struct adreno_gpu *gpu)
        return gpu->revn == 225;
 }
 
-static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
-{
-       return (gpu->revn >= 300) && (gpu->revn < 400);
-}
-
 static inline bool adreno_is_a305(struct adreno_gpu *gpu)
 {
        return gpu->revn == 305;
@@ -207,11 +181,6 @@ static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
        return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
 }
 
-static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
-{
-       return (gpu->revn >= 400) && (gpu->revn < 500);
-}
-
 static inline int adreno_is_a405(struct adreno_gpu *gpu)
 {
        return gpu->revn == 405;
@@ -269,9 +238,7 @@ struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu,
                const struct firmware *fw, u64 *iova);
 int adreno_hw_init(struct msm_gpu *gpu);
 void adreno_recover(struct msm_gpu *gpu);
-void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-               struct msm_file_private *ctx);
-void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg);
 bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 #if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
 void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
@@ -365,59 +332,12 @@ OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
                ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
 }
 
-/*
- * adreno_reg_check() - Checks the validity of a register enum
- * @gpu:               Pointer to struct adreno_gpu
- * @offset_name:       The register enum that is checked
- */
-static inline bool adreno_reg_check(struct adreno_gpu *gpu,
-               enum adreno_regs offset_name)
-{
-       BUG_ON(offset_name >= REG_ADRENO_REGISTER_MAX || !gpu->reg_offsets[offset_name]);
-
-       /*
-        * REG_SKIP is a special value that tell us that the register in
-        * question isn't implemented on target but don't trigger a BUG(). This
-        * is used to cleanly implement adreno_gpu_write64() and
-        * adreno_gpu_read64() in a generic fashion
-        */
-       if (gpu->reg_offsets[offset_name] == REG_SKIP)
-               return false;
-
-       return true;
-}
-
-static inline u32 adreno_gpu_read(struct adreno_gpu *gpu,
-               enum adreno_regs offset_name)
-{
-       u32 reg = gpu->reg_offsets[offset_name];
-       u32 val = 0;
-       if(adreno_reg_check(gpu,offset_name))
-               val = gpu_read(&gpu->base, reg - 1);
-       return val;
-}
-
-static inline void adreno_gpu_write(struct adreno_gpu *gpu,
-               enum adreno_regs offset_name, u32 data)
-{
-       u32 reg = gpu->reg_offsets[offset_name];
-       if(adreno_reg_check(gpu, offset_name))
-               gpu_write(&gpu->base, reg - 1, data);
-}
-
 struct msm_gpu *a2xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
 struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
 
-static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
-               enum adreno_regs lo, enum adreno_regs hi, u64 data)
-{
-       adreno_gpu_write(gpu, lo, lower_32_bits(data));
-       adreno_gpu_write(gpu, hi, upper_32_bits(data));
-}
-
 static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
 {
        return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
index 3931eec..59bb8c1 100644 (file)
@@ -298,6 +298,7 @@ enum adreno_pm4_type3_packets {
        CP_SET_BIN_DATA5_OFFSET = 46,
        CP_SET_CTXSWITCH_IB = 85,
        CP_REG_WRITE = 109,
+       CP_WHERE_AM_I = 98,
 };
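CP_WHERE_AM_I asks the CP to write its current read pointer to a 64-bit address carried in the packet; that is what keeps the per-ring shadow slots fresh without register polling. A hedged sketch of the emission, reusing the shadowptr() helper from this series:

        OUT_PKT7(ring, CP_WHERE_AM_I, 2);
        OUT_RING(ring, lower_32_bits(shadowptr(a6xx_gpu, ring)));
        OUT_RING(ring, upper_32_bits(shadowptr(a6xx_gpu, ring)));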
 
 enum adreno_state_block {
index f1bc6a1..84ea09d 100644 (file)
@@ -288,19 +288,6 @@ static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
 }
 
 #ifdef CONFIG_DEBUG_FS
-#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
-static int __prefix ## _open(struct inode *inode, struct file *file)   \
-{                                                                      \
-       return single_open(file, __prefix ## _show, inode->i_private);  \
-}                                                                      \
-static const struct file_operations __prefix ## _fops = {              \
-       .owner = THIS_MODULE,                                           \
-       .open = __prefix ## _open,                                      \
-       .release = single_release,                                      \
-       .read = seq_read,                                               \
-       .llseek = seq_lseek,                                            \
-}
-
 static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
 {
        struct dpu_irq *irq_obj = s->private;
@@ -328,7 +315,7 @@ static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
        return 0;
 }
 
-DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
 
 void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
                struct dentry *parent)
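DEFINE_SHOW_ATTRIBUTE() from <linux/seq_file.h> generates the same boilerplate the local macro did: given dpu_debugfs_core_irq_show(), it expands to roughly

        static int dpu_debugfs_core_irq_open(struct inode *inode,
                                             struct file *file)
        {
                return single_open(file, dpu_debugfs_core_irq_show,
                                   inode->i_private);
        }

        static const struct file_operations dpu_debugfs_core_irq_fops = {
                .owner          = THIS_MODULE,
                .open           = dpu_debugfs_core_irq_open,
                .read           = seq_read,
                .llseek         = seq_lseek,
                .release        = single_release,
        };

so the later debugfs_create_file() calls can take &dpu_debugfs_core_irq_fops directly.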
index b36919d..393858e 100644 (file)
@@ -30,6 +30,74 @@ enum dpu_perf_mode {
        DPU_PERF_MODE_MAX
 };
 
+/**
+ * _dpu_core_perf_calc_bw() - calculate BW per crtc
+ * @kms: pointer to the dpu_kms
+ * @crtc: pointer to a crtc
+ * Return: aggregated BW for all planes in crtc
+ */
+static u64 _dpu_core_perf_calc_bw(struct dpu_kms *kms,
+               struct drm_crtc *crtc)
+{
+       struct drm_plane *plane;
+       struct dpu_plane_state *pstate;
+       u64 crtc_plane_bw = 0;
+       u32 bw_factor;
+
+       drm_atomic_crtc_for_each_plane(plane, crtc) {
+               pstate = to_dpu_plane_state(plane->state);
+               if (!pstate)
+                       continue;
+
+               crtc_plane_bw += pstate->plane_fetch_bw;
+       }
+
+       bw_factor = kms->catalog->perf.bw_inefficiency_factor;
+       if (bw_factor) {
+               crtc_plane_bw *= bw_factor;
+               do_div(crtc_plane_bw, 100);
+       }
+
+       return crtc_plane_bw;
+}
+
+/**
+ * _dpu_core_perf_calc_clk() - calculate clock rate per crtc
+ * @kms: pointer to the dpu_kms
+ * @crtc: pointer to a crtc
+ * @state: pointer to a crtc state
+ * Return: max clk for all planes in crtc
+ */
+static u64 _dpu_core_perf_calc_clk(struct dpu_kms *kms,
+               struct drm_crtc *crtc, struct drm_crtc_state *state)
+{
+       struct drm_plane *plane;
+       struct dpu_plane_state *pstate;
+       struct drm_display_mode *mode;
+       u64 crtc_clk;
+       u32 clk_factor;
+
+       mode = &state->adjusted_mode;
+
+       crtc_clk = mode->vtotal * mode->hdisplay * drm_mode_vrefresh(mode);
+
+       drm_atomic_crtc_for_each_plane(plane, crtc) {
+               pstate = to_dpu_plane_state(plane->state);
+               if (!pstate)
+                       continue;
+
+               crtc_clk = max(pstate->plane_clk, crtc_clk);
+       }
+
+       clk_factor = kms->catalog->perf.clk_inefficiency_factor;
+       if (clk_factor) {
+               crtc_clk *= clk_factor;
+               do_div(crtc_clk, 100);
+       }
+
+       return crtc_clk;
+}
+
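The inefficiency factors are fixed-point percentages: a catalog value of 120 scales the raw sum by 1.20 (multiply, then do_div by 100), and 0 leaves it untouched. Worked example with assumed plane loads: two planes fetching 1,000,000,000 and 500,000,000 bytes/s under bw_inefficiency_factor = 120 (the sc7180 value added later in this series) give crtc_plane_bw = 1,500,000,000 * 120 / 100 = 1,800,000,000.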
 static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
 {
        struct msm_drm_private *priv;
@@ -52,12 +120,7 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
        dpu_cstate = to_dpu_crtc_state(state);
        memset(perf, 0, sizeof(struct dpu_core_perf_params));
 
-       if (!dpu_cstate->bw_control) {
-               perf->bw_ctl = kms->catalog->perf.max_bw_high *
-                                       1000ULL;
-               perf->max_per_pipe_ib = perf->bw_ctl;
-               perf->core_clk_rate = kms->perf.max_core_clk_rate;
-       } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+       if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
                perf->bw_ctl = 0;
                perf->max_per_pipe_ib = 0;
                perf->core_clk_rate = 0;
@@ -65,6 +128,10 @@ static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
                perf->bw_ctl = kms->perf.fix_core_ab_vote;
                perf->max_per_pipe_ib = kms->perf.fix_core_ib_vote;
                perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+       } else {
+               perf->bw_ctl = _dpu_core_perf_calc_bw(kms, crtc);
+               perf->max_per_pipe_ib = kms->catalog->perf.min_dram_ib;
+               perf->core_clk_rate = _dpu_core_perf_calc_clk(kms, crtc, state);
        }
 
        DPU_DEBUG(
@@ -116,11 +183,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
                        DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
                                tmp_crtc->base.id, tmp_cstate->new_perf.bw_ctl,
                                tmp_cstate->bw_control);
-                       /*
-                        * For bw check only use the bw if the
-                        * atomic property has been already set
-                        */
-                       if (tmp_cstate->bw_control)
+
                                bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;
                }
 
@@ -132,9 +195,7 @@ int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
 
                DPU_DEBUG("final threshold bw limit = %d\n", threshold);
 
-               if (!dpu_cstate->bw_control) {
-                       DPU_DEBUG("bypass bandwidth check\n");
-               } else if (!threshold) {
+               if (!threshold) {
                        DPU_ERROR("no bandwidth limits specified\n");
                        return -E2BIG;
                } else if (bw > threshold) {
@@ -155,7 +216,11 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
                                        = dpu_crtc_get_client_type(crtc);
        struct drm_crtc *tmp_crtc;
        struct dpu_crtc_state *dpu_cstate;
-       int ret = 0;
+       int i, ret = 0;
+       u64 avg_bw;
+
+       if (!kms->num_paths)
+               return -EINVAL;
 
        drm_for_each_crtc(tmp_crtc, crtc->dev) {
                if (tmp_crtc->enabled &&
@@ -166,10 +231,20 @@ static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
                        perf.max_per_pipe_ib = max(perf.max_per_pipe_ib,
                                        dpu_cstate->new_perf.max_per_pipe_ib);
 
-                       DPU_DEBUG("crtc=%d bw=%llu\n", tmp_crtc->base.id,
-                                       dpu_cstate->new_perf.bw_ctl);
+                       perf.bw_ctl += dpu_cstate->new_perf.bw_ctl;
+
+                       DPU_DEBUG("crtc=%d bw=%llu paths:%d\n",
+                                 tmp_crtc->base.id,
+                                 dpu_cstate->new_perf.bw_ctl, kms->num_paths);
                }
        }
+
+       avg_bw = perf.bw_ctl;
+       do_div(avg_bw, (kms->num_paths * 1000)); /* Bps_to_icc */
+
+       for (i = 0; i < kms->num_paths; i++)
+               icc_set_bw(kms->path[i], avg_bw, perf.max_per_pipe_ib);
+
        return ret;
 }
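icc_set_bw() takes its average and peak votes in kBps, hence the /* Bps_to_icc */ division by 1000, and the aggregate is split evenly across the probed paths. Continuing the assumed numbers above: bw_ctl = 1,800,000,000 Bps over num_paths = 2 becomes avg_bw = 1,800,000,000 / (2 * 1000) = 900,000 kBps voted on each path, with max_per_pipe_ib as the peak (ib) vote.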
 
index c2729f7..f56414a 100644 (file)
@@ -265,11 +265,6 @@ enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
 {
        struct drm_encoder *encoder;
 
-       if (!crtc) {
-               DPU_ERROR("invalid crtc\n");
-               return INTF_MODE_NONE;
-       }
-
        /*
         * TODO: This function is called from dpu debugfs and as part of atomic
         * check. When called from debugfs, the crtc->mutex must be held to
@@ -297,7 +292,6 @@ void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
                dpu_crtc->vblank_cb_time = ktime_get();
        else
                dpu_crtc->vblank_cb_count++;
-       _dpu_crtc_complete_flip(crtc);
        drm_crtc_handle_vblank(crtc);
        trace_dpu_crtc_vblank_cb(DRMID(crtc));
 }
@@ -402,6 +396,7 @@ static void dpu_crtc_frame_event_cb(void *data, u32 event)
 void dpu_crtc_complete_commit(struct drm_crtc *crtc)
 {
        trace_dpu_crtc_complete_commit(DRMID(crtc));
+       _dpu_crtc_complete_flip(crtc);
 }
 
 static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
@@ -421,8 +416,6 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
 
                trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
        }
-
-       drm_mode_debug_printmodeline(adj_mode);
 }
 
 static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
@@ -457,7 +450,6 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
        struct dpu_crtc_mixer *mixer = cstate->mixers;
        struct dpu_hw_pcc_cfg cfg;
        struct dpu_hw_ctl *ctl;
-       struct dpu_hw_mixer *lm;
        struct dpu_hw_dspp *dspp;
        int i;
 
@@ -467,7 +459,6 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
 
        for (i = 0; i < cstate->num_mixers; i++) {
                ctl = mixer[i].lm_ctl;
-               lm = mixer[i].hw_lm;
                dspp = mixer[i].hw_dspp;
 
                if (!dspp || !dspp->ops.setup_pcc)
@@ -496,16 +487,8 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
 static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
                struct drm_crtc_state *old_state)
 {
-       struct dpu_crtc *dpu_crtc;
-       struct dpu_crtc_state *cstate;
+       struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
        struct drm_encoder *encoder;
-       struct drm_device *dev;
-       unsigned long flags;
-
-       if (!crtc) {
-               DPU_ERROR("invalid crtc\n");
-               return;
-       }
 
        if (!crtc->state->enable) {
                DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
@@ -515,21 +498,8 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
 
        DPU_DEBUG("crtc%d\n", crtc->base.id);
 
-       dpu_crtc = to_dpu_crtc(crtc);
-       cstate = to_dpu_crtc_state(crtc->state);
-       dev = crtc->dev;
-
        _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
 
-       if (dpu_crtc->event) {
-               WARN_ON(dpu_crtc->event);
-       } else {
-               spin_lock_irqsave(&dev->event_lock, flags);
-               dpu_crtc->event = crtc->state->event;
-               crtc->state->event = NULL;
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-       }
-
        /* encoder will trigger pending mask now */
        drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
                dpu_encoder_trigger_kickoff_pending(encoder);
@@ -583,14 +553,11 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
                return;
        }
 
-       if (dpu_crtc->event) {
-               DPU_DEBUG("already received dpu_crtc->event\n");
-       } else {
-               spin_lock_irqsave(&dev->event_lock, flags);
-               dpu_crtc->event = crtc->state->event;
-               crtc->state->event = NULL;
-               spin_unlock_irqrestore(&dev->event_lock, flags);
-       }
+       WARN_ON(dpu_crtc->event);
+       spin_lock_irqsave(&dev->event_lock, flags);
+       dpu_crtc->event = crtc->state->event;
+       crtc->state->event = NULL;
+       spin_unlock_irqrestore(&dev->event_lock, flags);
 
        /*
         * If no mixers has been allocated in dpu_crtc_atomic_check(),
@@ -635,14 +602,7 @@ static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
 static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
 {
-       struct dpu_crtc_state *cstate;
-
-       if (!crtc || !state) {
-               DPU_ERROR("invalid argument(s)\n");
-               return;
-       }
-
-       cstate = to_dpu_crtc_state(state);
+       struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
 
        DPU_DEBUG("crtc%d\n", crtc->base.id);
 
@@ -731,14 +691,8 @@ static void dpu_crtc_reset(struct drm_crtc *crtc)
  */
 static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
 {
-       struct dpu_crtc_state *cstate, *old_cstate;
+       struct dpu_crtc_state *cstate, *old_cstate = to_dpu_crtc_state(crtc->state);
 
-       if (!crtc || !crtc->state) {
-               DPU_ERROR("invalid argument(s)\n");
-               return NULL;
-       }
-
-       old_cstate = to_dpu_crtc_state(crtc->state);
        cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
        if (!cstate) {
                DPU_ERROR("failed to allocate state\n");
@@ -754,19 +708,12 @@ static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
 static void dpu_crtc_disable(struct drm_crtc *crtc,
                             struct drm_crtc_state *old_crtc_state)
 {
-       struct dpu_crtc *dpu_crtc;
-       struct dpu_crtc_state *cstate;
+       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+       struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
        struct drm_encoder *encoder;
        unsigned long flags;
        bool release_bandwidth = false;
 
-       if (!crtc || !crtc->state) {
-               DPU_ERROR("invalid crtc\n");
-               return;
-       }
-       dpu_crtc = to_dpu_crtc(crtc);
-       cstate = to_dpu_crtc_state(crtc->state);
-
        DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
 
        /* Disable/save vblank irq handling */
@@ -825,19 +772,13 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,
 static void dpu_crtc_enable(struct drm_crtc *crtc,
                struct drm_crtc_state *old_crtc_state)
 {
-       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
        struct drm_encoder *encoder;
        bool request_bandwidth = false;
 
-       if (!crtc) {
-               DPU_ERROR("invalid crtc\n");
-               return;
-       }
-
        pm_runtime_get_sync(crtc->dev->dev);
 
        DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
-       dpu_crtc = to_dpu_crtc(crtc);
 
        drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
                /* in video mode, we hold an extra bandwidth reference
@@ -873,15 +814,15 @@ struct plane_state {
 static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
 {
-       struct dpu_crtc *dpu_crtc;
+       struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+       struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
        struct plane_state *pstates;
-       struct dpu_crtc_state *cstate;
 
        const struct drm_plane_state *pstate;
        struct drm_plane *plane;
        struct drm_display_mode *mode;
 
-       int cnt = 0, rc = 0, mixer_width, i, z_pos;
+       int cnt = 0, rc = 0, mixer_width = 0, i, z_pos;
 
        struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
        int multirect_count = 0;
@@ -889,16 +830,8 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
        int left_zpos_cnt = 0, right_zpos_cnt = 0;
        struct drm_rect crtc_rect = { 0 };
 
-       if (!crtc) {
-               DPU_ERROR("invalid crtc\n");
-               return -EINVAL;
-       }
-
        pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
 
-       dpu_crtc = to_dpu_crtc(crtc);
-       cstate = to_dpu_crtc_state(state);
-
        if (!state->enable || !state->active) {
                DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
                                crtc->base.id, state->enable, state->active);
@@ -914,9 +847,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 
        memset(pipe_staged, 0, sizeof(pipe_staged));
 
-       mixer_width = mode->hdisplay / cstate->num_mixers;
+       if (cstate->num_mixers) {
+               mixer_width = mode->hdisplay / cstate->num_mixers;
 
-       _dpu_crtc_setup_lm_bounds(crtc, state);
+               _dpu_crtc_setup_lm_bounds(crtc, state);
+       }
 
        crtc_rect.x2 = mode->hdisplay;
        crtc_rect.y2 = mode->vdisplay;
@@ -1242,23 +1177,7 @@ static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
        return 0;
 }
 
-static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, _dpu_debugfs_status_show, inode->i_private);
-}
-
-#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
-static int __prefix ## _open(struct inode *inode, struct file *file)   \
-{                                                                      \
-       return single_open(file, __prefix ## _show, inode->i_private);  \
-}                                                                      \
-static const struct file_operations __prefix ## _fops = {              \
-       .owner = THIS_MODULE,                                           \
-       .open = __prefix ## _open,                                      \
-       .release = single_release,                                      \
-       .read = seq_read,                                               \
-       .llseek = seq_lseek,                                            \
-}
+DEFINE_SHOW_ATTRIBUTE(_dpu_debugfs_status);
 
 static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
 {
@@ -1275,25 +1194,18 @@ static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
 
        return 0;
 }
-DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
+DEFINE_SHOW_ATTRIBUTE(dpu_crtc_debugfs_state);
 
 static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
 {
        struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
 
-       static const struct file_operations debugfs_status_fops = {
-               .open =         _dpu_debugfs_status_open,
-               .read =         seq_read,
-               .llseek =       seq_lseek,
-               .release =      single_release,
-       };
-
        dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
                        crtc->dev->primary->debugfs_root);
 
        debugfs_create_file("status", 0400,
                        dpu_crtc->debugfs_root,
-                       dpu_crtc, &debugfs_status_fops);
+                       dpu_crtc, &_dpu_debugfs_status_fops);
        debugfs_create_file("state", 0600,
                        dpu_crtc->debugfs_root,
                        &dpu_crtc->base,
index bd6def4..f7f5c25 100644 (file)
@@ -1001,6 +1001,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 
        trace_dpu_enc_mode_set(DRMID(drm_enc));
 
+       if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp)
+               msm_dp_display_mode_set(priv->dp, drm_enc, mode, adj_mode);
+
        list_for_each_entry(conn_iter, connector_list, head)
                if (conn_iter->encoder == drm_enc)
                        conn = conn_iter;
@@ -1109,6 +1112,13 @@ static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
                return;
        }
 
+
+       if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
+               dpu_enc->cur_master->hw_mdptop &&
+               dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
+               dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
+                       dpu_enc->cur_master->hw_mdptop);
+
        _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
 
        if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI &&
@@ -1146,6 +1156,7 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
 {
        struct dpu_encoder_virt *dpu_enc = NULL;
        int ret = 0;
+       struct msm_drm_private *priv;
        struct drm_display_mode *cur_mode = NULL;
 
        if (!drm_enc) {
@@ -1156,6 +1167,7 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
 
        mutex_lock(&dpu_enc->enc_lock);
        cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
+       priv = drm_enc->dev->dev_private;
 
        trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
                             cur_mode->vdisplay);
@@ -1176,6 +1188,15 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
 
        _dpu_encoder_virt_enable_helper(drm_enc);
 
+       if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
+               ret = msm_dp_display_enable(priv->dp,
+                                               drm_enc);
+               if (ret) {
+                       DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n",
+                               ret);
+                       goto out;
+               }
+       }
        dpu_enc->enabled = true;
 
 out:
@@ -1211,6 +1232,11 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
        /* wait for idle */
        dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
 
+       if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
+               if (msm_dp_display_pre_disable(priv->dp, drm_enc))
+                       DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n");
+       }
+
        dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
 
        for (i = 0; i < dpu_enc->num_phys_encs; i++) {
@@ -1220,6 +1246,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
                        phys->ops.disable(phys);
        }
 
+
        /* after phys waits for frame-done, should be no more frames pending */
        if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
                DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
@@ -1234,6 +1261,11 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 
        DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
 
+       if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
+               if (msm_dp_display_disable(priv->dp, drm_enc))
+                       DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n");
+       }
+
        mutex_unlock(&dpu_enc->enc_lock);
 }
 
@@ -1880,24 +1912,13 @@ static int _dpu_encoder_status_show(struct seq_file *s, void *data)
        return 0;
 }
 
-static int _dpu_encoder_debugfs_status_open(struct inode *inode,
-               struct file *file)
-{
-       return single_open(file, _dpu_encoder_status_show, inode->i_private);
-}
+DEFINE_SHOW_ATTRIBUTE(_dpu_encoder_status);
 
 static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 {
        struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
        int i;
 
-       static const struct file_operations debugfs_status_fops = {
-               .open =         _dpu_encoder_debugfs_status_open,
-               .read =         seq_read,
-               .llseek =       seq_lseek,
-               .release =      single_release,
-       };
-
        char name[DPU_NAME_SIZE];
 
        if (!drm_enc->dev) {
@@ -1913,7 +1934,7 @@ static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
 
        /* don't error check these */
        debugfs_create_file("status", 0600,
-               dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
+               dpu_enc->debugfs_root, dpu_enc, &_dpu_encoder_status_fops);
 
        for (i = 0; i < dpu_enc->num_phys_encs; i++)
                if (dpu_enc->phys_encs[i]->ops.late_register)
@@ -2008,7 +2029,7 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
 {
        int ret = 0;
        int i = 0;
-       enum dpu_intf_type intf_type;
+       enum dpu_intf_type intf_type = INTF_NONE;
        struct dpu_enc_phys_init_params phys_params;
 
        if (!dpu_enc) {
@@ -2030,9 +2051,9 @@ static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
        case DRM_MODE_ENCODER_DSI:
                intf_type = INTF_DSI;
                break;
-       default:
-               DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
-               return -EINVAL;
+       case DRM_MODE_ENCODER_TMDS:
+               intf_type = INTF_DP;
+               break;
        }
 
        WARN_ON(disp_info->num_of_h_tiles < 1);
index b5a4905..805e059 100644 (file)
@@ -100,6 +100,14 @@ static void drm_mode_to_intf_timing_params(
         * display_v_end -= mode->hsync_start - mode->hdisplay;
         * }
         */
+       /* for DP/EDP, Shift timings to align it to bottom right */
+       if ((phys_enc->hw_intf->cap->type == INTF_DP) ||
+               (phys_enc->hw_intf->cap->type == INTF_EDP)) {
+               timing->h_back_porch += timing->h_front_porch;
+               timing->h_front_porch = 0;
+               timing->v_back_porch += timing->v_front_porch;
+               timing->v_front_porch = 0;
+       }
 }
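Folding the front porches into the back porches pushes the active region flush against the sync edge, i.e. to the bottom-right of the frame window, without changing the totals the link is timed from. With assumed CEA 1080p values (h_front_porch = 88, h_back_porch = 148), the result is h_front_porch = 0 and h_back_porch = 236; htotal, and therefore the pixel clock, is unchanged.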
 
 static u32 get_horizontal_total(const struct intf_timing_params *timing)
@@ -298,7 +306,6 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
        struct dpu_hw_ctl *hw_ctl;
        unsigned long lock_flags;
        u32 flush_register = 0;
-       int new_cnt = -1, old_cnt = -1;
 
        hw_ctl = phys_enc->hw_ctl;
 
@@ -308,7 +315,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
                phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
                                phys_enc);
 
-       old_cnt  = atomic_read(&phys_enc->pending_kickoff_cnt);
+       atomic_read(&phys_enc->pending_kickoff_cnt);
 
        /*
         * only decrement the pending flush count if we've actually flushed
@@ -320,8 +327,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
                flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
 
        if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
-               new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
-                               -1, 0);
+               atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
        spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
 
        /* Signal any waiting atomic commit thread */
index 97d122e..60b304b 100644 (file)
@@ -684,7 +684,8 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
        .max_bw_high = 6800000,
        .min_core_ib = 2400000,
        .min_llcc_ib = 800000,
-       .min_dram_ib = 800000,
+       .min_dram_ib = 1600000,
+       .min_prefill_lines = 24,
        .danger_lut_tbl = {0xff, 0xffff, 0x0},
        .qos_lut_tbl = {
                {.nentry = ARRAY_SIZE(sc7180_qos_linear),
@@ -701,6 +702,8 @@ static const struct dpu_perf_cfg sc7180_perf_data = {
                {.rd_enable = 1, .wr_enable = 1},
                {.rd_enable = 1, .wr_enable = 0}
        },
+       .clk_inefficiency_factor = 105,
+       .bw_inefficiency_factor = 120,
 };
 
 static const struct dpu_perf_cfg sm8150_perf_data = {
index 1b7a921..3544af1 100644 (file)
@@ -659,6 +659,8 @@ struct dpu_perf_cdp_cfg {
  * @downscaling_prefill_lines  downscaling latency in lines
  * @amortizable_threshold minimum y position for traffic shaping prefill
  * @min_prefill_lines  minimum pipeline latency in lines
+ * @clk_inefficiency_factor DPU src clock inefficiency factor
+ * @bw_inefficiency_factor DPU axi bus bw inefficiency factor
  * @safe_lut_tbl: LUT tables for safe signals
  * @danger_lut_tbl: LUT tables for danger signals
  * @qos_lut_tbl: LUT tables for QoS signals
@@ -683,6 +685,8 @@ struct dpu_perf_cfg {
        u32 downscaling_prefill_lines;
        u32 amortizable_threshold;
        u32 min_prefill_lines;
+       u32 clk_inefficiency_factor;
+       u32 bw_inefficiency_factor;
        u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
        u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
        struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
index c0a4d4e..d93c44f 100644 (file)
@@ -85,30 +85,17 @@ static int _dpu_danger_signal_status(struct seq_file *s,
        return 0;
 }
 
-#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix)                          \
-static int __prefix ## _open(struct inode *inode, struct file *file)   \
-{                                                                      \
-       return single_open(file, __prefix ## _show, inode->i_private);  \
-}                                                                      \
-static const struct file_operations __prefix ## _fops = {              \
-       .owner = THIS_MODULE,                                           \
-       .open = __prefix ## _open,                                      \
-       .release = single_release,                                      \
-       .read = seq_read,                                               \
-       .llseek = seq_lseek,                                            \
-}
-
 static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
 {
        return _dpu_danger_signal_status(s, true);
 }
-DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_danger_stats);
 
 static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
 {
        return _dpu_danger_signal_status(s, false);
 }
-DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_safe_stats);
 
 static void dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
                struct dentry *parent)
@@ -195,10 +182,15 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
        struct dpu_kms *dpu_kms = to_dpu_kms(kms);
        void *p = dpu_hw_util_get_log_mask_ptr();
        struct dentry *entry;
+       struct drm_device *dev;
+       struct msm_drm_private *priv;
 
        if (!p)
                return -EINVAL;
 
+       dev = dpu_kms->dev;
+       priv = dev->dev_private;
+
        entry = debugfs_create_dir("debug", minor->debugfs_root);
 
        debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, entry, p);
@@ -207,6 +199,9 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
        dpu_debugfs_vbif_init(dpu_kms, entry);
        dpu_debugfs_core_irq_init(dpu_kms, entry);
 
+       if (priv->dp)
+               msm_dp_debugfs_init(priv->dp, minor);
+
        return dpu_core_perf_debugfs_init(dpu_kms, entry);
 }
 #endif
@@ -290,6 +285,28 @@ static int dpu_kms_global_obj_init(struct dpu_kms *dpu_kms)
        return 0;
 }
 
+static int dpu_kms_parse_data_bus_icc_path(struct dpu_kms *dpu_kms)
+{
+       struct icc_path *path0;
+       struct icc_path *path1;
+       struct drm_device *dev = dpu_kms->dev;
+
+       path0 = of_icc_get(dev->dev, "mdp0-mem");
+       path1 = of_icc_get(dev->dev, "mdp1-mem");
+
+       if (IS_ERR_OR_NULL(path0))
+               return PTR_ERR_OR_ZERO(path0);
+
+       dpu_kms->path[0] = path0;
+       dpu_kms->num_paths = 1;
+
+       if (!IS_ERR_OR_NULL(path1)) {
+               dpu_kms->path[1] = path1;
+               dpu_kms->num_paths++;
+       }
+       return 0;
+}
+
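of_icc_get() distinguishes "no interconnect described" (NULL) from a real failure (ERR_PTR), and the helper maps both through one expression: a NULL mdp0-mem leaves num_paths at 0 and returns 0, while e.g. -EPROBE_DEFER propagates; mdp1-mem is picked up opportunistically. The load-bearing pattern in isolation:

        struct icc_path *p = of_icc_get(dev, "mdp0-mem");

        if (IS_ERR_OR_NULL(p))
                return PTR_ERR_OR_ZERO(p);      /* 0 if absent, errno if broken */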
 static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
 {
        return dpu_crtc_vblank(crtc, true);
@@ -479,6 +496,33 @@ static int _dpu_kms_initialize_dsi(struct drm_device *dev,
        return rc;
 }
 
+static int _dpu_kms_initialize_displayport(struct drm_device *dev,
+                                           struct msm_drm_private *priv,
+                                           struct dpu_kms *dpu_kms)
+{
+       struct drm_encoder *encoder = NULL;
+       int rc = 0;
+
+       if (!priv->dp)
+               return rc;
+
+       encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
+       if (IS_ERR(encoder)) {
+               DPU_ERROR("encoder init failed for dp display\n");
+               return PTR_ERR(encoder);
+       }
+
+       rc = msm_dp_modeset_init(priv->dp, dev, encoder);
+       if (rc) {
+               DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
+               drm_encoder_cleanup(encoder);
+               return rc;
+       }
+
+       priv->encoders[priv->num_encoders++] = encoder;
+       return rc;
+}
+
 /**
  * _dpu_kms_setup_displays - create encoders, bridges and connectors
  *                           for underlying displays
@@ -491,12 +535,21 @@ static int _dpu_kms_setup_displays(struct drm_device *dev,
                                    struct msm_drm_private *priv,
                                    struct dpu_kms *dpu_kms)
 {
-       /**
-        * Extend this function to initialize other
-        * types of displays
-        */
+       int rc = 0;
 
-       return _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+       rc = _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+       if (rc) {
+               DPU_ERROR("initialize_dsi failed, rc = %d\n", rc);
+               return rc;
+       }
+
+       rc = _dpu_kms_initialize_displayport(dev, priv, dpu_kms);
+       if (rc) {
+               DPU_ERROR("initialize_DP failed, rc = %d\n", rc);
+               return rc;
+       }
+
+       return rc;
 }
 
 static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
@@ -681,13 +734,20 @@ static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
        info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
                        MSM_DISPLAY_CAP_VID_MODE;
 
-       /* TODO: No support for DSI swap */
-       for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
-               if (priv->dsi[i]) {
-                       info.h_tile_instance[info.num_of_h_tiles] = i;
-                       info.num_of_h_tiles++;
+       switch (info.intf_type) {
+       case DRM_MODE_ENCODER_DSI:
+               /* TODO: No support for DSI swap */
+               for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+                       if (priv->dsi[i]) {
+                               info.h_tile_instance[info.num_of_h_tiles] = i;
+                               info.num_of_h_tiles++;
+                       }
                }
-       }
+               break;
+       case DRM_MODE_ENCODER_TMDS:
+               info.num_of_h_tiles = 1;
+               break;
+       }
 
        rc = dpu_encoder_setup(encoder->dev, encoder, &info);
        if (rc)
@@ -709,6 +769,23 @@ static void dpu_irq_preinstall(struct msm_kms *kms)
        dpu_core_irq_preinstall(dpu_kms);
 }
 
+static int dpu_irq_postinstall(struct msm_kms *kms)
+{
+       struct msm_drm_private *priv;
+       struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+       if (!dpu_kms || !dpu_kms->dev)
+               return -EINVAL;
+
+       priv = dpu_kms->dev->dev_private;
+       if (!priv)
+               return -EINVAL;
+
+       msm_dp_irq_postinstall(priv->dp);
+
+       return 0;
+}
+
 static void dpu_irq_uninstall(struct msm_kms *kms)
 {
        struct dpu_kms *dpu_kms = to_dpu_kms(kms);
@@ -719,6 +796,7 @@ static void dpu_irq_uninstall(struct msm_kms *kms)
 static const struct msm_kms_funcs kms_funcs = {
        .hw_init         = dpu_kms_hw_init,
        .irq_preinstall  = dpu_irq_preinstall,
+       .irq_postinstall = dpu_irq_postinstall,
        .irq_uninstall   = dpu_irq_uninstall,
        .irq             = dpu_irq,
        .enable_commit   = dpu_kms_enable_commit,
@@ -952,6 +1030,9 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
 
        dpu_vbif_init_memtypes(dpu_kms);
 
+       if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
+               dpu_kms_parse_data_bus_icc_path(dpu_kms);
+
        pm_runtime_put_sync(&dpu_kms->pdev->dev);
 
        return 0;
@@ -1079,7 +1160,7 @@ static int dpu_dev_remove(struct platform_device *pdev)
 
 static int __maybe_unused dpu_runtime_suspend(struct device *dev)
 {
-       int rc = -1;
+       int i, rc = -1;
        struct platform_device *pdev = to_platform_device(dev);
        struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
        struct dss_module_power *mp = &dpu_kms->mp;
@@ -1090,6 +1171,9 @@ static int __maybe_unused dpu_runtime_suspend(struct device *dev)
        if (rc)
                DPU_ERROR("clock disable failed rc:%d\n", rc);
 
+       for (i = 0; i < dpu_kms->num_paths; i++)
+               icc_set_bw(dpu_kms->path[i], 0, 0);
+
        return rc;
 }
 
@@ -1101,8 +1185,15 @@ static int __maybe_unused dpu_runtime_resume(struct device *dev)
        struct drm_encoder *encoder;
        struct drm_device *ddev;
        struct dss_module_power *mp = &dpu_kms->mp;
+       int i;
 
        ddev = dpu_kms->dev;
+
+       /* Min vote of BW is required before turning on AXI clk */
+       for (i = 0; i < dpu_kms->num_paths; i++)
+               icc_set_bw(dpu_kms->path[i], 0,
+                       dpu_kms->catalog->perf.min_dram_ib);
+
        rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
        if (rc) {
                DPU_ERROR("clock enable failed rc:%d\n", rc);
index e140cd6..1c0e4c0 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef __DPU_KMS_H__
 #define __DPU_KMS_H__
 
+#include <linux/interconnect.h>
+
 #include <drm/drm_drv.h>
 
 #include "msm_drv.h"
@@ -140,6 +142,8 @@ struct dpu_kms {
         * when disabled.
         */
        atomic_t bandwidth_ref;
+       struct icc_path *path[2];
+       u32 num_paths;
 };
 
 struct vsync_info {
index 7d3fdbb..cd40788 100644 (file)
@@ -8,7 +8,6 @@
 #include <linux/irqdesc.h>
 #include <linux/irqchip/chained_irq.h>
 #include "dpu_kms.h"
-#include <linux/interconnect.h>
 
 #define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
 
@@ -277,9 +276,11 @@ int dpu_mdss_init(struct drm_device *dev)
 
        DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
 
-       ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
-       if (ret)
-               return ret;
+       if (!of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) {
+               ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
+               if (ret)
+                       return ret;
+       }
 
        mp = &dpu_mdss->mp;
        ret = msm_dss_parse_clock(pdev, mp);
index 29e373d..7ea90d2 100644 (file)
@@ -131,6 +131,86 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
        return to_dpu_kms(priv->kms);
 }
 
+/**
+ * _dpu_plane_calc_bw - calculate bandwidth required for a plane
+ * @plane: Pointer to drm plane.
+ * Result: Updates calculated bandwidth in the plane state.
+ * BW Equation: src_w * src_h * bpp * fps * (v_total / v_dest)
+ * Prefill BW Equation: line src bytes * line_time
+ */
+static void _dpu_plane_calc_bw(struct drm_plane *plane,
+       struct drm_framebuffer *fb)
+{
+       struct dpu_plane *pdpu = to_dpu_plane(plane);
+       struct dpu_plane_state *pstate;
+       struct drm_display_mode *mode;
+       const struct dpu_format *fmt = NULL;
+       struct dpu_kms *dpu_kms = _dpu_plane_get_kms(plane);
+       int src_width, src_height, dst_height, fps;
+       u64 plane_prefill_bw;
+       u64 plane_bw;
+       u32 hw_latency_lines;
+       u64 scale_factor;
+       int vbp, vpw;
+
+       pstate = to_dpu_plane_state(plane->state);
+       mode = &plane->state->crtc->mode;
+
+       fmt = dpu_get_dpu_format_ext(fb->format->format, fb->modifier);
+
+       src_width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
+       src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+       dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+       fps = drm_mode_vrefresh(mode);
+       vbp = mode->vtotal - mode->vsync_end;
+       vpw = mode->vsync_end - mode->vsync_start;
+       hw_latency_lines =  dpu_kms->catalog->perf.min_prefill_lines;
+       scale_factor = src_height > dst_height ?
+               mult_frac(src_height, 1, dst_height) : 1;
+
+       plane_bw =
+               src_width * mode->vtotal * fps * fmt->bpp *
+               scale_factor;
+
+       plane_prefill_bw =
+               src_width * hw_latency_lines * fps * fmt->bpp *
+               scale_factor * mode->vtotal;
+
+       do_div(plane_prefill_bw, (vbp + vpw));
+
+       pstate->plane_fetch_bw = max(plane_bw, plane_prefill_bw);
+}
+
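Plugging assumed 1080p numbers into the two branches (src_width = 1920, vtotal = 1125, fps = 60, fmt->bpp = 4, scale_factor = 1): the steady-state term is plane_bw = 1920 * 1125 * 60 * 4 = 518,400,000, and with hw_latency_lines = 24 and vbp + vpw = 36 + 5 = 41 the prefill term is 1920 * 24 * 60 * 4 * 1125 / 41 ≈ 303,453,659, so plane_fetch_bw = max(...) = 518,400,000.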
+/**
+ * _dpu_plane_calc_clk - calculate clock required for a plane
+ * @plane: Pointer to drm plane.
+ * Result: Updates calculated clock in the plane state.
+ * Clock equation: dst_w * v_total * fps * (src_h / dst_h)
+ */
+static void _dpu_plane_calc_clk(struct drm_plane *plane)
+{
+       struct dpu_plane *pdpu = to_dpu_plane(plane);
+       struct dpu_plane_state *pstate;
+       struct drm_display_mode *mode;
+       int dst_width, src_height, dst_height, fps;
+
+       pstate = to_dpu_plane_state(plane->state);
+       mode = &plane->state->crtc->mode;
+
+       src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+       dst_width = drm_rect_width(&pdpu->pipe_cfg.dst_rect);
+       dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+       fps = drm_mode_vrefresh(mode);
+
+       pstate->plane_clk =
+               dst_width * mode->vtotal * fps;
+
+       if (src_height > dst_height) {
+               pstate->plane_clk *= src_height;
+               do_div(pstate->plane_clk, dst_height);
+       }
+}
+
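Same style of sanity check for the clock: dst_width = 1920, vtotal = 1125 and fps = 60 give plane_clk = 1920 * 1125 * 60 = 129,600,000 (129.6 MHz) for an unscaled plane; a 2:1 vertical downscale doubles that through the src_height / dst_height correction.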
 /**
  * _dpu_plane_calc_fill_level - calculate fill level of the given source format
  * @plane:             Pointer to drm plane
@@ -1102,6 +1182,10 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
        }
 
        _dpu_plane_set_qos_remap(plane);
+
+       _dpu_plane_calc_bw(plane, fb);
+
+       _dpu_plane_calc_clk(plane);
 }
 
 static void _dpu_plane_atomic_disable(struct drm_plane *plane)
index 4569497..ca83b87 100644 (file)
@@ -25,6 +25,8 @@
  * @scaler3_cfg: configuration data for scaler3
  * @pixel_ext: configuration data for pixel extensions
  * @cdp_cfg:   CDP configuration
+ * @plane_fetch_bw: calculated BW per plane
+ * @plane_clk: calculated clk per plane
  */
 struct dpu_plane_state {
        struct drm_plane_state base;
@@ -39,6 +41,8 @@ struct dpu_plane_state {
        struct dpu_hw_pixel_ext pixel_ext;
 
        struct dpu_hw_pipe_cdp_cfg cdp_cfg;
+       u64 plane_fetch_bw;
+       u64 plane_clk;
 };
 
 /**
index 5d89560..88645db 100644 (file)
@@ -25,54 +25,9 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
        return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-/* not ironically named at all.. no, really.. */
-static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
-{
-       struct drm_device *dev = mdp4_dtv_encoder->base.dev;
-       struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
-
-       if (!dtv_pdata) {
-               DRM_DEV_ERROR(dev->dev, "could not find dtv pdata\n");
-               return;
-       }
-
-       if (dtv_pdata->bus_scale_table) {
-               mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
-                               dtv_pdata->bus_scale_table);
-               DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
-               DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
-               if (dtv_pdata->lcdc_power_save)
-                       dtv_pdata->lcdc_power_save(1);
-       }
-}
-
-static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
-{
-       if (mdp4_dtv_encoder->bsc) {
-               msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc);
-               mdp4_dtv_encoder->bsc = 0;
-       }
-}
-
-static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
-{
-       if (mdp4_dtv_encoder->bsc) {
-               DBG("set bus scaling: %d", idx);
-               msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
-       }
-}
-#else
-static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
-static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
-static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {}
-#endif
-
 static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
 {
        struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
-       bs_fini(mdp4_dtv_encoder);
        drm_encoder_cleanup(encoder);
        kfree(mdp4_dtv_encoder);
 }
@@ -162,8 +117,6 @@ static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder)
        clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
        clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
 
-       bs_set(mdp4_dtv_encoder, 0);
-
        mdp4_dtv_encoder->enabled = false;
 }
 
@@ -185,8 +138,6 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
                        MDP4_DMA_CONFIG_PACK(0x21));
        mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 1);
 
-       bs_set(mdp4_dtv_encoder, 1);
-
        DBG("setting mdp_clk=%lu", pc);
 
        ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
@@ -252,8 +203,6 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
                goto fail;
        }
 
-       bs_init(mdp4_dtv_encoder);
-
        return encoder;
 
 fail:
index 18933bd..e8ee92a 100644 (file)
@@ -222,17 +222,4 @@ static inline struct clk *mpd4_lvds_pll_init(struct drm_device *dev)
 }
 #endif
 
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-/* bus scaling data is associated with extra pointless platform devices,
- * "dtv", etc.. this is a bit of a hack, but we need a way for encoders
- * to find their pdata to make the bus-scaling stuff work.
- */
-static inline void *mdp4_find_pdata(const char *devname)
-{
-       struct device *dev;
-       dev = bus_find_device_by_name(&platform_bus_type, NULL, devname);
-       return dev ? dev->platform_data : NULL;
-}
-#endif
-
 #endif /* __MDP4_KMS_H__ */
index 871f351..10eb3e5 100644 (file)
@@ -30,51 +30,10 @@ static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
        return to_mdp4_kms(to_mdp_kms(priv->kms));
 }
 
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
-{
-       struct drm_device *dev = mdp4_lcdc_encoder->base.dev;
-       struct lcdc_platform_data *lcdc_pdata = mdp4_find_pdata("lvds.0");
-
-       if (!lcdc_pdata) {
-               DRM_DEV_ERROR(dev->dev, "could not find lvds pdata\n");
-               return;
-       }
-
-       if (lcdc_pdata->bus_scale_table) {
-               mdp4_lcdc_encoder->bsc = msm_bus_scale_register_client(
-                               lcdc_pdata->bus_scale_table);
-               DBG("lvds : bus scale client: %08x", mdp4_lcdc_encoder->bsc);
-       }
-}
-
-static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder)
-{
-       if (mdp4_lcdc_encoder->bsc) {
-               msm_bus_scale_unregister_client(mdp4_lcdc_encoder->bsc);
-               mdp4_lcdc_encoder->bsc = 0;
-       }
-}
-
-static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx)
-{
-       if (mdp4_lcdc_encoder->bsc) {
-               DBG("set bus scaling: %d", idx);
-               msm_bus_scale_client_update_request(mdp4_lcdc_encoder->bsc, idx);
-       }
-}
-#else
-static void bs_init(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {}
-static void bs_fini(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder) {}
-static void bs_set(struct mdp4_lcdc_encoder *mdp4_lcdc_encoder, int idx) {}
-#endif
-
 static void mdp4_lcdc_encoder_destroy(struct drm_encoder *encoder)
 {
        struct mdp4_lcdc_encoder *mdp4_lcdc_encoder =
                        to_mdp4_lcdc_encoder(encoder);
-       bs_fini(mdp4_lcdc_encoder);
        drm_encoder_cleanup(encoder);
        kfree(mdp4_lcdc_encoder);
 }
@@ -348,8 +307,6 @@ static void mdp4_lcdc_encoder_disable(struct drm_encoder *encoder)
                        DRM_DEV_ERROR(dev->dev, "failed to disable regulator: %d\n", ret);
        }
 
-       bs_set(mdp4_lcdc_encoder, 0);
-
        mdp4_lcdc_encoder->enabled = false;
 }
 
@@ -382,8 +339,6 @@ static void mdp4_lcdc_encoder_enable(struct drm_encoder *encoder)
        mdp4_crtc_set_config(encoder->crtc, config);
        mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV, 0);
 
-       bs_set(mdp4_lcdc_encoder, 1);
-
        for (i = 0; i < ARRAY_SIZE(mdp4_lcdc_encoder->regs); i++) {
                ret = regulator_enable(mdp4_lcdc_encoder->regs[i]);
                if (ret)
@@ -480,8 +435,6 @@ struct drm_encoder *mdp4_lcdc_encoder_init(struct drm_device *dev,
        }
        mdp4_lcdc_encoder->regs[2] = reg;
 
-       bs_init(mdp4_lcdc_encoder);
-
        return encoder;
 
 fail:
index eeef41f..ff2c1d5 100644 (file)
@@ -14,27 +14,6 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
        return to_mdp5_kms(to_mdp_kms(priv->kms));
 }
 
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-#include <linux/msm-bus.h>
-#include <linux/msm-bus-board.h>
-
-static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx)
-{
-       if (mdp5_cmd_enc->bsc) {
-               DBG("set bus scaling: %d", idx);
-               /* HACK: scaling down, and then immediately back up
-                * seems to leave things broken (underflow).. so
-                * never disable:
-                */
-               idx = 1;
-               msm_bus_scale_client_update_request(mdp5_cmd_enc->bsc, idx);
-       }
-}
-#else
-static void bs_set(struct mdp5_encoder *mdp5_cmd_enc, int idx) {}
-#endif
-
 #define VSYNC_CLK_RATE 19200000
 static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
                                    struct drm_display_mode *mode)
@@ -146,8 +125,6 @@ void mdp5_cmd_encoder_disable(struct drm_encoder *encoder)
        mdp5_ctl_set_encoder_state(ctl, pipeline, false);
        mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_encoder(intf), true);
 
-       bs_set(mdp5_cmd_enc, 0);
-
        mdp5_cmd_enc->enabled = false;
 }
 
@@ -161,7 +138,6 @@ void mdp5_cmd_encoder_enable(struct drm_encoder *encoder)
        if (WARN_ON(mdp5_cmd_enc->enabled))
                return;
 
-       bs_set(mdp5_cmd_enc, 1);
        if (pingpong_tearcheck_enable(encoder))
                return;
 
index f488272..79d67c4 100644 (file)
@@ -16,72 +16,9 @@ static struct mdp5_kms *get_kms(struct drm_encoder *encoder)
        return to_mdp5_kms(to_mdp_kms(priv->kms));
 }
 
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
-#include <mach/board.h>
-#include <mach/msm_bus.h>
-#include <mach/msm_bus_board.h>
-#define MDP_BUS_VECTOR_ENTRY(ab_val, ib_val)           \
-       {                                               \
-               .src = MSM_BUS_MASTER_MDP_PORT0,        \
-               .dst = MSM_BUS_SLAVE_EBI_CH0,           \
-               .ab = (ab_val),                         \
-               .ib = (ib_val),                         \
-       }
-
-static struct msm_bus_vectors mdp_bus_vectors[] = {
-       MDP_BUS_VECTOR_ENTRY(0, 0),
-       MDP_BUS_VECTOR_ENTRY(2000000000, 2000000000),
-};
-static struct msm_bus_paths mdp_bus_usecases[] = { {
-               .num_paths = 1,
-               .vectors = &mdp_bus_vectors[0],
-}, {
-               .num_paths = 1,
-               .vectors = &mdp_bus_vectors[1],
-} };
-static struct msm_bus_scale_pdata mdp_bus_scale_table = {
-       .usecase = mdp_bus_usecases,
-       .num_usecases = ARRAY_SIZE(mdp_bus_usecases),
-       .name = "mdss_mdp",
-};
-
-static void bs_init(struct mdp5_encoder *mdp5_encoder)
-{
-       mdp5_encoder->bsc = msm_bus_scale_register_client(
-                       &mdp_bus_scale_table);
-       DBG("bus scale client: %08x", mdp5_encoder->bsc);
-}
-
-static void bs_fini(struct mdp5_encoder *mdp5_encoder)
-{
-       if (mdp5_encoder->bsc) {
-               msm_bus_scale_unregister_client(mdp5_encoder->bsc);
-               mdp5_encoder->bsc = 0;
-       }
-}
-
-static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx)
-{
-       if (mdp5_encoder->bsc) {
-               DBG("set bus scaling: %d", idx);
-               /* HACK: scaling down, and then immediately back up
-                * seems to leave things broken (underflow).. so
-                * never disable:
-                */
-               idx = 1;
-               msm_bus_scale_client_update_request(mdp5_encoder->bsc, idx);
-       }
-}
-#else
-static void bs_init(struct mdp5_encoder *mdp5_encoder) {}
-static void bs_fini(struct mdp5_encoder *mdp5_encoder) {}
-static void bs_set(struct mdp5_encoder *mdp5_encoder, int idx) {}
-#endif
-
 static void mdp5_encoder_destroy(struct drm_encoder *encoder)
 {
        struct mdp5_encoder *mdp5_encoder = to_mdp5_encoder(encoder);
-       bs_fini(mdp5_encoder);
        drm_encoder_cleanup(encoder);
        kfree(mdp5_encoder);
 }
@@ -222,8 +159,6 @@ static void mdp5_vid_encoder_disable(struct drm_encoder *encoder)
         */
        mdp_irq_wait(&mdp5_kms->base, intf2vblank(mixer, intf));
 
-       bs_set(mdp5_encoder, 0);
-
        mdp5_encoder->enabled = false;
 }
 
@@ -240,7 +175,6 @@ static void mdp5_vid_encoder_enable(struct drm_encoder *encoder)
        if (WARN_ON(mdp5_encoder->enabled))
                return;
 
-       bs_set(mdp5_encoder, 1);
        spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
        mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intfn), 1);
        spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
@@ -426,8 +360,6 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev,
 
        drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
 
-       bs_init(mdp5_encoder);
-
        return encoder;
 
 fail:
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c
new file mode 100644 (file)
index 0000000..82a8673
--- /dev/null
@@ -0,0 +1,638 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)    "[drm-dp] %s: " fmt, __func__
+
+#include <linux/of_platform.h>
+
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_edid.h>
+
+#include "dp_catalog.h"
+#include "dp_audio.h"
+#include "dp_panel.h"
+#include "dp_display.h"
+
+#define HEADER_BYTE_2_BIT       0
+#define PARITY_BYTE_2_BIT       8
+#define HEADER_BYTE_1_BIT      16
+#define PARITY_BYTE_1_BIT      24
+#define HEADER_BYTE_3_BIT      16
+#define PARITY_BYTE_3_BIT      24
+
+struct dp_audio_private {
+       struct platform_device *audio_pdev;
+       struct platform_device *pdev;
+       struct dp_catalog *catalog;
+       struct dp_panel *panel;
+
+       bool engine_on;
+       u32 channels;
+
+       struct dp_audio dp_audio;
+};
+
+static u8 dp_audio_get_g0_value(u8 data)
+{
+       u8 c[4];
+       u8 g[4];
+       u8 ret_data = 0;
+       u8 i;
+
+       for (i = 0; i < 4; i++)
+               c[i] = (data >> i) & 0x01;
+
+       g[0] = c[3];
+       g[1] = c[0] ^ c[3];
+       g[2] = c[1];
+       g[3] = c[2];
+
+       for (i = 0; i < 4; i++)
+               ret_data = ((g[i] & 0x01) << i) | ret_data;
+
+       return ret_data;
+}
+
+static u8 dp_audio_get_g1_value(u8 data)
+{
+       u8 c[4];
+       u8 g[4];
+       u8 ret_data = 0;
+       u8 i;
+
+       for (i = 0; i < 4; i++)
+               c[i] = (data >> i) & 0x01;
+
+       g[0] = c[0] ^ c[3];
+       g[1] = c[0] ^ c[1] ^ c[3];
+       g[2] = c[1] ^ c[2];
+       g[3] = c[2] ^ c[3];
+
+       for (i = 0; i < 4; i++)
+               ret_data = ((g[i] & 0x01) << i) | ret_data;
+
+       return ret_data;
+}
+
+static u8 dp_audio_calculate_parity(u32 data)
+{
+       u8 x0 = 0;
+       u8 x1 = 0;
+       u8 ci = 0;
+       u8 iData = 0;
+       u8 i = 0;
+       u8 parity_byte;
+       u8 num_byte = (data & 0xFF00) > 0 ? 8 : 2;
+
+       for (i = 0; i < num_byte; i++) {
+               iData = (data >> i*4) & 0xF;
+
+               ci = iData ^ x1;
+               x1 = x0 ^ dp_audio_get_g1_value(ci);
+               x0 = dp_audio_get_g0_value(ci);
+       }
+
+       parity_byte = x1 | (x0 << 4);
+
+       return parity_byte;
+}
+
+static u32 dp_audio_get_header(struct dp_catalog *catalog,
+               enum dp_catalog_audio_sdp_type sdp,
+               enum dp_catalog_audio_header_type header)
+{
+       catalog->sdp_type = sdp;
+       catalog->sdp_header = header;
+       dp_catalog_audio_get_header(catalog);
+
+       return catalog->audio_data;
+}
+
+static void dp_audio_set_header(struct dp_catalog *catalog,
+               u32 data,
+               enum dp_catalog_audio_sdp_type sdp,
+               enum dp_catalog_audio_header_type header)
+{
+       catalog->sdp_type = sdp;
+       catalog->sdp_header = header;
+       catalog->audio_data = data;
+       dp_catalog_audio_set_header(catalog);
+}
+
+static void dp_audio_stream_sdp(struct dp_audio_private *audio)
+{
+       struct dp_catalog *catalog = audio->catalog;
+       u32 value, new_value;
+       u8 parity_byte;
+
+       /* Config header and parity byte 1 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
+
+       new_value = 0x02;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_1_BIT)
+                       | (parity_byte << PARITY_BYTE_1_BIT));
+       DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_1);
+
+       /* Config header and parity byte 2 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
+       new_value = value;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_2_BIT)
+                       | (parity_byte << PARITY_BYTE_2_BIT));
+       DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_2);
+
+       /* Config header and parity byte 3 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
+
+       new_value = audio->channels - 1;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_3_BIT)
+                       | (parity_byte << PARITY_BYTE_3_BIT));
+       DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+               value, parity_byte);
+
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_STREAM, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_timestamp_sdp(struct dp_audio_private *audio)
+{
+       struct dp_catalog *catalog = audio->catalog;
+       u32 value, new_value;
+       u8 parity_byte;
+
+       /* Config header and parity byte 1 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
+
+       new_value = 0x1;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_1_BIT)
+                       | (parity_byte << PARITY_BYTE_1_BIT));
+       DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+               value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_1);
+
+       /* Config header and parity byte 2 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
+
+       new_value = 0x17;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_2_BIT)
+                       | (parity_byte << PARITY_BYTE_2_BIT));
+       DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_2);
+
+       /* Config header and parity byte 3 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
+
+       new_value = (0x0 | (0x11 << 2));
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_3_BIT)
+                       | (parity_byte << PARITY_BYTE_3_BIT));
+       DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_TIMESTAMP, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_infoframe_sdp(struct dp_audio_private *audio)
+{
+       struct dp_catalog *catalog = audio->catalog;
+       u32 value, new_value;
+       u8 parity_byte;
+
+       /* Config header and parity byte 1 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
+
+       new_value = 0x84;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_1_BIT)
+                       | (parity_byte << PARITY_BYTE_1_BIT));
+       DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_1);
+
+       /* Config header and parity byte 2 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
+
+       new_value = 0x1b;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_2_BIT)
+                       | (parity_byte << PARITY_BYTE_2_BIT));
+       DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_2);
+
+       /* Config header and parity byte 3 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
+
+       new_value = (0x0 | (0x11 << 2));
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_3_BIT)
+                       | (parity_byte << PARITY_BYTE_3_BIT));
+       DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_INFOFRAME, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_copy_management_sdp(struct dp_audio_private *audio)
+{
+       struct dp_catalog *catalog = audio->catalog;
+       u32 value, new_value;
+       u8 parity_byte;
+
+       /* Config header and parity byte 1 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
+
+       new_value = 0x05;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_1_BIT)
+                       | (parity_byte << PARITY_BYTE_1_BIT));
+       DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_1);
+
+       /* Config header and parity byte 2 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
+
+       new_value = 0x0F;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_2_BIT)
+                       | (parity_byte << PARITY_BYTE_2_BIT));
+       DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_2);
+
+       /* Config header and parity byte 3 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
+
+       new_value = 0x0;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_3_BIT)
+                       | (parity_byte << PARITY_BYTE_3_BIT));
+       DRM_DEBUG_DP("Header Byte 3: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_COPYMANAGEMENT, DP_AUDIO_SDP_HEADER_3);
+}
+
+static void dp_audio_isrc_sdp(struct dp_audio_private *audio)
+{
+       struct dp_catalog *catalog = audio->catalog;
+       u32 value, new_value;
+       u8 parity_byte;
+
+       /* Config header and parity byte 1 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
+
+       new_value = 0x06;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_1_BIT)
+                       | (parity_byte << PARITY_BYTE_1_BIT));
+       DRM_DEBUG_DP("Header Byte 1: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_1);
+
+       /* Config header and parity byte 2 */
+       value = dp_audio_get_header(catalog,
+                       DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
+
+       new_value = 0x0F;
+       parity_byte = dp_audio_calculate_parity(new_value);
+       value |= ((new_value << HEADER_BYTE_2_BIT)
+                       | (parity_byte << PARITY_BYTE_2_BIT));
+       DRM_DEBUG_DP("Header Byte 2: value = 0x%x, parity_byte = 0x%x\n",
+                       value, parity_byte);
+       dp_audio_set_header(catalog, value,
+               DP_AUDIO_SDP_ISRC, DP_AUDIO_SDP_HEADER_2);
+}
+
+static void dp_audio_setup_sdp(struct dp_audio_private *audio)
+{
+       dp_catalog_audio_config_sdp(audio->catalog);
+
+       dp_audio_stream_sdp(audio);
+       dp_audio_timestamp_sdp(audio);
+       dp_audio_infoframe_sdp(audio);
+       dp_audio_copy_management_sdp(audio);
+       dp_audio_isrc_sdp(audio);
+}
+
+static void dp_audio_setup_acr(struct dp_audio_private *audio)
+{
+       u32 select = 0;
+       struct dp_catalog *catalog = audio->catalog;
+
+       switch (audio->dp_audio.bw_code) {
+       case DP_LINK_BW_1_62:
+               select = 0;
+               break;
+       case DP_LINK_BW_2_7:
+               select = 1;
+               break;
+       case DP_LINK_BW_5_4:
+               select = 2;
+               break;
+       case DP_LINK_BW_8_1:
+               select = 3;
+               break;
+       default:
+               DRM_DEBUG_DP("Unknown link rate\n");
+               select = 0;
+               break;
+       }
+
+       catalog->audio_data = select;
+       dp_catalog_audio_config_acr(catalog);
+}
+
+static void dp_audio_safe_to_exit_level(struct dp_audio_private *audio)
+{
+       struct dp_catalog *catalog = audio->catalog;
+       u32 safe_to_exit_level = 0;
+
+       switch (audio->dp_audio.lane_count) {
+       case 1:
+               safe_to_exit_level = 14;
+               break;
+       case 2:
+               safe_to_exit_level = 8;
+               break;
+       case 4:
+               safe_to_exit_level = 5;
+               break;
+       default:
+               DRM_DEBUG_DP("setting the default safe_to_exit_level = %u\n",
+                               safe_to_exit_level);
+               safe_to_exit_level = 14;
+               break;
+       }
+
+       catalog->audio_data = safe_to_exit_level;
+       dp_catalog_audio_sfe_level(catalog);
+}
+
+static void dp_audio_enable(struct dp_audio_private *audio, bool enable)
+{
+       struct dp_catalog *catalog = audio->catalog;
+
+       catalog->audio_data = enable;
+       dp_catalog_audio_enable(catalog);
+
+       audio->engine_on = enable;
+}
+
+static struct dp_audio_private *dp_audio_get_data(struct platform_device *pdev)
+{
+       struct dp_audio *dp_audio;
+       struct msm_dp *dp_display;
+
+       if (!pdev) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       dp_display = platform_get_drvdata(pdev);
+       if (!dp_display) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       dp_audio = dp_display->dp_audio;
+
+       if (!dp_audio) {
+               DRM_ERROR("invalid dp_audio data\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       return container_of(dp_audio, struct dp_audio_private, dp_audio);
+}
+
+static int dp_audio_hook_plugged_cb(struct device *dev, void *data,
+               hdmi_codec_plugged_cb fn,
+               struct device *codec_dev)
+{
+       struct platform_device *pdev;
+       struct msm_dp *dp_display;
+
+       pdev = to_platform_device(dev);
+       if (!pdev) {
+               pr_err("invalid input\n");
+               return -ENODEV;
+       }
+
+       dp_display = platform_get_drvdata(pdev);
+       if (!dp_display) {
+               pr_err("invalid input\n");
+               return -ENODEV;
+       }
+
+       return dp_display_set_plugged_cb(dp_display, fn, codec_dev);
+}
+
+static int dp_audio_get_eld(struct device *dev,
+       void *data, uint8_t *buf, size_t len)
+{
+       struct platform_device *pdev;
+       struct msm_dp *dp_display;
+
+       pdev = to_platform_device(dev);
+
+       if (!pdev) {
+               DRM_ERROR("invalid input\n");
+               return -ENODEV;
+       }
+
+       dp_display = platform_get_drvdata(pdev);
+       if (!dp_display) {
+               DRM_ERROR("invalid input\n");
+               return -ENODEV;
+       }
+
+       memcpy(buf, dp_display->connector->eld,
+               min(sizeof(dp_display->connector->eld), len));
+
+       return 0;
+}
+
+int dp_audio_hw_params(struct device *dev,
+       void *data,
+       struct hdmi_codec_daifmt *daifmt,
+       struct hdmi_codec_params *params)
+{
+       int rc = 0;
+       struct dp_audio_private *audio;
+       struct platform_device *pdev;
+       struct msm_dp *dp_display;
+
+       pdev = to_platform_device(dev);
+       dp_display = platform_get_drvdata(pdev);
+
+       /*
+        * The sound card can be opened before DP is connected, or even
+        * while it is disconnected. Since the audio subsystem relies on
+        * the DP driver to maintain the correct state of clocks, that
+        * would cause unclocked access. Check the connection status and
+        * bail out if not connected.
+        */
+       if (!dp_display->power_on) {
+               rc = -EINVAL;
+               goto end;
+       }
+
+       audio = dp_audio_get_data(pdev);
+       if (IS_ERR(audio)) {
+               rc = PTR_ERR(audio);
+               goto end;
+       }
+
+       audio->channels = params->channels;
+
+       dp_audio_setup_sdp(audio);
+       dp_audio_setup_acr(audio);
+       dp_audio_safe_to_exit_level(audio);
+       dp_audio_enable(audio, true);
+       dp_display->audio_enabled = true;
+
+end:
+       return rc;
+}
+
+static void dp_audio_shutdown(struct device *dev, void *data)
+{
+       struct dp_audio_private *audio;
+       struct platform_device *pdev;
+       struct msm_dp *dp_display;
+
+       pdev = to_platform_device(dev);
+       dp_display = platform_get_drvdata(pdev);
+       audio = dp_audio_get_data(pdev);
+       if (IS_ERR(audio)) {
+               DRM_ERROR("failed to get audio data\n");
+               return;
+       }
+
+       /*
+        * If audio was never enabled there is nothing to shut down, so
+        * bail out early. This also makes sure we don't cause an
+        * unclocked access when the audio subsystem calls this without
+        * DP being connected. is_connected cannot be used here as it is
+        * set to false earlier than this call.
+        */
+       if (!dp_display->audio_enabled)
+               return;
+
+       dp_audio_enable(audio, false);
+       /* signal the dp display to safely shutdown clocks */
+       dp_display_signal_audio_complete(dp_display);
+}
+
+static const struct hdmi_codec_ops dp_audio_codec_ops = {
+       .hw_params = dp_audio_hw_params,
+       .audio_shutdown = dp_audio_shutdown,
+       .get_eld = dp_audio_get_eld,
+       .hook_plugged_cb = dp_audio_hook_plugged_cb,
+};
+
+static struct hdmi_codec_pdata codec_data = {
+       .ops = &dp_audio_codec_ops,
+       .max_i2s_channels = 8,
+       .i2s = 1,
+};
+
+int dp_register_audio_driver(struct device *dev,
+               struct dp_audio *dp_audio)
+{
+       struct dp_audio_private *audio_priv;
+
+       audio_priv = container_of(dp_audio,
+                       struct dp_audio_private, dp_audio);
+
+       audio_priv->audio_pdev = platform_device_register_data(dev,
+                                               HDMI_CODEC_DRV_NAME,
+                                               PLATFORM_DEVID_AUTO,
+                                               &codec_data,
+                                               sizeof(codec_data));
+       return PTR_ERR_OR_ZERO(audio_priv->audio_pdev);
+}
+
+struct dp_audio *dp_audio_get(struct platform_device *pdev,
+                       struct dp_panel *panel,
+                       struct dp_catalog *catalog)
+{
+       int rc = 0;
+       struct dp_audio_private *audio;
+       struct dp_audio *dp_audio;
+
+       if (!pdev || !panel || !catalog) {
+               DRM_ERROR("invalid input\n");
+               rc = -EINVAL;
+               goto error;
+       }
+
+       audio = devm_kzalloc(&pdev->dev, sizeof(*audio), GFP_KERNEL);
+       if (!audio) {
+               rc = -ENOMEM;
+               goto error;
+       }
+
+       audio->pdev = pdev;
+       audio->panel = panel;
+       audio->catalog = catalog;
+
+       dp_audio = &audio->dp_audio;
+
+       dp_catalog_audio_init(catalog);
+
+       return dp_audio;
+error:
+       return ERR_PTR(rc);
+}
+
+void dp_audio_put(struct dp_audio *dp_audio)
+{
+       struct dp_audio_private *audio;
+
+       if (!dp_audio)
+               return;
+
+       audio = container_of(dp_audio, struct dp_audio_private, dp_audio);
+
+       devm_kfree(&audio->pdev->dev, audio);
+}
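
Every dp_audio_*_sdp() helper above follows the same read-modify-write pattern: fetch the current SDP header word from the catalog, fold in one new header byte plus its computed parity, and write the word back. A minimal standalone sketch of that packing step, assuming the HEADER/PARITY bit offsets and the parity routine from dp_audio.c (pack_sdp_header_byte1() itself is a hypothetical name):

    #include <stdint.h>

    #define HEADER_BYTE_1_BIT 16
    #define PARITY_BYTE_1_BIT 24

    /* Stand-in for dp_audio_calculate_parity() defined in dp_audio.c. */
    extern uint8_t dp_audio_calculate_parity(uint32_t data);

    /* Fold header byte 1 and its parity byte into an existing SDP
     * header word, mirroring dp_audio_stream_sdp() and friends. */
    static uint32_t pack_sdp_header_byte1(uint32_t value, uint32_t new_value)
    {
            uint8_t parity = dp_audio_calculate_parity(new_value);

            return value | (new_value << HEADER_BYTE_1_BIT)
                         | ((uint32_t)parity << PARITY_BYTE_1_BIT);
    }
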
diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h
new file mode 100644 (file)
index 0000000..84e5f4a
--- /dev/null
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_AUDIO_H_
+#define _DP_AUDIO_H_
+
+#include <linux/platform_device.h>
+
+#include "dp_panel.h"
+#include "dp_catalog.h"
+#include <sound/hdmi-codec.h>
+
+/**
+ * struct dp_audio
+ * @lane_count: number of lanes configured in current session
+ * @bw_code: link rate's bandwidth code for current session
+ */
+struct dp_audio {
+       u32 lane_count;
+       u32 bw_code;
+};
+
+/**
+ * dp_audio_get()
+ *
+ * Creates an instance of dp audio.
+ *
+ * @pdev: caller's platform device instance.
+ * @panel: an instance of dp_panel module.
+ * @catalog: an instance of dp_catalog module.
+ *
+ * Returns the error code in case of failure, otherwise
+ * an instance of the newly created dp_audio module.
+ */
+struct dp_audio *dp_audio_get(struct platform_device *pdev,
+                       struct dp_panel *panel,
+                       struct dp_catalog *catalog);
+
+/**
+ * dp_register_audio_driver()
+ *
+ * Registers DP device with hdmi_codec interface.
+ *
+ * @dev: DP device instance.
+ * @dp_audio: an instance of dp_audio module.
+ *
+ *
+ * zero on success.
+ */
+int dp_register_audio_driver(struct device *dev,
+               struct dp_audio *dp_audio);
+
+/**
+ * dp_audio_put()
+ *
+ * Cleans the dp_audio instance.
+ *
+ * @dp_audio: an instance of dp_audio.
+ */
+void dp_audio_put(struct dp_audio *dp_audio);
+
+int dp_audio_hw_params(struct device *dev,
+       void *data,
+       struct hdmi_codec_daifmt *daifmt,
+       struct hdmi_codec_params *params);
+
+#endif /* _DP_AUDIO_H_ */
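
The header implies a simple lifecycle: create the audio module at probe time, register it with the hdmi-codec layer, and release it on teardown. A hedged sketch of that flow, assuming pdev, panel and catalog handles that the DP driver has already set up (the surrounding function name is hypothetical):

    /* Hypothetical probe-time wiring; error handling trimmed. */
    static int dp_audio_setup_example(struct platform_device *pdev,
                                      struct dp_panel *panel,
                                      struct dp_catalog *catalog)
    {
            struct dp_audio *audio;
            int ret;

            audio = dp_audio_get(pdev, panel, catalog);
            if (IS_ERR(audio))
                    return PTR_ERR(audio);

            /* Expose the DP device to ALSA via the hdmi-codec interface. */
            ret = dp_register_audio_driver(&pdev->dev, audio);
            if (ret)
                    dp_audio_put(audio);

            return ret;
    }
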
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
new file mode 100644 (file)
index 0000000..19b35ae
--- /dev/null
@@ -0,0 +1,535 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <drm/drm_print.h>
+
+#include "dp_reg.h"
+#include "dp_aux.h"
+
+#define DP_AUX_ENUM_STR(x)             #x
+
+struct dp_aux_private {
+       struct device *dev;
+       struct dp_catalog *catalog;
+
+       struct mutex mutex;
+       struct completion comp;
+
+       u32 aux_error_num;
+       u32 retry_cnt;
+       bool cmd_busy;
+       bool native;
+       bool read;
+       bool no_send_addr;
+       bool no_send_stop;
+       u32 offset;
+       u32 segment;
+       u32 isr;
+
+       struct drm_dp_aux dp_aux;
+};
+
+static const char *dp_aux_get_error(u32 aux_error)
+{
+       switch (aux_error) {
+       case DP_AUX_ERR_NONE:
+               return DP_AUX_ENUM_STR(DP_AUX_ERR_NONE);
+       case DP_AUX_ERR_ADDR:
+               return DP_AUX_ENUM_STR(DP_AUX_ERR_ADDR);
+       case DP_AUX_ERR_TOUT:
+               return DP_AUX_ENUM_STR(DP_AUX_ERR_TOUT);
+       case DP_AUX_ERR_NACK:
+               return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK);
+       case DP_AUX_ERR_DEFER:
+               return DP_AUX_ENUM_STR(DP_AUX_ERR_DEFER);
+       case DP_AUX_ERR_NACK_DEFER:
+               return DP_AUX_ENUM_STR(DP_AUX_ERR_NACK_DEFER);
+       default:
+               return "unknown";
+       }
+}
+
+static u32 dp_aux_write(struct dp_aux_private *aux,
+                       struct drm_dp_aux_msg *msg)
+{
+       u32 data[4], reg, len;
+       u8 *msgdata = msg->buffer;
+       int const AUX_CMD_FIFO_LEN = 128;
+       int i = 0;
+
+       if (aux->read)
+               len = 4;
+       else
+               len = msg->size + 4;
+
+       /*
+        * The cmd fifo is only 144 bytes deep, so limit the buffer
+        * length to 128 bytes here.
+        */
+       if (len > AUX_CMD_FIFO_LEN) {
+               DRM_ERROR("buf size greater than allowed size of 128 bytes\n");
+               return 0;
+       }
+
+       /* Pack cmd and write to HW */
+       data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
+       if (aux->read)
+               data[0] |=  BIT(4); /* R/W */
+
+       data[1] = (msg->address >> 8) & 0xff;   /* addr[15:8] */
+       data[2] = msg->address & 0xff;          /* addr[7:0] */
+       data[3] = (msg->size - 1) & 0xff;       /* len[7:0] */
+
+       for (i = 0; i < len; i++) {
+               reg = (i < 4) ? data[i] : msgdata[i - 4];
+               /* index = 0, write */
+               reg = (((reg) << DP_AUX_DATA_OFFSET)
+                      & DP_AUX_DATA_MASK) | DP_AUX_DATA_WRITE;
+               if (i == 0)
+                       reg |= DP_AUX_DATA_INDEX_WRITE;
+               aux->catalog->aux_data = reg;
+               dp_catalog_aux_write_data(aux->catalog);
+       }
+
+       dp_catalog_aux_clear_trans(aux->catalog, false);
+       dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+
+       reg = 0; /* Transaction number == 1 */
+       if (!aux->native) { /* i2c */
+               reg |= DP_AUX_TRANS_CTRL_I2C;
+
+               if (aux->no_send_addr)
+                       reg |= DP_AUX_TRANS_CTRL_NO_SEND_ADDR;
+
+               if (aux->no_send_stop)
+                       reg |= DP_AUX_TRANS_CTRL_NO_SEND_STOP;
+       }
+
+       reg |= DP_AUX_TRANS_CTRL_GO;
+       aux->catalog->aux_data = reg;
+       dp_catalog_aux_write_trans(aux->catalog);
+
+       return len;
+}
+
+static int dp_aux_cmd_fifo_tx(struct dp_aux_private *aux,
+                             struct drm_dp_aux_msg *msg)
+{
+       u32 ret, len, timeout;
+       unsigned long aux_timeout = HZ / 4; /* jiffies, i.e. 250 ms */
+
+       reinit_completion(&aux->comp);
+
+       len = dp_aux_write(aux, msg);
+       if (len == 0) {
+               DRM_ERROR("DP AUX write failed\n");
+               return -EINVAL;
+       }
+
+       timeout = wait_for_completion_timeout(&aux->comp, aux_timeout);
+       if (!timeout) {
+               DRM_ERROR("aux %s timeout\n", (aux->read ? "read" : "write"));
+               return -ETIMEDOUT;
+       }
+
+       if (aux->aux_error_num == DP_AUX_ERR_NONE) {
+               ret = len;
+       } else {
+               DRM_ERROR_RATELIMITED("aux err: %s\n",
+                       dp_aux_get_error(aux->aux_error_num));
+
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+static void dp_aux_cmd_fifo_rx(struct dp_aux_private *aux,
+               struct drm_dp_aux_msg *msg)
+{
+       u32 data;
+       u8 *dp;
+       u32 i, actual_i;
+       u32 len = msg->size;
+
+       dp_catalog_aux_clear_trans(aux->catalog, true);
+
+       data = DP_AUX_DATA_INDEX_WRITE; /* INDEX_WRITE */
+       data |= DP_AUX_DATA_READ;  /* read */
+
+       aux->catalog->aux_data = data;
+       dp_catalog_aux_write_data(aux->catalog);
+
+       dp = msg->buffer;
+
+       /* discard first byte */
+       data = dp_catalog_aux_read_data(aux->catalog);
+
+       for (i = 0; i < len; i++) {
+               data = dp_catalog_aux_read_data(aux->catalog);
+               *dp++ = (u8)((data >> DP_AUX_DATA_OFFSET) & 0xff);
+
+               actual_i = (data >> DP_AUX_DATA_INDEX_OFFSET) & 0xFF;
+               if (i != actual_i)
+                       DRM_ERROR("Index mismatch: expected %d, found %d\n",
+                               i, actual_i);
+       }
+}
+
+static void dp_aux_native_handler(struct dp_aux_private *aux)
+{
+       u32 isr = aux->isr;
+
+       if (isr & DP_INTR_AUX_I2C_DONE)
+               aux->aux_error_num = DP_AUX_ERR_NONE;
+       else if (isr & DP_INTR_WRONG_ADDR)
+               aux->aux_error_num = DP_AUX_ERR_ADDR;
+       else if (isr & DP_INTR_TIMEOUT)
+               aux->aux_error_num = DP_AUX_ERR_TOUT;
+       if (isr & DP_INTR_NACK_DEFER)
+               aux->aux_error_num = DP_AUX_ERR_NACK;
+       if (isr & DP_INTR_AUX_ERROR) {
+               aux->aux_error_num = DP_AUX_ERR_PHY;
+               dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+       }
+
+       complete(&aux->comp);
+}
+
+static void dp_aux_i2c_handler(struct dp_aux_private *aux)
+{
+       u32 isr = aux->isr;
+
+       if (isr & DP_INTR_AUX_I2C_DONE) {
+               if (isr & (DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER))
+                       aux->aux_error_num = DP_AUX_ERR_NACK;
+               else
+                       aux->aux_error_num = DP_AUX_ERR_NONE;
+       } else {
+               if (isr & DP_INTR_WRONG_ADDR)
+                       aux->aux_error_num = DP_AUX_ERR_ADDR;
+               else if (isr & DP_INTR_TIMEOUT)
+                       aux->aux_error_num = DP_AUX_ERR_TOUT;
+               if (isr & DP_INTR_NACK_DEFER)
+                       aux->aux_error_num = DP_AUX_ERR_NACK_DEFER;
+               if (isr & DP_INTR_I2C_NACK)
+                       aux->aux_error_num = DP_AUX_ERR_NACK;
+               if (isr & DP_INTR_I2C_DEFER)
+                       aux->aux_error_num = DP_AUX_ERR_DEFER;
+               if (isr & DP_INTR_AUX_ERROR) {
+                       aux->aux_error_num = DP_AUX_ERR_PHY;
+                       dp_catalog_aux_clear_hw_interrupts(aux->catalog);
+               }
+       }
+
+       complete(&aux->comp);
+}
+
+static void dp_aux_update_offset_and_segment(struct dp_aux_private *aux,
+                                            struct drm_dp_aux_msg *input_msg)
+{
+       u32 edid_address = 0x50;
+       u32 segment_address = 0x30;
+       bool i2c_read = input_msg->request &
+               (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+       u8 *data;
+
+       if (aux->native || i2c_read || ((input_msg->address != edid_address) &&
+               (input_msg->address != segment_address)))
+               return;
+
+       data = input_msg->buffer;
+       if (input_msg->address == segment_address)
+               aux->segment = *data;
+       else
+               aux->offset = *data;
+}
+
+/**
+ * dp_aux_transfer_helper() - helper function for EDID read transactions
+ *
+ * @aux: DP AUX private structure
+ * @input_msg: input message from DRM upstream APIs
+ * @send_seg: send the segment to sink
+ *
+ * return: void
+ *
+ * This helper function is used to fix EDID reads for non-compliant
+ * sinks that do not handle the i2c middle-of-transaction flag correctly.
+ */
+static void dp_aux_transfer_helper(struct dp_aux_private *aux,
+                                  struct drm_dp_aux_msg *input_msg,
+                                  bool send_seg)
+{
+       struct drm_dp_aux_msg helper_msg;
+       u32 message_size = 0x10;
+       u32 segment_address = 0x30;
+       u32 const edid_block_length = 0x80;
+       bool i2c_mot = input_msg->request & DP_AUX_I2C_MOT;
+       bool i2c_read = input_msg->request &
+               (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+
+       if (!i2c_mot || !i2c_read || (input_msg->size == 0))
+               return;
+
+       /*
+        * The DRM upstream EDID driver sends the segment value and EDID
+        * offset for each block. Avoid duplicating those AUX
+        * transactions while reading the first 16 bytes of each block.
+        */
+       if (!(aux->offset % edid_block_length) || !send_seg)
+               goto end;
+
+       aux->read = false;
+       aux->cmd_busy = true;
+       aux->no_send_addr = true;
+       aux->no_send_stop = true;
+
+       /*
+        * Send the segment address for every i2c read in which the
+        * middle-of-transaction flag is set. This is required to support EDID
+        * reads of more than 2 blocks as the segment address is reset to 0
+        * since we are overriding the middle-of-transaction flag for read
+        * transactions.
+        */
+
+       if (aux->segment) {
+               memset(&helper_msg, 0, sizeof(helper_msg));
+               helper_msg.address = segment_address;
+               helper_msg.buffer = &aux->segment;
+               helper_msg.size = 1;
+               dp_aux_cmd_fifo_tx(aux, &helper_msg);
+       }
+
+       /*
+        * Send the offset address for every i2c read in which the
+        * middle-of-transaction flag is set. This will ensure that the sink
+        * will update its read pointer and return the correct portion of the
+        * EDID buffer in the subsequent i2c read transaction triggered in the
+        * native AUX transfer function.
+        */
+       memset(&helper_msg, 0, sizeof(helper_msg));
+       helper_msg.address = input_msg->address;
+       helper_msg.buffer = &aux->offset;
+       helper_msg.size = 1;
+       dp_aux_cmd_fifo_tx(aux, &helper_msg);
+
+end:
+       aux->offset += message_size;
+       if (aux->offset == 0x80 || aux->offset == 0x100)
+               aux->segment = 0x0; /* reset segment at end of block */
+}
+
+/*
+ * This function does the real work of processing an AUX transaction.
+ * It resets the AUX channel (dp_catalog_aux_reset()) if the wait for
+ * completion times out.
+ */
+static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
+                              struct drm_dp_aux_msg *msg)
+{
+       ssize_t ret;
+       int const aux_cmd_native_max = 16;
+       int const aux_cmd_i2c_max = 128;
+       int const retry_count = 5;
+       struct dp_aux_private *aux = container_of(dp_aux,
+               struct dp_aux_private, dp_aux);
+
+       mutex_lock(&aux->mutex);
+
+       aux->native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
+
+       /* Ignore address only message */
+       if ((msg->size == 0) || (msg->buffer == NULL)) {
+               msg->reply = aux->native ?
+                       DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+               ret = msg->size;
+               goto unlock_exit;
+       }
+
+       /* msg sanity check */
+       if ((aux->native && (msg->size > aux_cmd_native_max)) ||
+               (msg->size > aux_cmd_i2c_max)) {
+               DRM_ERROR("%s: invalid msg: size(%zu), request(%x)\n",
+                       __func__, msg->size, msg->request);
+               ret = -EINVAL;
+               goto unlock_exit;
+       }
+
+       dp_aux_update_offset_and_segment(aux, msg);
+       dp_aux_transfer_helper(aux, msg, true);
+
+       aux->read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
+       aux->cmd_busy = true;
+
+       if (aux->read) {
+               aux->no_send_addr = true;
+               aux->no_send_stop = false;
+       } else {
+               aux->no_send_addr = true;
+               aux->no_send_stop = true;
+       }
+
+       ret = dp_aux_cmd_fifo_tx(aux, msg);
+
+       if (ret < 0) {
+               if (aux->native) {
+                       aux->retry_cnt++;
+                       if (!(aux->retry_cnt % retry_count))
+                               dp_catalog_aux_update_cfg(aux->catalog);
+                       dp_catalog_aux_reset(aux->catalog);
+               }
+               usleep_range(400, 500); /* at least 400us to next try */
+               goto unlock_exit;
+       }
+
+       if (aux->aux_error_num == DP_AUX_ERR_NONE) {
+               if (aux->read)
+                       dp_aux_cmd_fifo_rx(aux, msg);
+
+               msg->reply = aux->native ?
+                       DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
+       } else {
+               /* Reply defer to retry */
+               msg->reply = aux->native ?
+                       DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
+       }
+
+       /* Return requested size for success or retry */
+       ret = msg->size;
+       aux->retry_cnt = 0;
+
+unlock_exit:
+       aux->cmd_busy = false;
+       mutex_unlock(&aux->mutex);
+       return ret;
+}
+
+void dp_aux_isr(struct drm_dp_aux *dp_aux)
+{
+       struct dp_aux_private *aux;
+
+       if (!dp_aux) {
+               DRM_ERROR("invalid input\n");
+               return;
+       }
+
+       aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+       aux->isr = dp_catalog_aux_get_irq(aux->catalog);
+
+       if (!aux->cmd_busy)
+               return;
+
+       if (aux->native)
+               dp_aux_native_handler(aux);
+       else
+               dp_aux_i2c_handler(aux);
+}
+
+void dp_aux_reconfig(struct drm_dp_aux *dp_aux)
+{
+       struct dp_aux_private *aux;
+
+       aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+       dp_catalog_aux_update_cfg(aux->catalog);
+       dp_catalog_aux_reset(aux->catalog);
+}
+
+void dp_aux_init(struct drm_dp_aux *dp_aux)
+{
+       struct dp_aux_private *aux;
+
+       if (!dp_aux) {
+               DRM_ERROR("invalid input\n");
+               return;
+       }
+
+       aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+       dp_catalog_aux_enable(aux->catalog, true);
+       aux->retry_cnt = 0;
+}
+
+void dp_aux_deinit(struct drm_dp_aux *dp_aux)
+{
+       struct dp_aux_private *aux;
+
+       aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+       dp_catalog_aux_enable(aux->catalog, false);
+}
+
+int dp_aux_register(struct drm_dp_aux *dp_aux)
+{
+       struct dp_aux_private *aux;
+       int ret;
+
+       if (!dp_aux) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+       aux->dp_aux.name = "dpu_dp_aux";
+       aux->dp_aux.dev = aux->dev;
+       aux->dp_aux.transfer = dp_aux_transfer;
+       ret = drm_dp_aux_register(&aux->dp_aux);
+       if (ret) {
+               DRM_ERROR("%s: failed to register drm aux: %d\n", __func__,
+                               ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+void dp_aux_unregister(struct drm_dp_aux *dp_aux)
+{
+       drm_dp_aux_unregister(dp_aux);
+}
+
+struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog)
+{
+       struct dp_aux_private *aux;
+
+       if (!catalog) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-ENODEV);
+       }
+
+       aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
+       if (!aux)
+               return ERR_PTR(-ENOMEM);
+
+       init_completion(&aux->comp);
+       aux->cmd_busy = false;
+       mutex_init(&aux->mutex);
+
+       aux->dev = dev;
+       aux->catalog = catalog;
+       aux->retry_cnt = 0;
+
+       return &aux->dp_aux;
+}
+
+void dp_aux_put(struct drm_dp_aux *dp_aux)
+{
+       struct dp_aux_private *aux;
+
+       if (!dp_aux)
+               return;
+
+       aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+
+       mutex_destroy(&aux->mutex);
+
+       devm_kfree(aux->dev, aux);
+}
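
Once dp_aux_register() has installed dp_aux_transfer() as the drm_dp_aux transfer hook, the generic DRM DP helpers drive the channel; callers never invoke the transfer function directly. A short consumer-side sketch using the standard drm_dp_dpcd_readb() helper (the DPCD register chosen is just an example):

    #include <drm/drm_dp_helper.h>

    /* Read the sink's DPCD revision; drm_dp_dpcd_readb() lands in
     * dp_aux_transfer() through the registered aux->transfer hook. */
    static int read_dpcd_rev(struct drm_dp_aux *aux, u8 *rev)
    {
            ssize_t ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, rev);

            return ret < 0 ? ret : 0;
    }
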
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.h b/drivers/gpu/drm/msm/dp/dp_aux.h
new file mode 100644 (file)
index 0000000..f8b8ba9
--- /dev/null
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_AUX_H_
+#define _DP_AUX_H_
+
+#include "dp_catalog.h"
+#include <drm/drm_dp_helper.h>
+
+#define DP_AUX_ERR_NONE                0
+#define DP_AUX_ERR_ADDR                -1
+#define DP_AUX_ERR_TOUT                -2
+#define DP_AUX_ERR_NACK                -3
+#define DP_AUX_ERR_DEFER       -4
+#define DP_AUX_ERR_NACK_DEFER  -5
+#define DP_AUX_ERR_PHY         -6
+
+int dp_aux_register(struct drm_dp_aux *dp_aux);
+void dp_aux_unregister(struct drm_dp_aux *dp_aux);
+void dp_aux_isr(struct drm_dp_aux *dp_aux);
+void dp_aux_init(struct drm_dp_aux *dp_aux);
+void dp_aux_deinit(struct drm_dp_aux *dp_aux);
+void dp_aux_reconfig(struct drm_dp_aux *dp_aux);
+
+struct drm_dp_aux *dp_aux_get(struct device *dev, struct dp_catalog *catalog);
+void dp_aux_put(struct drm_dp_aux *aux);
+
+#endif /* _DP_AUX_H_ */
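
dp_aux_isr() is intended to be called from the DP controller's top-level interrupt handler, where it latches the AUX status and completes any transfer blocked in dp_aux_cmd_fifo_tx(). A hedged sketch of that wiring; the handler and its context struct are hypothetical:

    #include <linux/interrupt.h>

    struct msm_dp_ctx {                     /* hypothetical driver context */
            struct drm_dp_aux *aux;
    };

    static irqreturn_t msm_dp_irq_example(int irq, void *dev_id)
    {
            struct msm_dp_ctx *ctx = dev_id;

            /* Latch AUX interrupt status and wake the pending transfer. */
            dp_aux_isr(ctx->aux);

            return IRQ_HANDLED;
    }
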
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
new file mode 100644 (file)
index 0000000..b15b4ce
--- /dev/null
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)    "[drm-dp] %s: " fmt, __func__
+
+#include <linux/rational.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "dp_catalog.h"
+#include "dp_reg.h"
+
+#define POLLING_SLEEP_US                       1000
+#define POLLING_TIMEOUT_US                     10000
+
+#define SCRAMBLER_RESET_COUNT_VALUE            0xFC
+
+#define DP_INTERRUPT_STATUS_ACK_SHIFT  1
+#define DP_INTERRUPT_STATUS_MASK_SHIFT 2
+
+#define MSM_DP_CONTROLLER_AHB_OFFSET   0x0000
+#define MSM_DP_CONTROLLER_AHB_SIZE     0x0200
+#define MSM_DP_CONTROLLER_AUX_OFFSET   0x0200
+#define MSM_DP_CONTROLLER_AUX_SIZE     0x0200
+#define MSM_DP_CONTROLLER_LINK_OFFSET  0x0400
+#define MSM_DP_CONTROLLER_LINK_SIZE    0x0C00
+#define MSM_DP_CONTROLLER_P0_OFFSET    0x1000
+#define MSM_DP_CONTROLLER_P0_SIZE      0x0400
+
+#define DP_INTERRUPT_STATUS1 \
+       (DP_INTR_AUX_I2C_DONE | \
+       DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
+       DP_INTR_NACK_DEFER | DP_INTR_WRONG_DATA_CNT | \
+       DP_INTR_I2C_NACK | DP_INTR_I2C_DEFER | \
+       DP_INTR_PLL_UNLOCKED | DP_INTR_AUX_ERROR)
+
+#define DP_INTERRUPT_STATUS1_ACK \
+       (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_ACK_SHIFT)
+#define DP_INTERRUPT_STATUS1_MASK \
+       (DP_INTERRUPT_STATUS1 << DP_INTERRUPT_STATUS_MASK_SHIFT)
+
+#define DP_INTERRUPT_STATUS2 \
+       (DP_INTR_READY_FOR_VIDEO | DP_INTR_IDLE_PATTERN_SENT | \
+       DP_INTR_FRAME_END | DP_INTR_CRC_UPDATED)
+
+#define DP_INTERRUPT_STATUS2_ACK \
+       (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_ACK_SHIFT)
+#define DP_INTERRUPT_STATUS2_MASK \
+       (DP_INTERRUPT_STATUS2 << DP_INTERRUPT_STATUS_MASK_SHIFT)
+
+struct dp_catalog_private {
+       struct device *dev;
+       struct dp_io *io;
+       u32 (*audio_map)[DP_AUDIO_SDP_HEADER_MAX];
+       struct dp_catalog dp_catalog;
+       u8 aux_lut_cfg_index[PHY_AUX_CFG_MAX];
+};
+
+static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset)
+{
+       offset += MSM_DP_CONTROLLER_AUX_OFFSET;
+       return readl_relaxed(catalog->io->dp_controller.base + offset);
+}
+
+static inline void dp_write_aux(struct dp_catalog_private *catalog,
+                              u32 offset, u32 data)
+{
+       offset += MSM_DP_CONTROLLER_AUX_OFFSET;
+       /*
+        * To make sure aux reg writes happen before any other operation,
+        * this function uses writel() instead of writel_relaxed()
+        */
+       writel(data, catalog->io->dp_controller.base + offset);
+}
+
+static inline u32 dp_read_ahb(struct dp_catalog_private *catalog, u32 offset)
+{
+       offset += MSM_DP_CONTROLLER_AHB_OFFSET;
+       return readl_relaxed(catalog->io->dp_controller.base + offset);
+}
+
+static inline void dp_write_ahb(struct dp_catalog_private *catalog,
+                              u32 offset, u32 data)
+{
+       offset += MSM_DP_CONTROLLER_AHB_OFFSET;
+       /*
+        * To make sure phy reg writes happen before any other operation,
+        * this function uses writel() instead of writel_relaxed()
+        */
+       writel(data, catalog->io->dp_controller.base + offset);
+}
+
+static inline void dp_write_p0(struct dp_catalog_private *catalog,
+                              u32 offset, u32 data)
+{
+       offset += MSM_DP_CONTROLLER_P0_OFFSET;
+       /*
+        * To make sure interface reg writes happen before any other
+        * operation, this function uses writel() instead of writel_relaxed()
+        */
+       writel(data, catalog->io->dp_controller.base + offset);
+}
+
+static inline u32 dp_read_p0(struct dp_catalog_private *catalog,
+                              u32 offset)
+{
+       offset += MSM_DP_CONTROLLER_P0_OFFSET;
+       return readl_relaxed(catalog->io->dp_controller.base + offset);
+}
+
+static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset)
+{
+       offset += MSM_DP_CONTROLLER_LINK_OFFSET;
+       return readl_relaxed(catalog->io->dp_controller.base + offset);
+}
+
+static inline void dp_write_link(struct dp_catalog_private *catalog,
+                              u32 offset, u32 data)
+{
+       offset += MSM_DP_CONTROLLER_LINK_OFFSET;
+       /*
+        * To make sure link reg writes happen before any other operation,
+        * this function uses writel() instead of writel_relaxed()
+        */
+       writel(data, catalog->io->dp_controller.base + offset);
+}
+
+/* aux related catalog functions */
+u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       return dp_read_aux(catalog, REG_DP_AUX_DATA);
+}
+
+int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_write_aux(catalog, REG_DP_AUX_DATA, dp_catalog->aux_data);
+       return 0;
+}
+
+int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, dp_catalog->aux_data);
+       return 0;
+}
+
+int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read)
+{
+       u32 data;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       if (read) {
+               data = dp_read_aux(catalog, REG_DP_AUX_TRANS_CTRL);
+               data &= ~DP_AUX_TRANS_CTRL_GO;
+               dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, data);
+       } else {
+               dp_write_aux(catalog, REG_DP_AUX_TRANS_CTRL, 0);
+       }
+       return 0;
+}
+
+int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_read_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_STATUS);
+       dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x1f);
+       dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0x9f);
+       dp_write_aux(catalog, REG_DP_PHY_AUX_INTERRUPT_CLEAR, 0);
+       return 0;
+}
+
+void dp_catalog_aux_reset(struct dp_catalog *dp_catalog)
+{
+       u32 aux_ctrl;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
+
+       aux_ctrl |= DP_AUX_CTRL_RESET;
+       dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+       usleep_range(1000, 1100); /* h/w recommended delay */
+
+       aux_ctrl &= ~DP_AUX_CTRL_RESET;
+       dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable)
+{
+       u32 aux_ctrl;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       aux_ctrl = dp_read_aux(catalog, REG_DP_AUX_CTRL);
+
+       if (enable) {
+               dp_write_aux(catalog, REG_DP_TIMEOUT_COUNT, 0xffff);
+               dp_write_aux(catalog, REG_DP_AUX_LIMITS, 0xffff);
+               aux_ctrl |= DP_AUX_CTRL_ENABLE;
+       } else {
+               aux_ctrl &= ~DP_AUX_CTRL_ENABLE;
+       }
+
+       dp_write_aux(catalog, REG_DP_AUX_CTRL, aux_ctrl);
+}
+
+void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       struct dp_io *dp_io = catalog->io;
+       struct phy *phy = dp_io->phy;
+
+       phy_calibrate(phy);
+}
+
+static void dump_regs(void __iomem *base, int len)
+{
+       int i;
+       u32 x0, x4, x8, xc;
+       u32 addr_off = 0;
+
+       len = DIV_ROUND_UP(len, 16);
+       for (i = 0; i < len; i++) {
+               x0 = readl_relaxed(base + addr_off);
+               x4 = readl_relaxed(base + addr_off + 0x04);
+               x8 = readl_relaxed(base + addr_off + 0x08);
+               xc = readl_relaxed(base + addr_off + 0x0c);
+
+               pr_info("%08x: %08x %08x %08x %08x\n", addr_off, x0, x4, x8, xc);
+               addr_off += 16;
+       }
+}
+
+void dp_catalog_dump_regs(struct dp_catalog *dp_catalog)
+{
+       u32 offset, len;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       pr_info("AHB regs\n");
+       offset = MSM_DP_CONTROLLER_AHB_OFFSET;
+       len = MSM_DP_CONTROLLER_AHB_SIZE;
+       dump_regs(catalog->io->dp_controller.base + offset, len);
+
+       pr_info("AUXCLK regs\n");
+       offset = MSM_DP_CONTROLLER_AUX_OFFSET;
+       len = MSM_DP_CONTROLLER_AUX_SIZE;
+       dump_regs(catalog->io->dp_controller.base + offset, len);
+
+       pr_info("LCLK regs\n");
+       offset = MSM_DP_CONTROLLER_LINK_OFFSET;
+       len = MSM_DP_CONTROLLER_LINK_SIZE;
+       dump_regs(catalog->io->dp_controller.base + offset, len);
+
+       pr_info("P0CLK regs\n");
+       offset = MSM_DP_CONTROLLER_P0_OFFSET;
+       len = MSM_DP_CONTROLLER_P0_SIZE;
+       dump_regs(catalog->io->dp_controller.base + offset, len);
+}
+
+int dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       u32 intr, intr_ack;
+
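+       /*
+        * Drop the mask bits from the raw status, ack the interrupts
+        * that fired, and keep the mask armed in the same write.
+        */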
+       intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS);
+       intr &= ~DP_INTERRUPT_STATUS1_MASK;
+       intr_ack = (intr & DP_INTERRUPT_STATUS1)
+                       << DP_INTERRUPT_STATUS_ACK_SHIFT;
+       dp_write_ahb(catalog, REG_DP_INTR_STATUS, intr_ack |
+                       DP_INTERRUPT_STATUS1_MASK);
+
+       return intr;
+}
+
+/* controller related catalog functions */
+void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
+                               u32 dp_tu, u32 valid_boundary,
+                               u32 valid_boundary2)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_write_link(catalog, REG_DP_VALID_BOUNDARY, valid_boundary);
+       dp_write_link(catalog, REG_DP_TU, dp_tu);
+       dp_write_link(catalog, REG_DP_VALID_BOUNDARY_2, valid_boundary2);
+}
+
+void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_write_link(catalog, REG_DP_STATE_CTRL, state);
+}
+
+void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 cfg)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       DRM_DEBUG_DP("DP_CONFIGURATION_CTRL=0x%x\n", cfg);
+
+       dp_write_link(catalog, REG_DP_CONFIGURATION_CTRL, cfg);
+}
+
+void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       u32 ln_0 = 0, ln_1 = 1, ln_2 = 2, ln_3 = 3; /* One-to-One mapping */
+       u32 ln_mapping;
+
+       ln_mapping = ln_0 << LANE0_MAPPING_SHIFT;
+       ln_mapping |= ln_1 << LANE1_MAPPING_SHIFT;
+       ln_mapping |= ln_2 << LANE2_MAPPING_SHIFT;
+       ln_mapping |= ln_3 << LANE3_MAPPING_SHIFT;
+
+       dp_write_link(catalog, REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING,
+                       ln_mapping);
+}
+
+void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog,
+                                               bool enable)
+{
+       u32 mainlink_ctrl;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       if (enable) {
+               /*
+                * To make sure link register writes happen before other
+                * operations, dp_write_link() uses writel().
+                */
+               mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+
+               mainlink_ctrl &= ~(DP_MAINLINK_CTRL_RESET |
+                                               DP_MAINLINK_CTRL_ENABLE);
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+               mainlink_ctrl |= DP_MAINLINK_CTRL_RESET;
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+               mainlink_ctrl &= ~DP_MAINLINK_CTRL_RESET;
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+
+               mainlink_ctrl |= (DP_MAINLINK_CTRL_ENABLE |
+                                       DP_MAINLINK_FB_BOUNDARY_SEL);
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+       } else {
+               mainlink_ctrl = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+               mainlink_ctrl &= ~DP_MAINLINK_CTRL_ENABLE;
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL, mainlink_ctrl);
+       }
+}
+
+void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog,
+                                       u32 colorimetry_cfg,
+                                       u32 test_bits_depth)
+{
+       u32 misc_val;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       misc_val = dp_read_link(catalog, REG_DP_MISC1_MISC0);
+
+       /* clear the test bit depth (bpp) bits */
+       misc_val &= ~(0x07 << DP_MISC0_TEST_BITS_DEPTH_SHIFT);
+       misc_val |= colorimetry_cfg << DP_MISC0_COLORIMETRY_CFG_SHIFT;
+       misc_val |= test_bits_depth << DP_MISC0_TEST_BITS_DEPTH_SHIFT;
+       /* Configure clock to synchronous mode */
+       misc_val |= DP_MISC0_SYNCHRONOUS_CLK;
+
+       DRM_DEBUG_DP("misc settings = 0x%x\n", misc_val);
+       dp_write_link(catalog, REG_DP_MISC1_MISC0, misc_val);
+}
+
+void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog,
+                                       u32 rate, u32 stream_rate_khz,
+                                       bool fixed_nvid)
+{
+       u32 pixel_m, pixel_n;
+       u32 mvid, nvid, pixel_div = 0, dispcc_input_rate;
+       u32 const nvid_fixed = DP_LINK_CONSTANT_N_VALUE;
+       u32 const link_rate_hbr2 = 540000;
+       u32 const link_rate_hbr3 = 810000;
+       unsigned long den, num;
+
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
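+       /*
+        * Pick the pixel clock mux divider for the link rate (kHz):
+        * RBR/HBR divide by 2, HBR2 by 4, HBR3 by 6.
+        */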
+       if (rate == link_rate_hbr3)
+               pixel_div = 6;
+       else if (rate == 162000 || rate == 270000)
+               pixel_div = 2;
+       else if (rate == link_rate_hbr2)
+               pixel_div = 4;
+       else
+               DRM_ERROR("Invalid pixel mux divider\n");
+
+       dispcc_input_rate = (rate * 10) / pixel_div;
+
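+       /* approximate the stream/link clock ratio with 16-bit M/N values */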
+       rational_best_approximation(dispcc_input_rate, stream_rate_khz,
+                       (unsigned long)(1 << 16) - 1,
+                       (unsigned long)(1 << 16) - 1, &den, &num);
+
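+       /*
+        * The hardware wants N pre-processed as the one's complement of
+        * (den - num), truncated to 16 bits (mirrors the downstream
+        * catalog code).
+        */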
+       den = ~(den - num);
+       den = den & 0xFFFF;
+       pixel_m = num;
+       pixel_n = den;
+
+       mvid = (pixel_m & 0xFFFF) * 5;
+       nvid = (0xFFFF & (~pixel_n)) + (pixel_m & 0xFFFF);
+
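+       /* scale M and N up so N is at least the spec constant N (0x8000) */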
+       if (nvid < nvid_fixed) {
+               u32 temp;
+
+               temp = (nvid_fixed / nvid) * nvid;
+               mvid = (nvid_fixed / nvid) * mvid;
+               nvid = temp;
+       }
+
+       if (link_rate_hbr2 == rate)
+               nvid *= 2;
+
+       if (link_rate_hbr3 == rate)
+               nvid *= 3;
+
+       DRM_DEBUG_DP("mvid=0x%x, nvid=0x%x\n", mvid, nvid);
+       dp_write_link(catalog, REG_DP_SOFTWARE_MVID, mvid);
+       dp_write_link(catalog, REG_DP_SOFTWARE_NVID, nvid);
+       dp_write_p0(catalog, MMSS_DP_DSC_DTO, 0x0);
+}
+
+int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog,
+                                       u32 pattern)
+{
+       int bit, ret;
+       u32 data;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       bit = BIT(pattern - 1);
+       DRM_DEBUG_DP("hw: bit=%d train=%d\n", bit, pattern);
+       dp_catalog_ctrl_state_ctrl(dp_catalog, bit);
+
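+       /* each training pattern has its own ready bit in REG_DP_MAINLINK_READY */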
+       bit = BIT(pattern - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
+
+       /* Poll for mainlink ready status */
+       ret = readx_poll_timeout(readl, catalog->io->dp_controller.base +
+                                       MSM_DP_CONTROLLER_LINK_OFFSET +
+                                       REG_DP_MAINLINK_READY,
+                                       data, data & bit,
+                                       POLLING_SLEEP_US, POLLING_TIMEOUT_US);
+       if (ret < 0) {
+               DRM_ERROR("set pattern for link_train=%d failed\n", pattern);
+               return ret;
+       }
+       return 0;
+}
+
+void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog)
+{
+       u32 sw_reset;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       sw_reset = dp_read_ahb(catalog, REG_DP_SW_RESET);
+
+       sw_reset |= DP_SW_RESET;
+       dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
+       usleep_range(1000, 1100); /* h/w recommended delay */
+
+       sw_reset &= ~DP_SW_RESET;
+       dp_write_ahb(catalog, REG_DP_SW_RESET, sw_reset);
+}
+
+bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog)
+{
+       u32 data;
+       int ret;
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       /* Poll for mainlink ready status */
+       ret = readl_poll_timeout(catalog->io->dp_controller.base +
+                               MSM_DP_CONTROLLER_LINK_OFFSET +
+                               REG_DP_MAINLINK_READY,
+                               data, data & DP_MAINLINK_READY_FOR_VIDEO,
+                               POLLING_SLEEP_US, POLLING_TIMEOUT_US);
+       if (ret < 0) {
+               DRM_ERROR("mainlink not ready\n");
+               return false;
+       }
+
+       return true;
+}
+
+void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog,
+                                               bool enable)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       if (enable) {
+               dp_write_ahb(catalog, REG_DP_INTR_STATUS,
+                               DP_INTERRUPT_STATUS1_MASK);
+               dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
+                               DP_INTERRUPT_STATUS2_MASK);
+       } else {
+               dp_write_ahb(catalog, REG_DP_INTR_STATUS, 0x00);
+               dp_write_ahb(catalog, REG_DP_INTR_STATUS2, 0x00);
+       }
+}
+
+void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
+                       u32 intr_mask, bool en)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       u32 config = dp_read_aux(catalog, REG_DP_DP_HPD_INT_MASK);
+
+       config = (en ? config | intr_mask : config & ~intr_mask);
+
+       dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
+                               config & DP_DP_HPD_INT_MASK);
+}
+
+void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       u32 reftimer = dp_read_aux(catalog, REG_DP_DP_HPD_REFTIMER);
+
+       /* enable HPD interrupts */
+       dp_catalog_hpd_config_intr(dp_catalog,
+               DP_DP_HPD_PLUG_INT_MASK | DP_DP_IRQ_HPD_INT_MASK
+               | DP_DP_HPD_UNPLUG_INT_MASK | DP_DP_HPD_REPLUG_INT_MASK, true);
+
+       /* Configure REFTIMER and enable it */
+       reftimer |= DP_DP_HPD_REFTIMER_ENABLE;
+       dp_write_aux(catalog, REG_DP_DP_HPD_REFTIMER, reftimer);
+
+       /* Enable HPD */
+       dp_write_aux(catalog, REG_DP_DP_HPD_CTRL, DP_DP_HPD_CTRL_HPD_EN);
+}
+
+u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       u32 isr = 0;
+
+       isr = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
+       dp_write_aux(catalog, REG_DP_DP_HPD_INT_ACK,
+                                (isr & DP_DP_HPD_INT_MASK));
+
+       return isr;
+}
+
+int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       u32 intr, intr_ack;
+
+       intr = dp_read_ahb(catalog, REG_DP_INTR_STATUS2);
+       intr &= ~DP_INTERRUPT_STATUS2_MASK;
+       intr_ack = (intr & DP_INTERRUPT_STATUS2)
+                       << DP_INTERRUPT_STATUS_ACK_SHIFT;
+       dp_write_ahb(catalog, REG_DP_INTR_STATUS2,
+                       intr_ack | DP_INTERRUPT_STATUS2_MASK);
+
+       return intr;
+}
+
+void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_write_ahb(catalog, REG_DP_PHY_CTRL,
+                       DP_PHY_CTRL_SW_RESET | DP_PHY_CTRL_SW_RESET_PLL);
+       usleep_range(1000, 1100); /* h/w recommended delay */
+       dp_write_ahb(catalog, REG_DP_PHY_CTRL, 0x0);
+}
+
+int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog,
+               u8 v_level, u8 p_level)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       struct dp_io *dp_io = catalog->io;
+       struct phy *phy = dp_io->phy;
+       struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+
+       /* TODO: Update for all lanes instead of just first one */
+       opts_dp->voltage[0] = v_level;
+       opts_dp->pre[0] = p_level;
+       opts_dp->set_voltages = 1;
+       phy_configure(phy, &dp_io->phy_opts);
+       opts_dp->set_voltages = 0;
+
+       return 0;
+}
+
+void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
+                       u32 pattern)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       u32 value = 0x0;
+
+       /* Make sure to clear the current pattern before starting a new one */
+       dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
+
+       switch (pattern) {
+       case DP_PHY_TEST_PATTERN_D10_2:
+               dp_write_link(catalog, REG_DP_STATE_CTRL,
+                               DP_STATE_CTRL_LINK_TRAINING_PATTERN1);
+               break;
+       case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+               value &= ~(1 << 16);
+               dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+                                       value);
+               value |= SCRAMBLER_RESET_COUNT_VALUE;
+               dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+                                       value);
+               dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
+                                       DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
+               dp_write_link(catalog, REG_DP_STATE_CTRL,
+                                       DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
+               break;
+       case DP_PHY_TEST_PATTERN_PRBS7:
+               dp_write_link(catalog, REG_DP_STATE_CTRL,
+                               DP_STATE_CTRL_LINK_PRBS7);
+               break;
+       case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+               dp_write_link(catalog, REG_DP_STATE_CTRL,
+                               DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN);
+               /* 00111110000011111000001111100000 */
+               dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0,
+                               0x3E0F83E0);
+               /* 00001111100000111110000011111000 */
+               dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1,
+                               0x0F83E0F8);
+               /* 1111100000111110 */
+               dp_write_link(catalog, REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2,
+                               0x0000F83E);
+               break;
+       case DP_PHY_TEST_PATTERN_CP2520:
+               value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+               value &= ~DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER;
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
+
+               value = DP_HBR2_ERM_PATTERN;
+               dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+                               value);
+               value |= SCRAMBLER_RESET_COUNT_VALUE;
+               dp_write_link(catalog, REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET,
+                                       value);
+               dp_write_link(catalog, REG_DP_MAINLINK_LEVELS,
+                                       DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2);
+               dp_write_link(catalog, REG_DP_STATE_CTRL,
+                                       DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE);
+               value = dp_read_link(catalog, REG_DP_MAINLINK_CTRL);
+               value |= DP_MAINLINK_CTRL_ENABLE;
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL, value);
+               break;
+       case DP_PHY_TEST_PATTERN_SEL_MASK:
+               dp_write_link(catalog, REG_DP_MAINLINK_CTRL,
+                               DP_MAINLINK_CTRL_ENABLE);
+               dp_write_link(catalog, REG_DP_STATE_CTRL,
+                               DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
+               break;
+       default:
+               DRM_DEBUG_DP("No valid test pattern requested:0x%x\n", pattern);
+               break;
+       }
+}
+
+u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       return dp_read_link(catalog, REG_DP_MAINLINK_READY);
+}
+
+/* panel related catalog functions */
+int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_write_link(catalog, REG_DP_TOTAL_HOR_VER,
+                               dp_catalog->total);
+       dp_write_link(catalog, REG_DP_START_HOR_VER_FROM_SYNC,
+                               dp_catalog->sync_start);
+       dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY,
+                               dp_catalog->width_blanking);
+       dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active);
+       return 0;
+}
+
+void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
+                               struct drm_display_mode *drm_mode)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+       u32 hsync_period, vsync_period;
+       u32 display_v_start, display_v_end;
+       u32 hsync_start_x, hsync_end_x;
+       u32 v_sync_width;
+       u32 hsync_ctl;
+       u32 display_hctl;
+
+       /* TPG config parameters */
+       hsync_period = drm_mode->htotal;
+       vsync_period = drm_mode->vtotal;
+
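+       /*
+        * Vertical start/end are programmed in pixel clocks, i.e. line
+        * counts scaled by hsync_period.
+        */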
+       display_v_start = ((drm_mode->vtotal - drm_mode->vsync_start) *
+                                       hsync_period);
+       display_v_end = ((vsync_period - (drm_mode->vsync_start -
+                                       drm_mode->vdisplay))
+                                       * hsync_period) - 1;
+
+       display_v_start += drm_mode->htotal - drm_mode->hsync_start;
+       display_v_end -= (drm_mode->hsync_start - drm_mode->hdisplay);
+
+       hsync_start_x = drm_mode->htotal - drm_mode->hsync_start;
+       hsync_end_x = hsync_period - (drm_mode->hsync_start -
+                                       drm_mode->hdisplay) - 1;
+
+       v_sync_width = drm_mode->vsync_end - drm_mode->vsync_start;
+
+       hsync_ctl = (hsync_period << 16) |
+                       (drm_mode->hsync_end - drm_mode->hsync_start);
+       display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+       dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0x0);
+       dp_write_p0(catalog, MMSS_DP_INTF_HSYNC_CTL, hsync_ctl);
+       dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F0, vsync_period *
+                       hsync_period);
+       dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0, v_sync_width *
+                       hsync_period);
+       dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PERIOD_F1, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_HCTL, display_hctl);
+       dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_HCTL, 0);
+       dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F0, display_v_start);
+       dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F0, display_v_end);
+       dp_write_p0(catalog, MMSS_INTF_DISPLAY_V_START_F1, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_DISPLAY_V_END_F1, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F0, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F0, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_START_F1, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_ACTIVE_V_END_F1, 0);
+       dp_write_p0(catalog, MMSS_DP_INTF_POLARITY_CTL, 0);
+
+       dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL,
+                               DP_TPG_CHECKERED_RECT_PATTERN);
+       dp_write_p0(catalog, MMSS_DP_TPG_VIDEO_CONFIG,
+                               DP_TPG_VIDEO_CONFIG_BPP_8BIT |
+                               DP_TPG_VIDEO_CONFIG_RGB);
+       dp_write_p0(catalog, MMSS_DP_BIST_ENABLE,
+                               DP_BIST_ENABLE_DPBIST_EN);
+       dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN,
+                               DP_TIMING_ENGINE_EN_EN);
+       DRM_DEBUG_DP("%s: enabled tpg\n", __func__);
+}
+
+void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog = container_of(dp_catalog,
+                               struct dp_catalog_private, dp_catalog);
+
+       dp_write_p0(catalog, MMSS_DP_TPG_MAIN_CONTROL, 0x0);
+       dp_write_p0(catalog, MMSS_DP_BIST_ENABLE, 0x0);
+       dp_write_p0(catalog, MMSS_DP_TIMING_ENGINE_EN, 0x0);
+}
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io)
+{
+       struct dp_catalog_private *catalog;
+
+       if (!io) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       catalog = devm_kzalloc(dev, sizeof(*catalog), GFP_KERNEL);
+       if (!catalog)
+               return ERR_PTR(-ENOMEM);
+
+       catalog->dev = dev;
+       catalog->io = io;
+
+       return &catalog->dp_catalog;
+}
+
+void dp_catalog_audio_get_header(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog;
+       u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
+       enum dp_catalog_audio_sdp_type sdp;
+       enum dp_catalog_audio_header_type header;
+
+       if (!dp_catalog)
+               return;
+
+       catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       sdp_map = catalog->audio_map;
+       sdp     = dp_catalog->sdp_type;
+       header  = dp_catalog->sdp_header;
+
+       dp_catalog->audio_data = dp_read_link(catalog,
+                       sdp_map[sdp][header]);
+}
+
+void dp_catalog_audio_set_header(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog;
+       u32 (*sdp_map)[DP_AUDIO_SDP_HEADER_MAX];
+       enum dp_catalog_audio_sdp_type sdp;
+       enum dp_catalog_audio_header_type header;
+       u32 data;
+
+       if (!dp_catalog)
+               return;
+
+       catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       sdp_map = catalog->audio_map;
+       sdp     = dp_catalog->sdp_type;
+       header  = dp_catalog->sdp_header;
+       data    = dp_catalog->audio_data;
+
+       dp_write_link(catalog, sdp_map[sdp][header], data);
+}
+
+void dp_catalog_audio_config_acr(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog;
+       u32 acr_ctrl, select;
+
+       if (!dp_catalog)
+               return;
+
+       catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
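+       /*
+        * The audio frequency select sits in bits [7:4]; the fixed
+        * BIT(31)/BIT(14)/BIT(8) enables follow the downstream ACR
+        * programming (not documented here).
+        */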
+       select = dp_catalog->audio_data;
+       acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
+
+       DRM_DEBUG_DP("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl);
+
+       dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
+}
+
+void dp_catalog_audio_enable(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog;
+       bool enable;
+       u32 audio_ctrl;
+
+       if (!dp_catalog)
+               return;
+
+       catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       enable = !!dp_catalog->audio_data;
+       audio_ctrl = dp_read_link(catalog, MMSS_DP_AUDIO_CFG);
+
+       if (enable)
+               audio_ctrl |= BIT(0);
+       else
+               audio_ctrl &= ~BIT(0);
+
+       DRM_DEBUG_DP("dp_audio_cfg = 0x%x\n", audio_ctrl);
+
+       dp_write_link(catalog, MMSS_DP_AUDIO_CFG, audio_ctrl);
+       /* make sure the audio enable/disable update reaches the hardware */
+       wmb();
+}
+
+void dp_catalog_audio_config_sdp(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog;
+       u32 sdp_cfg = 0;
+       u32 sdp_cfg2 = 0;
+
+       if (!dp_catalog)
+               return;
+
+       catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       sdp_cfg = dp_read_link(catalog, MMSS_DP_SDP_CFG);
+       /* AUDIO_TIMESTAMP_SDP_EN */
+       sdp_cfg |= BIT(1);
+       /* AUDIO_STREAM_SDP_EN */
+       sdp_cfg |= BIT(2);
+       /* AUDIO_COPY_MANAGEMENT_SDP_EN */
+       sdp_cfg |= BIT(5);
+       /* AUDIO_ISRC_SDP_EN  */
+       sdp_cfg |= BIT(6);
+       /* AUDIO_INFOFRAME_SDP_EN  */
+       sdp_cfg |= BIT(20);
+
+       DRM_DEBUG_DP("sdp_cfg = 0x%x\n", sdp_cfg);
+
+       dp_write_link(catalog, MMSS_DP_SDP_CFG, sdp_cfg);
+
+       sdp_cfg2 = dp_read_link(catalog, MMSS_DP_SDP_CFG2);
+       /* IFRM_REGSRC -> Do not use reg values */
+       sdp_cfg2 &= ~BIT(0);
+       /* AUDIO_STREAM_HB3_REGSRC-> Do not use reg values */
+       sdp_cfg2 &= ~BIT(1);
+
+       DRM_DEBUG_DP("sdp_cfg2 = 0x%x\n", sdp_cfg2);
+
+       dp_write_link(catalog, MMSS_DP_SDP_CFG2, sdp_cfg2);
+}
+
+void dp_catalog_audio_init(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog;
+
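+       /*
+        * Per-SDP register map indexed by header type; headers 2 and 3
+        * live in the same register, hence the repeated second entry in
+        * each row.
+        */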
+       static u32 sdp_map[][DP_AUDIO_SDP_HEADER_MAX] = {
+               {
+                       MMSS_DP_AUDIO_STREAM_0,
+                       MMSS_DP_AUDIO_STREAM_1,
+                       MMSS_DP_AUDIO_STREAM_1,
+               },
+               {
+                       MMSS_DP_AUDIO_TIMESTAMP_0,
+                       MMSS_DP_AUDIO_TIMESTAMP_1,
+                       MMSS_DP_AUDIO_TIMESTAMP_1,
+               },
+               {
+                       MMSS_DP_AUDIO_INFOFRAME_0,
+                       MMSS_DP_AUDIO_INFOFRAME_1,
+                       MMSS_DP_AUDIO_INFOFRAME_1,
+               },
+               {
+                       MMSS_DP_AUDIO_COPYMANAGEMENT_0,
+                       MMSS_DP_AUDIO_COPYMANAGEMENT_1,
+                       MMSS_DP_AUDIO_COPYMANAGEMENT_1,
+               },
+               {
+                       MMSS_DP_AUDIO_ISRC_0,
+                       MMSS_DP_AUDIO_ISRC_1,
+                       MMSS_DP_AUDIO_ISRC_1,
+               },
+       };
+
+       if (!dp_catalog)
+               return;
+
+       catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       catalog->audio_map = sdp_map;
+}
+
+void dp_catalog_audio_sfe_level(struct dp_catalog *dp_catalog)
+{
+       struct dp_catalog_private *catalog;
+       u32 mainlink_levels, safe_to_exit_level;
+
+       if (!dp_catalog)
+               return;
+
+       catalog = container_of(dp_catalog,
+               struct dp_catalog_private, dp_catalog);
+
+       safe_to_exit_level = dp_catalog->audio_data;
+       mainlink_levels = dp_read_link(catalog, REG_DP_MAINLINK_LEVELS);
+       mainlink_levels &= 0xFE0;
+       mainlink_levels |= safe_to_exit_level;
+
+       DRM_DEBUG_DP("mainlink_level = 0x%x, safe_to_exit_level = 0x%x\n",
+                        mainlink_levels, safe_to_exit_level);
+
+       dp_write_link(catalog, REG_DP_MAINLINK_LEVELS, mainlink_levels);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
new file mode 100644 (file)
index 0000000..4b7666f
--- /dev/null
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_CATALOG_H_
+#define _DP_CATALOG_H_
+
+#include <drm/drm_modes.h>
+
+#include "dp_parser.h"
+
+/* interrupts */
+#define DP_INTR_HPD            BIT(0)
+#define DP_INTR_AUX_I2C_DONE   BIT(3)
+#define DP_INTR_WRONG_ADDR     BIT(6)
+#define DP_INTR_TIMEOUT                BIT(9)
+#define DP_INTR_NACK_DEFER     BIT(12)
+#define DP_INTR_WRONG_DATA_CNT BIT(15)
+#define DP_INTR_I2C_NACK       BIT(18)
+#define DP_INTR_I2C_DEFER      BIT(21)
+#define DP_INTR_PLL_UNLOCKED   BIT(24)
+#define DP_INTR_AUX_ERROR      BIT(27)
+
+#define DP_INTR_READY_FOR_VIDEO                BIT(0)
+#define DP_INTR_IDLE_PATTERN_SENT      BIT(3)
+#define DP_INTR_FRAME_END              BIT(6)
+#define DP_INTR_CRC_UPDATED            BIT(9)
+
+#define DP_AUX_CFG_MAX_VALUE_CNT 3
+
+/* PHY AUX config registers */
+enum dp_phy_aux_config_type {
+       PHY_AUX_CFG0,
+       PHY_AUX_CFG1,
+       PHY_AUX_CFG2,
+       PHY_AUX_CFG3,
+       PHY_AUX_CFG4,
+       PHY_AUX_CFG5,
+       PHY_AUX_CFG6,
+       PHY_AUX_CFG7,
+       PHY_AUX_CFG8,
+       PHY_AUX_CFG9,
+       PHY_AUX_CFG_MAX,
+};
+
+enum dp_catalog_audio_sdp_type {
+       DP_AUDIO_SDP_STREAM,
+       DP_AUDIO_SDP_TIMESTAMP,
+       DP_AUDIO_SDP_INFOFRAME,
+       DP_AUDIO_SDP_COPYMANAGEMENT,
+       DP_AUDIO_SDP_ISRC,
+       DP_AUDIO_SDP_MAX,
+};
+
+enum dp_catalog_audio_header_type {
+       DP_AUDIO_SDP_HEADER_1,
+       DP_AUDIO_SDP_HEADER_2,
+       DP_AUDIO_SDP_HEADER_3,
+       DP_AUDIO_SDP_HEADER_MAX,
+};
+
+struct dp_catalog {
+       u32 aux_data;
+       u32 total;
+       u32 sync_start;
+       u32 width_blanking;
+       u32 dp_active;
+       enum dp_catalog_audio_sdp_type sdp_type;
+       enum dp_catalog_audio_header_type sdp_header;
+       u32 audio_data;
+};
+
+/* AUX APIs */
+u32 dp_catalog_aux_read_data(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_write_data(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_write_trans(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_clear_trans(struct dp_catalog *dp_catalog, bool read);
+int dp_catalog_aux_clear_hw_interrupts(struct dp_catalog *dp_catalog);
+void dp_catalog_aux_reset(struct dp_catalog *dp_catalog);
+void dp_catalog_aux_enable(struct dp_catalog *dp_catalog, bool enable);
+void dp_catalog_aux_update_cfg(struct dp_catalog *dp_catalog);
+int dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog);
+
+/* DP Controller APIs */
+void dp_catalog_ctrl_state_ctrl(struct dp_catalog *dp_catalog, u32 state);
+void dp_catalog_ctrl_config_ctrl(struct dp_catalog *dp_catalog, u32 config);
+void dp_catalog_ctrl_lane_mapping(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog *dp_catalog, bool enable);
+void dp_catalog_ctrl_config_misc(struct dp_catalog *dp_catalog, u32 cc, u32 tb);
+void dp_catalog_ctrl_config_msa(struct dp_catalog *dp_catalog, u32 rate,
+                               u32 stream_rate_khz, bool fixed_nvid);
+int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog, u32 pattern);
+void dp_catalog_ctrl_reset(struct dp_catalog *dp_catalog);
+bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_enable_irq(struct dp_catalog *dp_catalog, bool enable);
+void dp_catalog_hpd_config_intr(struct dp_catalog *dp_catalog,
+                       u32 intr_mask, bool en);
+void dp_catalog_ctrl_hpd_config(struct dp_catalog *dp_catalog);
+u32 dp_catalog_hpd_get_intr_status(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_phy_reset(struct dp_catalog *dp_catalog);
+int dp_catalog_ctrl_update_vx_px(struct dp_catalog *dp_catalog, u8 v_level,
+                               u8 p_level);
+int dp_catalog_ctrl_get_interrupt(struct dp_catalog *dp_catalog);
+void dp_catalog_ctrl_update_transfer_unit(struct dp_catalog *dp_catalog,
+                               u32 dp_tu, u32 valid_boundary,
+                               u32 valid_boundary2);
+void dp_catalog_ctrl_send_phy_pattern(struct dp_catalog *dp_catalog,
+                               u32 pattern);
+u32 dp_catalog_ctrl_read_phy_pattern(struct dp_catalog *dp_catalog);
+
+/* DP Panel APIs */
+int dp_catalog_panel_timing_cfg(struct dp_catalog *dp_catalog);
+void dp_catalog_dump_regs(struct dp_catalog *dp_catalog);
+void dp_catalog_panel_tpg_enable(struct dp_catalog *dp_catalog,
+                               struct drm_display_mode *drm_mode);
+void dp_catalog_panel_tpg_disable(struct dp_catalog *dp_catalog);
+
+struct dp_catalog *dp_catalog_get(struct device *dev, struct dp_io *io);
+
+/* DP Audio APIs */
+void dp_catalog_audio_get_header(struct dp_catalog *catalog);
+void dp_catalog_audio_set_header(struct dp_catalog *catalog);
+void dp_catalog_audio_config_acr(struct dp_catalog *catalog);
+void dp_catalog_audio_enable(struct dp_catalog *catalog);
+void dp_catalog_audio_config_sdp(struct dp_catalog *catalog);
+void dp_catalog_audio_init(struct dp_catalog *catalog);
+void dp_catalog_audio_sfe_level(struct dp_catalog *catalog);
+
+#endif /* _DP_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
new file mode 100644 (file)
index 0000000..2e3e191
--- /dev/null
@@ -0,0 +1,1869 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)    "[drm-dp] %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+#include <drm/drm_fixed.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_print.h>
+
+#include "dp_reg.h"
+#include "dp_ctrl.h"
+#include "dp_link.h"
+
+#define DP_KHZ_TO_HZ 1000
+#define IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES        (30 * HZ / 1000) /* 30 ms */
+#define WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES (HZ / 2)
+
+#define DP_CTRL_INTR_READY_FOR_VIDEO     BIT(0)
+#define DP_CTRL_INTR_IDLE_PATTERN_SENT  BIT(3)
+
+#define MR_LINK_TRAINING1  0x8
+#define MR_LINK_SYMBOL_ERM 0x80
+#define MR_LINK_PRBS7 0x100
+#define MR_LINK_CUSTOM80 0x200
+#define MR_LINK_TRAINING4  0x40
+
+enum {
+       DP_TRAINING_NONE,
+       DP_TRAINING_1,
+       DP_TRAINING_2,
+};
+
+struct dp_tu_calc_input {
+       u64 lclk;        /* 162, 270, 540 and 810 */
+       u64 pclk_khz;    /* in kHz */
+       u64 hactive;     /* active h-width */
+       u64 hporch;      /* bp + fp + pulse */
+       int nlanes;      /* number of lanes */
+       int bpp;         /* bits */
+       int pixel_enc;   /* 444, 420, 422 */
+       int dsc_en;     /* dsc on/off */
+       int async_en;   /* async mode */
+       int fec_en;     /* fec */
+       int compress_ratio; /* 2:1 = 200, 3:1 = 300, 3.75:1 = 375 */
+       int num_of_dsc_slices; /* number of slices per line */
+};
+
+struct dp_vc_tu_mapping_table {
+       u32 vic;
+       u8 lanes;
+       u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20), 810 (30) */
+       u8 bpp;
+       u8 valid_boundary_link;
+       u16 delay_start_link;
+       bool boundary_moderation_en;
+       u8 valid_lower_boundary_link;
+       u8 upper_boundary_count;
+       u8 lower_boundary_count;
+       u8 tu_size_minus1;
+};
+
+struct dp_ctrl_private {
+       struct dp_ctrl dp_ctrl;
+       struct device *dev;
+       struct drm_dp_aux *aux;
+       struct dp_panel *panel;
+       struct dp_link *link;
+       struct dp_power *power;
+       struct dp_parser *parser;
+       struct dp_catalog *catalog;
+
+       struct completion idle_comp;
+       struct completion video_comp;
+};
+
+struct dp_cr_status {
+       u8 lane_0_1;
+       u8 lane_2_3;
+};
+
+#define DP_LANE0_1_CR_DONE     0x11
+
+static int dp_aux_link_configure(struct drm_dp_aux *aux,
+                                       struct dp_link_info *link)
+{
+       u8 values[2];
+       int err;
+
+       values[0] = drm_dp_link_rate_to_bw_code(link->rate);
+       values[1] = link->num_lanes;
+
+       if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
+               values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
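+       /*
+        * DP_LINK_BW_SET and DP_LANE_COUNT_SET are consecutive DPCD
+        * registers, so one two-byte write programs both.
+        */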
+       err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values));
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
+{
+       struct dp_ctrl_private *ctrl;
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+       reinit_completion(&ctrl->idle_comp);
+       dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_PUSH_IDLE);
+
+       if (!wait_for_completion_timeout(&ctrl->idle_comp,
+                       IDLE_PATTERN_COMPLETION_TIMEOUT_JIFFIES))
+               pr_warn("PUSH_IDLE pattern timedout\n");
+
+       pr_debug("mainlink off done\n");
+}
+
+static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
+{
+       u32 config = 0, tbd;
+       u8 *dpcd = ctrl->panel->dpcd;
+
+       /* Default-> LSCLK DIV: 1/4 LCLK  */
+       config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT);
+
+       /* Scrambler reset enable */
+       if (dpcd[DP_EDP_CONFIGURATION_CAP] & DP_ALTERNATE_SCRAMBLER_RESET_CAP)
+               config |= DP_CONFIGURATION_CTRL_ASSR;
+
+       tbd = dp_link_get_test_bits_depth(ctrl->link,
+                       ctrl->panel->dp_mode.bpp);
+
+       if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) {
+               pr_debug("BIT_DEPTH not set. Configure default\n");
+               tbd = DP_TEST_BIT_DEPTH_8;
+       }
+
+       config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
+
+       /* Num of Lanes */
+       config |= ((ctrl->link->link_params.num_lanes - 1)
+                       << DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT);
+
+       if (drm_dp_enhanced_frame_cap(dpcd))
+               config |= DP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
+
+       config |= DP_CONFIGURATION_CTRL_P_INTERLACED; /* progressive video */
+
+       /* sync clock & static Mvid */
+       config |= DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN;
+       config |= DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK;
+
+       dp_catalog_ctrl_config_ctrl(ctrl->catalog, config);
+}
+
+static void dp_ctrl_configure_source_params(struct dp_ctrl_private *ctrl)
+{
+       u32 cc, tb;
+
+       dp_catalog_ctrl_lane_mapping(ctrl->catalog);
+       dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+
+       dp_ctrl_config_ctrl(ctrl);
+
+       tb = dp_link_get_test_bits_depth(ctrl->link,
+               ctrl->panel->dp_mode.bpp);
+       cc = dp_link_get_colorimetry_config(ctrl->link);
+       dp_catalog_ctrl_config_misc(ctrl->catalog, cc, tb);
+       dp_panel_timing_cfg(ctrl->panel);
+}
+
+/*
+ * The structure and the functions below are an IP/hardware-specific
+ * implementation of the transfer-unit (TU) calculation. Most of it
+ * carries no explanatory comments.
+ */
+struct tu_algo_data {
+       s64 lclk_fp;
+       s64 pclk_fp;
+       s64 lwidth;
+       s64 lwidth_fp;
+       s64 hbp_relative_to_pclk;
+       s64 hbp_relative_to_pclk_fp;
+       int nlanes;
+       int bpp;
+       int pixelEnc;
+       int dsc_en;
+       int async_en;
+       int bpc;
+
+       uint delay_start_link_extra_pixclk;
+       int extra_buffer_margin;
+       s64 ratio_fp;
+       s64 original_ratio_fp;
+
+       s64 err_fp;
+       s64 n_err_fp;
+       s64 n_n_err_fp;
+       int tu_size;
+       int tu_size_desired;
+       int tu_size_minus1;
+
+       int valid_boundary_link;
+       s64 resulting_valid_fp;
+       s64 total_valid_fp;
+       s64 effective_valid_fp;
+       s64 effective_valid_recorded_fp;
+       int n_tus;
+       int n_tus_per_lane;
+       int paired_tus;
+       int remainder_tus;
+       int remainder_tus_upper;
+       int remainder_tus_lower;
+       int extra_bytes;
+       int filler_size;
+       int delay_start_link;
+
+       int extra_pclk_cycles;
+       int extra_pclk_cycles_in_link_clk;
+       s64 ratio_by_tu_fp;
+       s64 average_valid2_fp;
+       int new_valid_boundary_link;
+       int remainder_symbols_exist;
+       int n_symbols;
+       s64 n_remainder_symbols_per_lane_fp;
+       s64 last_partial_tu_fp;
+       s64 TU_ratio_err_fp;
+
+       int n_tus_incl_last_incomplete_tu;
+       int extra_pclk_cycles_tmp;
+       int extra_pclk_cycles_in_link_clk_tmp;
+       int extra_required_bytes_new_tmp;
+       int filler_size_tmp;
+       int lower_filler_size_tmp;
+       int delay_start_link_tmp;
+
+       bool boundary_moderation_en;
+       int boundary_mod_lower_err;
+       int upper_boundary_count;
+       int lower_boundary_count;
+       int i_upper_boundary_count;
+       int i_lower_boundary_count;
+       int valid_lower_boundary_link;
+       int even_distribution_BF;
+       int even_distribution_legacy;
+       int even_distribution;
+       int min_hblank_violated;
+       s64 delay_start_time_fp;
+       s64 hbp_time_fp;
+       s64 hactive_time_fp;
+       s64 diff_abs_fp;
+
+       s64 ratio;
+};
+
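+/*
+ * Sign-aware compare of two 32.32 fixed-point values:
+ * returns 0 if a == b, 1 if a > b, 2 if a < b.
+ */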
+static int _tu_param_compare(s64 a, s64 b)
+{
+       u32 a_sign;
+       u32 b_sign;
+       s64 a_temp, b_temp, minus_1;
+
+       if (a == b)
+               return 0;
+
+       minus_1 = drm_fixp_from_fraction(-1, 1);
+
+       a_sign = (a >> 32) & 0x80000000 ? 1 : 0;
+
+       b_sign = (b >> 32) & 0x80000000 ? 1 : 0;
+
+       if (a_sign > b_sign)
+               return 2;
+       else if (b_sign > a_sign)
+               return 1;
+
+       if (!a_sign && !b_sign) { /* positive */
+               if (a > b)
+                       return 1;
+               else
+                       return 2;
+       } else { /* negative */
+               a_temp = drm_fixp_mul(a, minus_1);
+               b_temp = drm_fixp_mul(b, minus_1);
+
+               if (a_temp > b_temp)
+                       return 2;
+               else
+                       return 1;
+       }
+}
+
+static void dp_panel_update_tu_timings(struct dp_tu_calc_input *in,
+                                       struct tu_algo_data *tu)
+{
+       int nlanes = in->nlanes;
+       int dsc_num_slices = in->num_of_dsc_slices;
+       int dsc_num_bytes  = 0;
+       int numerator;
+       s64 pclk_dsc_fp;
+       s64 dwidth_dsc_fp;
+       s64 hbp_dsc_fp;
+
+       int tot_num_eoc_symbols = 0;
+       int tot_num_hor_bytes   = 0;
+       int tot_num_dummy_bytes = 0;
+       int dwidth_dsc_bytes    = 0;
+       int eoc_bytes           = 0;
+
+       s64 temp1_fp, temp2_fp, temp3_fp;
+
+       tu->lclk_fp              = drm_fixp_from_fraction(in->lclk, 1);
+       tu->pclk_fp              = drm_fixp_from_fraction(in->pclk_khz, 1000);
+       tu->lwidth               = in->hactive;
+       tu->hbp_relative_to_pclk = in->hporch;
+       tu->nlanes               = in->nlanes;
+       tu->bpp                  = in->bpp;
+       tu->pixelEnc             = in->pixel_enc;
+       tu->dsc_en               = in->dsc_en;
+       tu->async_en             = in->async_en;
+       tu->lwidth_fp            = drm_fixp_from_fraction(in->hactive, 1);
+       tu->hbp_relative_to_pclk_fp = drm_fixp_from_fraction(in->hporch, 1);
+
+       if (tu->pixelEnc == 420) {
+               temp1_fp = drm_fixp_from_fraction(2, 1);
+               tu->pclk_fp = drm_fixp_div(tu->pclk_fp, temp1_fp);
+               tu->lwidth_fp = drm_fixp_div(tu->lwidth_fp, temp1_fp);
+               tu->hbp_relative_to_pclk_fp =
+                               drm_fixp_div(tu->hbp_relative_to_pclk_fp, 2);
+       }
+
+       if (tu->pixelEnc == 422) {
+               switch (tu->bpp) {
+               case 24:
+                       tu->bpp = 16;
+                       tu->bpc = 8;
+                       break;
+               case 30:
+                       tu->bpp = 20;
+                       tu->bpc = 10;
+                       break;
+               default:
+                       tu->bpp = 16;
+                       tu->bpc = 8;
+                       break;
+               }
+       } else {
+               tu->bpc = tu->bpp/3;
+       }
+
+       if (!in->dsc_en)
+               goto fec_check;
+
+       temp1_fp = drm_fixp_from_fraction(in->compress_ratio, 100);
+       temp2_fp = drm_fixp_from_fraction(in->bpp, 1);
+       temp3_fp = drm_fixp_div(temp2_fp, temp1_fp);
+       temp2_fp = drm_fixp_mul(tu->lwidth_fp, temp3_fp);
+
+       temp1_fp = drm_fixp_from_fraction(8, 1);
+       temp3_fp = drm_fixp_div(temp2_fp, temp1_fp);
+
+       numerator = drm_fixp2int(temp3_fp);
+
+       dsc_num_bytes  = numerator / dsc_num_slices;
+       eoc_bytes           = dsc_num_bytes % nlanes;
+       tot_num_eoc_symbols = nlanes * dsc_num_slices;
+       tot_num_hor_bytes   = dsc_num_bytes * dsc_num_slices;
+       tot_num_dummy_bytes = (nlanes - eoc_bytes) * dsc_num_slices;
+
+       if (dsc_num_bytes == 0)
+               pr_info("incorrect no of bytes per slice=%d\n", dsc_num_bytes);
+
+       dwidth_dsc_bytes = (tot_num_hor_bytes +
+                               tot_num_eoc_symbols +
+                               (eoc_bytes == 0 ? 0 : tot_num_dummy_bytes));
+
+       dwidth_dsc_fp = drm_fixp_from_fraction(dwidth_dsc_bytes, 3);
+
+       temp2_fp = drm_fixp_mul(tu->pclk_fp, dwidth_dsc_fp);
+       temp1_fp = drm_fixp_div(temp2_fp, tu->lwidth_fp);
+       pclk_dsc_fp = temp1_fp;
+
+       temp1_fp = drm_fixp_div(pclk_dsc_fp, tu->pclk_fp);
+       temp2_fp = drm_fixp_mul(tu->hbp_relative_to_pclk_fp, temp1_fp);
+       hbp_dsc_fp = temp2_fp;
+
+       /* output */
+       tu->pclk_fp = pclk_dsc_fp;
+       tu->lwidth_fp = dwidth_dsc_fp;
+       tu->hbp_relative_to_pclk_fp = hbp_dsc_fp;
+
+fec_check:
+       if (in->fec_en) {
+               temp1_fp = drm_fixp_from_fraction(976, 1000); /* 0.976 */
+               tu->lclk_fp = drm_fixp_mul(tu->lclk_fp, temp1_fp);
+       }
+}
+
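+/*
+ * Evaluate the current TU size and upper/lower boundary-count candidate,
+ * recording it as the best boundary-moderation setting whenever it
+ * satisfies the error and hblank constraints.
+ */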
+static void _tu_valid_boundary_calc(struct tu_algo_data *tu)
+{
+       s64 temp1_fp, temp2_fp, temp, temp1, temp2;
+       int compare_result_1, compare_result_2, compare_result_3;
+
+       temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+       temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+
+       tu->new_valid_boundary_link = drm_fixp2int_ceil(temp2_fp);
+
+       temp = (tu->i_upper_boundary_count *
+                               tu->new_valid_boundary_link +
+                               tu->i_lower_boundary_count *
+                               (tu->new_valid_boundary_link-1));
+       tu->average_valid2_fp = drm_fixp_from_fraction(temp,
+                                       (tu->i_upper_boundary_count +
+                                       tu->i_lower_boundary_count));
+
+       temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
+       temp2_fp = tu->lwidth_fp;
+       temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+       temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp);
+       tu->n_tus = drm_fixp2int(temp2_fp);
+       if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000)
+               tu->n_tus += 1;
+
+       temp1_fp = drm_fixp_from_fraction(tu->n_tus, 1);
+       temp2_fp = drm_fixp_mul(temp1_fp, tu->average_valid2_fp);
+       temp1_fp = drm_fixp_from_fraction(tu->n_symbols, 1);
+       temp2_fp = temp1_fp - temp2_fp;
+       temp1_fp = drm_fixp_from_fraction(tu->nlanes, 1);
+       temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
+       tu->n_remainder_symbols_per_lane_fp = temp2_fp;
+
+       temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+       tu->last_partial_tu_fp =
+                       drm_fixp_div(tu->n_remainder_symbols_per_lane_fp,
+                                       temp1_fp);
+
+       if (tu->n_remainder_symbols_per_lane_fp != 0)
+               tu->remainder_symbols_exist = 1;
+       else
+               tu->remainder_symbols_exist = 0;
+
+       temp1_fp = drm_fixp_from_fraction(tu->n_tus, tu->nlanes);
+       tu->n_tus_per_lane = drm_fixp2int(temp1_fp);
+
+       tu->paired_tus = (int)((tu->n_tus_per_lane) /
+                                       (tu->i_upper_boundary_count +
+                                        tu->i_lower_boundary_count));
+
+       tu->remainder_tus = tu->n_tus_per_lane - tu->paired_tus *
+                                               (tu->i_upper_boundary_count +
+                                               tu->i_lower_boundary_count);
+
+       if ((tu->remainder_tus - tu->i_upper_boundary_count) > 0) {
+               tu->remainder_tus_upper = tu->i_upper_boundary_count;
+               tu->remainder_tus_lower = tu->remainder_tus -
+                                               tu->i_upper_boundary_count;
+       } else {
+               tu->remainder_tus_upper = tu->remainder_tus;
+               tu->remainder_tus_lower = 0;
+       }
+
+       temp = tu->paired_tus * (tu->i_upper_boundary_count *
+                               tu->new_valid_boundary_link +
+                               tu->i_lower_boundary_count *
+                               (tu->new_valid_boundary_link - 1)) +
+                               (tu->remainder_tus_upper *
+                                tu->new_valid_boundary_link) +
+                               (tu->remainder_tus_lower *
+                               (tu->new_valid_boundary_link - 1));
+       tu->total_valid_fp = drm_fixp_from_fraction(temp, 1);
+
+       if (tu->remainder_symbols_exist) {
+               temp1_fp = tu->total_valid_fp +
+                               tu->n_remainder_symbols_per_lane_fp;
+               temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1);
+               temp2_fp = temp2_fp + tu->last_partial_tu_fp;
+               temp1_fp = drm_fixp_div(temp1_fp, temp2_fp);
+       } else {
+               temp2_fp = drm_fixp_from_fraction(tu->n_tus_per_lane, 1);
+               temp1_fp = drm_fixp_div(tu->total_valid_fp, temp2_fp);
+       }
+       tu->effective_valid_fp = temp1_fp;
+
+       temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+       temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+       tu->n_n_err_fp = tu->effective_valid_fp - temp2_fp;
+
+       temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+       temp2_fp = drm_fixp_mul(tu->ratio_fp, temp1_fp);
+       tu->n_err_fp = tu->average_valid2_fp - temp2_fp;
+
+       tu->even_distribution = tu->n_tus % tu->nlanes == 0 ? 1 : 0;
+
+       temp1_fp = drm_fixp_from_fraction(tu->bpp, 8);
+       temp2_fp = tu->lwidth_fp;
+       temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+       temp2_fp = drm_fixp_div(temp1_fp, tu->average_valid2_fp);
+
+       if (temp2_fp)
+               tu->n_tus_incl_last_incomplete_tu = drm_fixp2int_ceil(temp2_fp);
+       else
+               tu->n_tus_incl_last_incomplete_tu = 0;
+
+       temp1 = 0;
+       temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+       temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
+       temp1_fp = tu->average_valid2_fp - temp2_fp;
+       temp2_fp = drm_fixp_from_fraction(tu->n_tus_incl_last_incomplete_tu, 1);
+       temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+       if (temp1_fp)
+               temp1 = drm_fixp2int_ceil(temp1_fp);
+
+       temp = tu->i_upper_boundary_count * tu->nlanes;
+       temp1_fp = drm_fixp_from_fraction(tu->tu_size, 1);
+       temp2_fp = drm_fixp_mul(tu->original_ratio_fp, temp1_fp);
+       temp1_fp = drm_fixp_from_fraction(tu->new_valid_boundary_link, 1);
+       temp2_fp = temp1_fp - temp2_fp;
+       temp1_fp = drm_fixp_from_fraction(temp, 1);
+       temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+       if (temp2_fp)
+               temp2 = drm_fixp2int_ceil(temp2_fp);
+       else
+               temp2 = 0;
+       tu->extra_required_bytes_new_tmp = (int)(temp1 + temp2);
+
+       temp1_fp = drm_fixp_from_fraction(8, tu->bpp);
+       temp2_fp = drm_fixp_from_fraction(
+       tu->extra_required_bytes_new_tmp, 1);
+       temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+       if (temp1_fp)
+               tu->extra_pclk_cycles_tmp = drm_fixp2int_ceil(temp1_fp);
+       else
+               tu->extra_pclk_cycles_tmp = 0;
+
+       temp1_fp = drm_fixp_from_fraction(tu->extra_pclk_cycles_tmp, 1);
+       temp2_fp = drm_fixp_div(tu->lclk_fp, tu->pclk_fp);
+       temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+       if (temp1_fp)
+               tu->extra_pclk_cycles_in_link_clk_tmp =
+                                               drm_fixp2int_ceil(temp1_fp);
+       else
+               tu->extra_pclk_cycles_in_link_clk_tmp = 0;
+
+       tu->filler_size_tmp = tu->tu_size - tu->new_valid_boundary_link;
+
+       tu->lower_filler_size_tmp = tu->filler_size_tmp + 1;
+
+       tu->delay_start_link_tmp = tu->extra_pclk_cycles_in_link_clk_tmp +
+                                       tu->lower_filler_size_tmp +
+                                       tu->extra_buffer_margin;
+
+       temp1_fp = drm_fixp_from_fraction(tu->delay_start_link_tmp, 1);
+       tu->delay_start_time_fp = drm_fixp_div(temp1_fp, tu->lclk_fp);
+
+       compare_result_1 = _tu_param_compare(tu->n_n_err_fp, tu->diff_abs_fp);
+       if (compare_result_1 == 2)
+               compare_result_1 = 1;
+       else
+               compare_result_1 = 0;
+
+       compare_result_2 = _tu_param_compare(tu->n_n_err_fp, tu->err_fp);
+       if (compare_result_2 == 2)
+               compare_result_2 = 1;
+       else
+               compare_result_2 = 0;
+
+       compare_result_3 = _tu_param_compare(tu->hbp_time_fp,
+                                       tu->delay_start_time_fp);
+       if (compare_result_3 == 2)
+               compare_result_3 = 0;
+       else
+               compare_result_3 = 1;
+
+       if (((tu->even_distribution == 1) ||
+                       ((tu->even_distribution_BF == 0) &&
+                       (tu->even_distribution_legacy == 0))) &&
+                       tu->n_err_fp >= 0 && tu->n_n_err_fp >= 0 &&
+                       compare_result_2 &&
+                       (compare_result_1 || (tu->min_hblank_violated == 1)) &&
+                       (tu->new_valid_boundary_link - 1) > 0 &&
+                       compare_result_3 &&
+                       (tu->delay_start_link_tmp <= 1023)) {
+               tu->upper_boundary_count = tu->i_upper_boundary_count;
+               tu->lower_boundary_count = tu->i_lower_boundary_count;
+               tu->err_fp = tu->n_n_err_fp;
+               tu->boundary_moderation_en = true;
+               tu->tu_size_desired = tu->tu_size;
+               tu->valid_boundary_link = tu->new_valid_boundary_link;
+               tu->effective_valid_recorded_fp = tu->effective_valid_fp;
+               tu->even_distribution_BF = 1;
+               tu->delay_start_link = tu->delay_start_link_tmp;
+       } else if (tu->boundary_mod_lower_err == 0) {
+               compare_result_1 = _tu_param_compare(tu->n_n_err_fp,
+                                                       tu->diff_abs_fp);
+               if (compare_result_1 == 2)
+                       tu->boundary_mod_lower_err = 1;
+       }
+}
+
+static void _dp_ctrl_calc_tu(struct dp_tu_calc_input *in,
+                                  struct dp_vc_tu_mapping_table *tu_table)
+{
+       struct tu_algo_data tu;
+       int compare_result_1, compare_result_2;
+       u64 temp = 0;
+       s64 temp_fp = 0, temp1_fp = 0, temp2_fp = 0;
+
+       s64 LCLK_FAST_SKEW_fp = drm_fixp_from_fraction(6, 10000); /* 0.0006 */
+       s64 const_p49_fp = drm_fixp_from_fraction(49, 100); /* 0.49 */
+       s64 const_p56_fp = drm_fixp_from_fraction(56, 100); /* 0.56 */
+       s64 RATIO_SCALE_fp = drm_fixp_from_fraction(1001, 1000);
+
+       u8 DP_BRUTE_FORCE = 1;
+       s64 BRUTE_FORCE_THRESHOLD_fp = drm_fixp_from_fraction(1, 10); /* 0.1 */
+       uint EXTRA_PIXCLK_CYCLE_DELAY = 4;
+       uint HBLANK_MARGIN = 4;
+
+       memset(&tu, 0, sizeof(tu));
+
+       dp_panel_update_tu_timings(in, &tu);
+
+       tu.err_fp = drm_fixp_from_fraction(1000, 1); /* 1000 */
+
+       temp1_fp = drm_fixp_from_fraction(4, 1);
+       temp2_fp = drm_fixp_mul(temp1_fp, tu.lclk_fp);
+       temp_fp = drm_fixp_div(temp2_fp, tu.pclk_fp);
+       tu.extra_buffer_margin = drm_fixp2int_ceil(temp_fp);
+
+       temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+       temp2_fp = drm_fixp_mul(tu.pclk_fp, temp1_fp);
+       temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
+       temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
+       tu.ratio_fp = drm_fixp_div(temp2_fp, tu.lclk_fp);
+
+       tu.original_ratio_fp = tu.ratio_fp;
+       tu.boundary_moderation_en = false;
+       tu.upper_boundary_count = 0;
+       tu.lower_boundary_count = 0;
+       tu.i_upper_boundary_count = 0;
+       tu.i_lower_boundary_count = 0;
+       tu.valid_lower_boundary_link = 0;
+       tu.even_distribution_BF = 0;
+       tu.even_distribution_legacy = 0;
+       tu.even_distribution = 0;
+       tu.delay_start_time_fp = 0;
+
+       tu.err_fp = drm_fixp_from_fraction(1000, 1);
+       tu.n_err_fp = 0;
+       tu.n_n_err_fp = 0;
+
+       tu.ratio = drm_fixp2int(tu.ratio_fp);
+       temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
+       div64_u64_rem(tu.lwidth_fp, temp1_fp, &temp2_fp);
+       if (temp2_fp != 0 &&
+                       !tu.ratio && tu.dsc_en == 0) {
+               tu.ratio_fp = drm_fixp_mul(tu.ratio_fp, RATIO_SCALE_fp);
+               tu.ratio = drm_fixp2int(tu.ratio_fp);
+               if (tu.ratio)
+                       tu.ratio_fp = drm_fixp_from_fraction(1, 1);
+       }
+
+       if (tu.ratio > 1)
+               tu.ratio = 1;
+
+       if (tu.ratio == 1)
+               goto tu_size_calc;
+
+       compare_result_1 = _tu_param_compare(tu.ratio_fp, const_p49_fp);
+       if (!compare_result_1 || compare_result_1 == 1)
+               compare_result_1 = 1;
+       else
+               compare_result_1 = 0;
+
+       compare_result_2 = _tu_param_compare(tu.ratio_fp, const_p56_fp);
+       if (!compare_result_2 || compare_result_2 == 2)
+               compare_result_2 = 1;
+       else
+               compare_result_2 = 0;
+
+       if (tu.dsc_en && compare_result_1 && compare_result_2) {
+               HBLANK_MARGIN += 4;
+               DRM_DEBUG_DP("Info: increase HBLANK_MARGIN to %d\n",
+                               HBLANK_MARGIN);
+       }
+
+tu_size_calc:
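+       /*
+        * Search TU sizes 32..64 for the one whose ratio * tu_size has the
+        * smallest round-up error, i.e. wastes the fewest symbol slots.
+        */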
+       for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) {
+               temp1_fp = drm_fixp_from_fraction(tu.tu_size, 1);
+               temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
+               temp = drm_fixp2int_ceil(temp2_fp);
+               temp1_fp = drm_fixp_from_fraction(temp, 1);
+               tu.n_err_fp = temp1_fp - temp2_fp;
+
+               if (tu.n_err_fp < tu.err_fp) {
+                       tu.err_fp = tu.n_err_fp;
+                       tu.tu_size_desired = tu.tu_size;
+               }
+       }
+
+       tu.tu_size_minus1 = tu.tu_size_desired - 1;
+
+       temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
+       temp2_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
+       tu.valid_boundary_link = drm_fixp2int_ceil(temp2_fp);
+
+       temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+       temp2_fp = tu.lwidth_fp;
+       temp2_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+       temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1);
+       temp2_fp = drm_fixp_div(temp2_fp, temp1_fp);
+       tu.n_tus = drm_fixp2int(temp2_fp);
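+       /*
+        * Round n_tus up when the fractional part is within 2^-20 of a
+        * whole TU (the low 32 bits of a drm_fixp value are the fraction).
+        */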
+       if ((temp2_fp & 0xFFFFFFFF) > 0xFFFFF000)
+               tu.n_tus += 1;
+
+       tu.even_distribution_legacy = tu.n_tus % tu.nlanes == 0 ? 1 : 0;
+       DRM_DEBUG_DP("Info: n_sym = %d, num_of_tus = %d\n",
+               tu.valid_boundary_link, tu.n_tus);
+
+       temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
+       temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp);
+       temp1_fp = drm_fixp_from_fraction(tu.valid_boundary_link, 1);
+       temp2_fp = temp1_fp - temp2_fp;
+       temp1_fp = drm_fixp_from_fraction(tu.n_tus + 1, 1);
+       temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+       temp = drm_fixp2int(temp2_fp);
+       if (temp && temp2_fp)
+               tu.extra_bytes = drm_fixp2int_ceil(temp2_fp);
+       else
+               tu.extra_bytes = 0;
+
+       temp1_fp = drm_fixp_from_fraction(tu.extra_bytes, 1);
+       temp2_fp = drm_fixp_from_fraction(8, tu.bpp);
+       temp1_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+
+       if (temp && temp1_fp)
+               tu.extra_pclk_cycles = drm_fixp2int_ceil(temp1_fp);
+       else
+               tu.extra_pclk_cycles = drm_fixp2int(temp1_fp);
+
+       temp1_fp = drm_fixp_div(tu.lclk_fp, tu.pclk_fp);
+       temp2_fp = drm_fixp_from_fraction(tu.extra_pclk_cycles, 1);
+       temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+       if (temp1_fp)
+               tu.extra_pclk_cycles_in_link_clk = drm_fixp2int_ceil(temp1_fp);
+       else
+               tu.extra_pclk_cycles_in_link_clk = drm_fixp2int(temp1_fp);
+
+       tu.filler_size = tu.tu_size_desired - tu.valid_boundary_link;
+
+       temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
+       tu.ratio_by_tu_fp = drm_fixp_mul(tu.ratio_fp, temp1_fp);
+
+       tu.delay_start_link = tu.extra_pclk_cycles_in_link_clk +
+                               tu.filler_size + tu.extra_buffer_margin;
+
+       tu.resulting_valid_fp =
+                       drm_fixp_from_fraction(tu.valid_boundary_link, 1);
+
+       temp1_fp = drm_fixp_from_fraction(tu.tu_size_desired, 1);
+       temp2_fp = drm_fixp_div(tu.resulting_valid_fp, temp1_fp);
+       tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp;
+
+       temp1_fp = drm_fixp_from_fraction(HBLANK_MARGIN, 1);
+       temp1_fp = tu.hbp_relative_to_pclk_fp - temp1_fp;
+       tu.hbp_time_fp = drm_fixp_div(temp1_fp, tu.pclk_fp);
+
+       temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1);
+       tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp);
+
+       compare_result_1 = _tu_param_compare(tu.hbp_time_fp,
+                                       tu.delay_start_time_fp);
+       if (compare_result_1 == 2) /* if (hbp_time_fp < delay_start_time_fp) */
+               tu.min_hblank_violated = 1;
+
+       tu.hactive_time_fp = drm_fixp_div(tu.lwidth_fp, tu.pclk_fp);
+
+       compare_result_2 = _tu_param_compare(tu.hactive_time_fp,
+                                               tu.delay_start_time_fp);
+       if (compare_result_2 == 2)
+               tu.min_hblank_violated = 1;
+
+       tu.delay_start_time_fp = 0;
+
+       /* brute force */
+
+       tu.delay_start_link_extra_pixclk = EXTRA_PIXCLK_CYCLE_DELAY;
+       tu.diff_abs_fp = tu.resulting_valid_fp - tu.ratio_by_tu_fp;
+
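+       /*
+        * Flush a |diff| smaller than 2^-16 to zero, presumably so that
+        * fixed-point rounding noise alone does not trigger the
+        * brute-force pass below.
+        */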
+       temp = drm_fixp2int(tu.diff_abs_fp);
+       if (!temp && tu.diff_abs_fp <= 0xffff)
+               tu.diff_abs_fp = 0;
+
+       /* if(diff_abs < 0) diff_abs *= -1 */
+       if (tu.diff_abs_fp < 0)
+               tu.diff_abs_fp = drm_fixp_mul(tu.diff_abs_fp, -1);
+
+       tu.boundary_mod_lower_err = 0;
+       if ((tu.diff_abs_fp != 0 &&
+                       ((tu.diff_abs_fp > BRUTE_FORCE_THRESHOLD_fp) ||
+                        (tu.even_distribution_legacy == 0) ||
+                        (DP_BRUTE_FORCE == 1))) ||
+                       (tu.min_hblank_violated == 1)) {
+               do {
+                       tu.err_fp = drm_fixp_from_fraction(1000, 1);
+
+                       temp1_fp = drm_fixp_div(tu.lclk_fp, tu.pclk_fp);
+                       temp2_fp = drm_fixp_from_fraction(
+                                       tu.delay_start_link_extra_pixclk, 1);
+                       temp1_fp = drm_fixp_mul(temp2_fp, temp1_fp);
+
+                       if (temp1_fp)
+                               tu.extra_buffer_margin =
+                                       drm_fixp2int_ceil(temp1_fp);
+                       else
+                               tu.extra_buffer_margin = 0;
+
+                       temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+                       temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp);
+
+                       if (temp1_fp)
+                               tu.n_symbols = drm_fixp2int_ceil(temp1_fp);
+                       else
+                               tu.n_symbols = 0;
+
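+                       /*
+                        * Exhaustive search over TU size (32..64) and the
+                        * upper/lower boundary counts (1..15 each);
+                        * _tu_valid_boundary_calc() records the best
+                        * candidate in *tu.
+                        */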
+                       for (tu.tu_size = 32; tu.tu_size <= 64; tu.tu_size++) {
+                               for (tu.i_upper_boundary_count = 1;
+                                       tu.i_upper_boundary_count <= 15;
+                                       tu.i_upper_boundary_count++) {
+                                       for (tu.i_lower_boundary_count = 1;
+                                               tu.i_lower_boundary_count <= 15;
+                                               tu.i_lower_boundary_count++) {
+                                               _tu_valid_boundary_calc(&tu);
+                                       }
+                               }
+                       }
+                       tu.delay_start_link_extra_pixclk--;
+               } while (tu.boundary_moderation_en != true &&
+                       tu.boundary_mod_lower_err == 1 &&
+                       tu.delay_start_link_extra_pixclk != 0);
+
+               if (tu.boundary_moderation_en == true) {
+                       temp1_fp = drm_fixp_from_fraction(
+                                       (tu.upper_boundary_count *
+                                       tu.valid_boundary_link +
+                                       tu.lower_boundary_count *
+                                       (tu.valid_boundary_link - 1)), 1);
+                       temp2_fp = drm_fixp_from_fraction(
+                                       (tu.upper_boundary_count +
+                                       tu.lower_boundary_count), 1);
+                       tu.resulting_valid_fp =
+                                       drm_fixp_div(temp1_fp, temp2_fp);
+
+                       temp1_fp = drm_fixp_from_fraction(
+                                       tu.tu_size_desired, 1);
+                       tu.ratio_by_tu_fp =
+                               drm_fixp_mul(tu.original_ratio_fp, temp1_fp);
+
+                       tu.valid_lower_boundary_link =
+                               tu.valid_boundary_link - 1;
+
+                       temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+                       temp1_fp = drm_fixp_mul(tu.lwidth_fp, temp1_fp);
+                       temp2_fp = drm_fixp_div(temp1_fp,
+                                               tu.resulting_valid_fp);
+                       tu.n_tus = drm_fixp2int(temp2_fp);
+
+                       tu.tu_size_minus1 = tu.tu_size_desired - 1;
+                       tu.even_distribution_BF = 1;
+
+                       temp1_fp =
+                               drm_fixp_from_fraction(tu.tu_size_desired, 1);
+                       temp2_fp =
+                               drm_fixp_div(tu.resulting_valid_fp, temp1_fp);
+                       tu.TU_ratio_err_fp = temp2_fp - tu.original_ratio_fp;
+               }
+       }
+
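+       /*
+        * Budget for link-clock fast skew (0.0006 of the active width,
+        * converted to link symbols) and fold it into the start delay when
+        * the stream and link clocks are asynchronous.
+        */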
+       temp2_fp = drm_fixp_mul(LCLK_FAST_SKEW_fp, tu.lwidth_fp);
+
+       if (temp2_fp)
+               temp = drm_fixp2int_ceil(temp2_fp);
+       else
+               temp = 0;
+
+       temp1_fp = drm_fixp_from_fraction(tu.nlanes, 1);
+       temp2_fp = drm_fixp_mul(tu.original_ratio_fp, temp1_fp);
+       temp1_fp = drm_fixp_from_fraction(tu.bpp, 8);
+       temp2_fp = drm_fixp_div(temp1_fp, temp2_fp);
+       temp1_fp = drm_fixp_from_fraction(temp, 1);
+       temp2_fp = drm_fixp_mul(temp1_fp, temp2_fp);
+       temp = drm_fixp2int(temp2_fp);
+
+       if (tu.async_en)
+               tu.delay_start_link += (int)temp;
+
+       temp1_fp = drm_fixp_from_fraction(tu.delay_start_link, 1);
+       tu.delay_start_time_fp = drm_fixp_div(temp1_fp, tu.lclk_fp);
+
+       /* OUTPUTS */
+       tu_table->valid_boundary_link       = tu.valid_boundary_link;
+       tu_table->delay_start_link          = tu.delay_start_link;
+       tu_table->boundary_moderation_en    = tu.boundary_moderation_en;
+       tu_table->valid_lower_boundary_link = tu.valid_lower_boundary_link;
+       tu_table->upper_boundary_count      = tu.upper_boundary_count;
+       tu_table->lower_boundary_count      = tu.lower_boundary_count;
+       tu_table->tu_size_minus1            = tu.tu_size_minus1;
+
+       DRM_DEBUG_DP("TU: valid_boundary_link: %d\n",
+                               tu_table->valid_boundary_link);
+       DRM_DEBUG_DP("TU: delay_start_link: %d\n",
+                               tu_table->delay_start_link);
+       DRM_DEBUG_DP("TU: boundary_moderation_en: %d\n",
+                       tu_table->boundary_moderation_en);
+       DRM_DEBUG_DP("TU: valid_lower_boundary_link: %d\n",
+                       tu_table->valid_lower_boundary_link);
+       DRM_DEBUG_DP("TU: upper_boundary_count: %d\n",
+                       tu_table->upper_boundary_count);
+       DRM_DEBUG_DP("TU: lower_boundary_count: %d\n",
+                       tu_table->lower_boundary_count);
+       DRM_DEBUG_DP("TU: tu_size_minus1: %d\n", tu_table->tu_size_minus1);
+}
+
+static void dp_ctrl_calc_tu_parameters(struct dp_ctrl_private *ctrl,
+               struct dp_vc_tu_mapping_table *tu_table)
+{
+       struct dp_tu_calc_input in;
+       struct drm_display_mode *drm_mode;
+
+       drm_mode = &ctrl->panel->dp_mode.drm_mode;
+
+       in.lclk = ctrl->link->link_params.rate / 1000;
+       in.pclk_khz = drm_mode->clock;
+       in.hactive = drm_mode->hdisplay;
+       in.hporch = drm_mode->htotal - drm_mode->hdisplay;
+       in.nlanes = ctrl->link->link_params.num_lanes;
+       in.bpp = ctrl->panel->dp_mode.bpp;
+       in.pixel_enc = 444;
+       in.dsc_en = 0;
+       in.async_en = 0;
+       in.fec_en = 0;
+       in.num_of_dsc_slices = 0;
+       in.compress_ratio = 100;
+
+       _dp_ctrl_calc_tu(&in, tu_table);
+}
+
+static void dp_ctrl_setup_tr_unit(struct dp_ctrl_private *ctrl)
+{
+       u32 dp_tu = 0x0;
+       u32 valid_boundary = 0x0;
+       u32 valid_boundary2 = 0x0;
+       struct dp_vc_tu_mapping_table tu_calc_table;
+
+       dp_ctrl_calc_tu_parameters(ctrl, &tu_calc_table);
+
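+       /*
+        * Pack the computed TU parameters into the three words programmed
+        * by the catalog layer; the shift positions below presumably
+        * mirror the controller's TU and VALID_BOUNDARY register fields.
+        */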
+       dp_tu |= tu_calc_table.tu_size_minus1;
+       valid_boundary |= tu_calc_table.valid_boundary_link;
+       valid_boundary |= (tu_calc_table.delay_start_link << 16);
+
+       valid_boundary2 |= (tu_calc_table.valid_lower_boundary_link << 1);
+       valid_boundary2 |= (tu_calc_table.upper_boundary_count << 16);
+       valid_boundary2 |= (tu_calc_table.lower_boundary_count << 20);
+
+       if (tu_calc_table.boundary_moderation_en)
+               valid_boundary2 |= BIT(0);
+
+       pr_debug("dp_tu=0x%x, valid_boundary=0x%x, valid_boundary2=0x%x\n",
+                       dp_tu, valid_boundary, valid_boundary2);
+
+       dp_catalog_ctrl_update_transfer_unit(ctrl->catalog,
+                               dp_tu, valid_boundary, valid_boundary2);
+}
+
+static int dp_ctrl_wait4video_ready(struct dp_ctrl_private *ctrl)
+{
+       int ret = 0;
+
+       if (!wait_for_completion_timeout(&ctrl->video_comp,
+                               WAIT_FOR_VIDEO_READY_TIMEOUT_JIFFIES)) {
+               DRM_ERROR("wait4video timedout\n");
+               ret = -ETIMEDOUT;
+       }
+       return ret;
+}
+
+static int dp_ctrl_update_vx_px(struct dp_ctrl_private *ctrl)
+{
+       struct dp_link *link = ctrl->link;
+       int ret = 0, lane, lane_cnt;
+       u8 buf[4];
+       u32 max_level_reached = 0;
+       u32 voltage_swing_level = link->phy_params.v_level;
+       u32 pre_emphasis_level = link->phy_params.p_level;
+
+       ret = dp_catalog_ctrl_update_vx_px(ctrl->catalog,
+               voltage_swing_level, pre_emphasis_level);
+
+       if (ret)
+               return ret;
+
+       if (voltage_swing_level >= DP_TRAIN_VOLTAGE_SWING_MAX) {
+               DRM_DEBUG_DP("max. voltage swing level reached %d\n",
+                               voltage_swing_level);
+               max_level_reached |= DP_TRAIN_MAX_SWING_REACHED;
+       }
+
+       if (pre_emphasis_level >= DP_TRAIN_PRE_EMPHASIS_MAX) {
+               DRM_DEBUG_DP("max. pre-emphasis level reached %d\n",
+                               pre_emphasis_level);
+               max_level_reached  |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+       }
+
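+       /*
+        * DPCD TRAINING_LANEx_SET packs the voltage swing level in bits
+        * 1:0 and the pre-emphasis level in bits 4:3, alongside the
+        * MAX_*_REACHED flags computed above.
+        */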
+       pre_emphasis_level <<= DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+       lane_cnt = ctrl->link->link_params.num_lanes;
+       for (lane = 0; lane < lane_cnt; lane++)
+               buf[lane] = voltage_swing_level | pre_emphasis_level
+                               | max_level_reached;
+
+       DRM_DEBUG_DP("sink: p|v=0x%x\n", voltage_swing_level
+                                       | pre_emphasis_level);
+       ret = drm_dp_dpcd_write(ctrl->aux, DP_TRAINING_LANE0_SET,
+                                       buf, lane_cnt);
+       if (ret == lane_cnt)
+               ret = 0;
+
+       return ret;
+}
+
+static bool dp_ctrl_train_pattern_set(struct dp_ctrl_private *ctrl,
+               u8 pattern)
+{
+       u8 buf;
+       int ret = 0;
+
+       DRM_DEBUG_DP("sink: pattern=%x\n", pattern);
+
+       buf = pattern;
+
+       if (pattern && pattern != DP_TRAINING_PATTERN_4)
+               buf |= DP_LINK_SCRAMBLING_DISABLE;
+
+       ret = drm_dp_dpcd_writeb(ctrl->aux, DP_TRAINING_PATTERN_SET, buf);
+       return ret == 1;
+}
+
+static int dp_ctrl_read_link_status(struct dp_ctrl_private *ctrl,
+                                   u8 *link_status)
+{
+       int len = 0;
+       u32 const offset = DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS;
+       u32 link_status_read_max_retries = 100;
+
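+       /*
+        * link_status[] starts at DP_LANE0_1_STATUS, so 'offset' indexes
+        * the LANE_ALIGN_STATUS_UPDATED byte. Poll until the sink reports
+        * a stable status (LINK_STATUS_UPDATED clear) or retries run out.
+        */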
+       while (--link_status_read_max_retries) {
+               len = drm_dp_dpcd_read_link_status(ctrl->aux,
+                       link_status);
+               if (len != DP_LINK_STATUS_SIZE) {
+                       DRM_ERROR("DP link status read failed, err: %d\n", len);
+                       return len;
+               }
+
+               if (!(link_status[offset] & DP_LINK_STATUS_UPDATED))
+                       return 0;
+       }
+
+       return -ETIMEDOUT;
+}
+
+static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
+               struct dp_cr_status *cr, int *training_step)
+{
+       int tries, old_v_level, ret = 0;
+       u8 link_status[DP_LINK_STATUS_SIZE];
+       int const maximum_retries = 4;
+
+       dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+
+       *training_step = DP_TRAINING_1;
+
+       ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, DP_TRAINING_PATTERN_1);
+       if (ret)
+               return ret;
+       dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_1 |
+               DP_LINK_SCRAMBLING_DISABLE);
+
+       ret = dp_ctrl_update_vx_px(ctrl);
+       if (ret)
+               return ret;
+
+       old_v_level = ctrl->link->phy_params.v_level;
+       for (tries = 0; tries < maximum_retries; tries++) {
+               drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd);
+
+               ret = dp_ctrl_read_link_status(ctrl, link_status);
+               if (ret)
+                       return ret;
+
+               cr->lane_0_1 = link_status[0];
+               cr->lane_2_3 = link_status[1];
+
+               if (drm_dp_clock_recovery_ok(link_status,
+                       ctrl->link->link_params.num_lanes)) {
+                       return 0;
+               }
+
+               if (ctrl->link->phy_params.v_level >=
+                       DP_TRAIN_VOLTAGE_SWING_MAX) {
+                       DRM_ERROR_RATELIMITED("max v_level reached\n");
+                       return -EAGAIN;
+               }
+
+               if (old_v_level != ctrl->link->phy_params.v_level) {
+                       tries = 0;
+                       old_v_level = ctrl->link->phy_params.v_level;
+               }
+
+               DRM_DEBUG_DP("clock recovery not done, adjusting vx px\n");
+
+               dp_link_adjust_levels(ctrl->link, link_status);
+               ret = dp_ctrl_update_vx_px(ctrl);
+               if (ret)
+                       return ret;
+       }
+
+       DRM_ERROR("max tries reached\n");
+       return -ETIMEDOUT;
+}
+
+static int dp_ctrl_link_rate_down_shift(struct dp_ctrl_private *ctrl)
+{
+       int ret = 0;
+
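+       /*
+        * Step down one standard link rate: 8.1 (HBR3) -> 5.4 (HBR2) ->
+        * 2.7 (HBR) -> 1.62 Gbps (RBR); RBR is the floor.
+        */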
+       switch (ctrl->link->link_params.rate) {
+       case 810000:
+               ctrl->link->link_params.rate = 540000;
+               break;
+       case 540000:
+               ctrl->link->link_params.rate = 270000;
+               break;
+       case 270000:
+               ctrl->link->link_params.rate = 162000;
+               break;
+       case 162000:
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       if (!ret)
+               DRM_DEBUG_DP("new rate=0x%x\n", ctrl->link->link_params.rate);
+
+       return ret;
+}
+
+static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl)
+{
+       if (ctrl->link->link_params.num_lanes == 1)
+               return -1;
+
+       ctrl->link->link_params.num_lanes /= 2;
+       ctrl->link->link_params.rate = ctrl->panel->link_info.rate;
+
+       ctrl->link->phy_params.p_level = 0;
+       ctrl->link->phy_params.v_level = 0;
+
+       return 0;
+}
+
+static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
+{
+       dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
+       drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
+}
+
+static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
+               struct dp_cr_status *cr, int *training_step)
+{
+       int tries = 0, ret = 0;
+       char pattern;
+       int const maximum_retries = 5;
+       u8 link_status[DP_LINK_STATUS_SIZE];
+
+       dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+
+       *training_step = DP_TRAINING_2;
+
+       if (drm_dp_tps3_supported(ctrl->panel->dpcd))
+               pattern = DP_TRAINING_PATTERN_3;
+       else
+               pattern = DP_TRAINING_PATTERN_2;
+
+       ret = dp_ctrl_update_vx_px(ctrl);
+       if (ret)
+               return ret;
+
+       ret = dp_catalog_ctrl_set_pattern(ctrl->catalog, pattern);
+       if (ret)
+               return ret;
+
+       dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
+
+       for (tries = 0; tries <= maximum_retries; tries++) {
+               drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
+
+               ret = dp_ctrl_read_link_status(ctrl, link_status);
+               if (ret)
+                       return ret;
+               cr->lane_0_1 = link_status[0];
+               cr->lane_2_3 = link_status[1];
+
+               if (drm_dp_channel_eq_ok(link_status,
+                       ctrl->link->link_params.num_lanes)) {
+                       return 0;
+               }
+
+               dp_link_adjust_levels(ctrl->link, link_status);
+               ret = dp_ctrl_update_vx_px(ctrl);
+               if (ret)
+                       return ret;
+
+       }
+
+       return -ETIMEDOUT;
+}
+
+static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl);
+
+static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
+               struct dp_cr_status *cr, int *training_step)
+{
+       int ret = 0;
+       u8 encoding = DP_SET_ANSI_8B10B;
+       struct dp_link_info link_info = {0};
+
+       dp_ctrl_config_ctrl(ctrl);
+
+       link_info.num_lanes = ctrl->link->link_params.num_lanes;
+       link_info.rate = ctrl->link->link_params.rate;
+       link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
+
+       dp_aux_link_configure(ctrl->aux, &link_info);
+       drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
+                               &encoding, 1);
+
+       ret = dp_ctrl_link_train_1(ctrl, cr, training_step);
+       if (ret) {
+               DRM_ERROR("link training #1 failed. ret=%d\n", ret);
+               goto end;
+       }
+
+       /* print success info as this is a result of user-initiated action */
+       DRM_DEBUG_DP("link training #1 successful\n");
+
+       ret = dp_ctrl_link_train_2(ctrl, cr, training_step);
+       if (ret) {
+               DRM_ERROR("link training #2 failed. ret=%d\n", ret);
+               goto end;
+       }
+
+       /* print success info as this is a result of user-initiated action */
+       DRM_DEBUG_DP("link training #2 successful\n");
+
+end:
+       dp_catalog_ctrl_state_ctrl(ctrl->catalog, 0);
+
+       return ret;
+}
+
+static int dp_ctrl_setup_main_link(struct dp_ctrl_private *ctrl,
+               struct dp_cr_status *cr, int *training_step)
+{
+       int ret = 0;
+
+       dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, true);
+
+       if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
+               return ret;
+
+       /*
+        * As part of previous calls, DP controller state might have
+        * transitioned to PUSH_IDLE. In order to start transmitting
+        * a link training pattern, we have to first do soft reset.
+        */
+       dp_catalog_ctrl_reset(ctrl->catalog);
+
+       ret = dp_ctrl_link_train(ctrl, cr, training_step);
+
+       return ret;
+}
+
+static void dp_ctrl_set_clock_rate(struct dp_ctrl_private *ctrl,
+                       enum dp_pm_type module, char *name, unsigned long rate)
+{
+       u32 num = ctrl->parser->mp[module].num_clk;
+       struct dss_clk *cfg = ctrl->parser->mp[module].clk_config;
+
+       while (num && strcmp(cfg->clk_name, name)) {
+               num--;
+               cfg++;
+       }
+
+       DRM_DEBUG_DP("setting rate=%lu on clk=%s\n", rate, name);
+
+       if (num)
+               cfg->rate = rate;
+       else
+               DRM_ERROR("%s clock doesn't exit to set rate %lu\n",
+                               name, rate);
+}
+
+static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
+{
+       int ret = 0;
+       struct dp_io *dp_io = &ctrl->parser->io;
+       struct phy *phy = dp_io->phy;
+       struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+
+       opts_dp->lanes = ctrl->link->link_params.num_lanes;
+       opts_dp->link_rate = ctrl->link->link_params.rate / 100;
+       dp_ctrl_set_clock_rate(ctrl, DP_CTRL_PM, "ctrl_link",
+                                       ctrl->link->link_params.rate * 1000);
+
+       phy_configure(phy, &dp_io->phy_opts);
+       phy_power_on(phy);
+
+       ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, true);
+       if (ret)
+               DRM_ERROR("Unable to start link clocks. ret=%d\n", ret);
+
+       DRM_DEBUG_DP("link rate=%d pixel_clk=%d\n",
+               ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
+
+       return ret;
+}
+
+static int dp_ctrl_enable_stream_clocks(struct dp_ctrl_private *ctrl)
+{
+       int ret = 0;
+
+       dp_ctrl_set_clock_rate(ctrl, DP_STREAM_PM, "stream_pixel",
+                                       ctrl->dp_ctrl.pixel_rate * 1000);
+
+       ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, true);
+       if (ret)
+               DRM_ERROR("Unabled to start pixel clocks. ret=%d\n", ret);
+
+       DRM_DEBUG_DP("link rate=%d pixel_clk=%d\n",
+                       ctrl->link->link_params.rate, ctrl->dp_ctrl.pixel_rate);
+
+       return ret;
+}
+
+int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip)
+{
+       struct dp_ctrl_private *ctrl;
+       struct dp_io *dp_io;
+       struct phy *phy;
+
+       if (!dp_ctrl) {
+               DRM_ERROR("Invalid input data\n");
+               return -EINVAL;
+       }
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+       dp_io = &ctrl->parser->io;
+       phy = dp_io->phy;
+
+       ctrl->dp_ctrl.orientation = flip;
+
+       dp_catalog_ctrl_phy_reset(ctrl->catalog);
+       phy_init(phy);
+       dp_catalog_ctrl_enable_irq(ctrl->catalog, true);
+
+       return 0;
+}
+
+/**
+ * dp_ctrl_host_deinit() - Uninitialize DP controller
+ * @dp_ctrl: Display Port Driver data
+ *
+ * Perform required steps to uninitialize DP controller
+ * and its resources.
+ */
+void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
+{
+       struct dp_ctrl_private *ctrl;
+
+       if (!dp_ctrl) {
+               DRM_ERROR("Invalid input data\n");
+               return;
+       }
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+       dp_catalog_ctrl_enable_irq(ctrl->catalog, false);
+
+       DRM_DEBUG_DP("Host deinitialized successfully\n");
+}
+
+static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
+{
+       u8 *dpcd = ctrl->panel->dpcd;
+       u32 edid_quirks = 0;
+
+       edid_quirks = drm_dp_get_edid_quirks(ctrl->panel->edid);
+       /*
+        * For better interop experience, use a fixed NVID=0x8000
+        * whenever connected to a VGA dongle downstream.
+        */
+       if (drm_dp_is_branch(dpcd))
+               return (drm_dp_has_quirk(&ctrl->panel->desc, edid_quirks,
+                               DP_DPCD_QUIRK_CONSTANT_N));
+
+       return false;
+}
+
+static int dp_ctrl_reinitialize_mainlink(struct dp_ctrl_private *ctrl)
+{
+       int ret = 0;
+       struct dp_io *dp_io = &ctrl->parser->io;
+       struct phy *phy = dp_io->phy;
+       struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+
+       dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+       opts_dp->lanes = ctrl->link->link_params.num_lanes;
+       phy_configure(phy, &dp_io->phy_opts);
+       /*
+        * Disable and re-enable the mainlink clock since the
+        * link clock might have been adjusted as part of the
+        * link maintenance.
+        */
+       ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+       if (ret) {
+               DRM_ERROR("Failed to disable clocks. ret=%d\n", ret);
+               return ret;
+       }
+       phy_power_off(phy);
+       /* hw recommended delay before re-enabling clocks */
+       msleep(20);
+
+       ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+       if (ret) {
+               DRM_ERROR("Failed to enable mainlink clks. ret=%d\n", ret);
+               return ret;
+       }
+
+       return ret;
+}
+
+static int dp_ctrl_link_maintenance(struct dp_ctrl_private *ctrl)
+{
+       int ret = 0;
+       struct dp_cr_status cr;
+       int training_step = DP_TRAINING_NONE;
+
+       dp_ctrl_push_idle(&ctrl->dp_ctrl);
+       dp_catalog_ctrl_reset(ctrl->catalog);
+
+       ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+
+       ret = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
+       if (ret)
+               goto end;
+
+       dp_ctrl_clear_training_pattern(ctrl);
+
+       dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+
+       ret = dp_ctrl_wait4video_ready(ctrl);
+end:
+       return ret;
+}
+
+static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl)
+{
+       int ret = 0;
+
+       if (!ctrl->link->phy_params.phy_test_pattern_sel) {
+               DRM_DEBUG_DP("no test pattern selected by sink\n");
+               return ret;
+       }
+
+       /*
+        * The global reset needs the DP link clocks running, so
+        * dp_ctrl_off() issues it just before the link and core
+        * clocks are disabled.
+        */
+       ret = dp_ctrl_off(&ctrl->dp_ctrl);
+       if (ret) {
+               DRM_ERROR("failed to disable DP controller\n");
+               return ret;
+       }
+
+       ret = dp_ctrl_on_link(&ctrl->dp_ctrl);
+       if (!ret)
+               ret = dp_ctrl_on_stream(&ctrl->dp_ctrl);
+       else
+               DRM_ERROR("failed to enable DP link controller\n");
+
+       return ret;
+}
+
+static bool dp_ctrl_send_phy_test_pattern(struct dp_ctrl_private *ctrl)
+{
+       bool success = false;
+       u32 pattern_sent = 0x0;
+       u32 pattern_requested = ctrl->link->phy_params.phy_test_pattern_sel;
+
+       DRM_DEBUG_DP("request: 0x%x\n", pattern_requested);
+
+       if (dp_catalog_ctrl_update_vx_px(ctrl->catalog,
+                       ctrl->link->phy_params.v_level,
+                       ctrl->link->phy_params.p_level)) {
+               DRM_ERROR("Failed to set v/p levels\n");
+               return false;
+       }
+       dp_catalog_ctrl_send_phy_pattern(ctrl->catalog, pattern_requested);
+       dp_ctrl_update_vx_px(ctrl);
+       dp_link_send_test_response(ctrl->link);
+
+       pattern_sent = dp_catalog_ctrl_read_phy_pattern(ctrl->catalog);
+
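+       /*
+        * Compare the pattern the PHY reports it is emitting against the
+        * DPCD request to decide pass/fail.
+        */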
+       switch (pattern_sent) {
+       case MR_LINK_TRAINING1:
+               success = (pattern_requested ==
+                               DP_PHY_TEST_PATTERN_D10_2);
+               break;
+       case MR_LINK_SYMBOL_ERM:
+               success = ((pattern_requested ==
+                       DP_PHY_TEST_PATTERN_ERROR_COUNT) ||
+                               (pattern_requested ==
+                               DP_PHY_TEST_PATTERN_CP2520));
+               break;
+       case MR_LINK_PRBS7:
+               success = (pattern_requested ==
+                               DP_PHY_TEST_PATTERN_PRBS7);
+               break;
+       case MR_LINK_CUSTOM80:
+               success = (pattern_requested ==
+                               DP_PHY_TEST_PATTERN_80BIT_CUSTOM);
+               break;
+       case MR_LINK_TRAINING4:
+               success = (pattern_requested ==
+                               DP_PHY_TEST_PATTERN_SEL_MASK);
+               break;
+       default:
+               success = false;
+       }
+
+       DRM_DEBUG_DP("%s: test->0x%x\n", success ? "success" : "failed",
+                                               pattern_requested);
+       return success;
+}
+
+void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl)
+{
+       struct dp_ctrl_private *ctrl;
+       u32 sink_request = 0x0;
+
+       if (!dp_ctrl) {
+               DRM_ERROR("invalid input\n");
+               return;
+       }
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+       sink_request = ctrl->link->sink_request;
+
+       if (sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
+               DRM_DEBUG_DP("PHY_TEST_PATTERN request\n");
+               if (dp_ctrl_process_phy_test_request(ctrl)) {
+                       DRM_ERROR("process phy_test_req failed\n");
+                       return;
+               }
+       }
+
+       if (sink_request & DP_LINK_STATUS_UPDATED) {
+               if (dp_ctrl_link_maintenance(ctrl)) {
+                       DRM_ERROR("LM failed: TEST_LINK_TRAINING\n");
+                       return;
+               }
+       }
+
+       if (sink_request & DP_TEST_LINK_TRAINING) {
+               dp_link_send_test_response(ctrl->link);
+               if (dp_ctrl_link_maintenance(ctrl)) {
+                       DRM_ERROR("LM failed: TEST_LINK_TRAINING\n");
+                       return;
+               }
+       }
+}
+
+int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl)
+{
+       int rc = 0;
+       struct dp_ctrl_private *ctrl;
+       u32 rate = 0;
+       int link_train_max_retries = 5;
+       u32 const phy_cts_pixel_clk_khz = 148500;
+       struct dp_cr_status cr;
+       unsigned int training_step;
+
+       if (!dp_ctrl)
+               return -EINVAL;
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+       rate = ctrl->panel->link_info.rate;
+
+       dp_power_clk_enable(ctrl->power, DP_CORE_PM, true);
+
+       if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
+               DRM_DEBUG_DP("using phy test link parameters\n");
+               if (!ctrl->panel->dp_mode.drm_mode.clock)
+                       ctrl->dp_ctrl.pixel_rate = phy_cts_pixel_clk_khz;
+       } else {
+               ctrl->link->link_params.rate = rate;
+               ctrl->link->link_params.num_lanes =
+                       ctrl->panel->link_info.num_lanes;
+               ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+       }
+
+       DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n",
+               ctrl->link->link_params.rate,
+               ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
+
+       rc = dp_ctrl_enable_mainlink_clocks(ctrl);
+       if (rc)
+               return rc;
+
+       ctrl->link->phy_params.p_level = 0;
+       ctrl->link->phy_params.v_level = 0;
+
+       while (--link_train_max_retries &&
+               !atomic_read(&ctrl->dp_ctrl.aborted)) {
+               rc = dp_ctrl_reinitialize_mainlink(ctrl);
+               if (rc) {
+                       DRM_ERROR("Failed to reinitialize mainlink. rc=%d\n",
+                                       rc);
+                       break;
+               }
+
+               training_step = DP_TRAINING_NONE;
+               rc = dp_ctrl_setup_main_link(ctrl, &cr, &training_step);
+               if (rc == 0) {
+                       /* training completed successfully */
+                       break;
+               } else if (training_step == DP_TRAINING_1) {
+                       /* link train_1 failed */
+                       rc = dp_ctrl_link_rate_down_shift(ctrl);
+                       if (rc < 0) { /* already in RBR = 1.6G */
+                               if (cr.lane_0_1 & DP_LANE0_1_CR_DONE) {
+                                       /*
+                                        * some lanes are ready,
+                                        * reduce lane number
+                                        */
+                                       rc = dp_ctrl_link_lane_down_shift(ctrl);
+                                       if (rc < 0) { /* lane == 1 already */
+                                               /* end with failure */
+                                               break;
+                                       }
+                               } else {
+                                       /* end with failure */
+                                       break; /* lane == 1 already */
+                               }
+                       }
+               } else if (training_step == DP_TRAINING_2) {
+                       /* link train_2 failed, lower lane rate */
+                       rc = dp_ctrl_link_lane_down_shift(ctrl);
+                       if (rc < 0) {
+                               /* end with failure */
+                               break; /* lane == 1 already */
+                       }
+               }
+       }
+
+       if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN)
+               return rc;
+
+       /* stop transmitting the training pattern */
+       dp_ctrl_clear_training_pattern(ctrl);
+
+       /*
+        * Keep transmitting the idle pattern until video is ready,
+        * to keep the main link from losing sync.
+        */
+       if (rc == 0)  /* link train successfully */
+               dp_ctrl_push_idle(dp_ctrl);
+
+       return rc;
+}
+
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl)
+{
+       u32 rate = 0;
+       int ret = 0;
+       bool mainlink_ready = false;
+       struct dp_ctrl_private *ctrl;
+
+       if (!dp_ctrl)
+               return -EINVAL;
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+       rate = ctrl->panel->link_info.rate;
+
+       ctrl->link->link_params.rate = rate;
+       ctrl->link->link_params.num_lanes = ctrl->panel->link_info.num_lanes;
+       ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock;
+
+       DRM_DEBUG_DP("rate=%d, num_lanes=%d, pixel_rate=%d\n",
+               ctrl->link->link_params.rate,
+               ctrl->link->link_params.num_lanes, ctrl->dp_ctrl.pixel_rate);
+
+       if (!dp_power_clk_status(ctrl->power, DP_CTRL_PM)) { /* link clk is off */
+               ret = dp_ctrl_enable_mainlink_clocks(ctrl);
+               if (ret) {
+                       DRM_ERROR("Failed to start link clocks. ret=%d\n", ret);
+                       goto end;
+               }
+       }
+
+       ret = dp_ctrl_enable_stream_clocks(ctrl);
+       if (ret) {
+               DRM_ERROR("Failed to start pixel clocks. ret=%d\n", ret);
+               goto end;
+       }
+
+       if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
+               dp_ctrl_send_phy_test_pattern(ctrl);
+               return 0;
+       }
+
+       /*
+        * Set up transfer unit values and set controller state to send
+        * video.
+        */
+       dp_ctrl_configure_source_params(ctrl);
+
+       dp_catalog_ctrl_config_msa(ctrl->catalog,
+               ctrl->link->link_params.rate,
+               ctrl->dp_ctrl.pixel_rate, dp_ctrl_use_fixed_nvid(ctrl));
+
+       reinit_completion(&ctrl->video_comp);
+
+       dp_ctrl_setup_tr_unit(ctrl);
+
+       dp_catalog_ctrl_state_ctrl(ctrl->catalog, DP_STATE_CTRL_SEND_VIDEO);
+
+       ret = dp_ctrl_wait4video_ready(ctrl);
+       if (ret)
+               return ret;
+
+       mainlink_ready = dp_catalog_ctrl_mainlink_ready(ctrl->catalog);
+       DRM_DEBUG_DP("mainlink %s\n", mainlink_ready ? "READY" : "NOT READY");
+
+end:
+       return ret;
+}
+
+int dp_ctrl_off(struct dp_ctrl *dp_ctrl)
+{
+       struct dp_ctrl_private *ctrl;
+       struct dp_io *dp_io;
+       struct phy *phy;
+       int ret = 0;
+
+       if (!dp_ctrl)
+               return -EINVAL;
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+       dp_io = &ctrl->parser->io;
+       phy = dp_io->phy;
+
+       dp_catalog_ctrl_mainlink_ctrl(ctrl->catalog, false);
+
+       dp_catalog_ctrl_reset(ctrl->catalog);
+
+       ret = dp_power_clk_enable(ctrl->power, DP_STREAM_PM, false);
+       if (ret)
+               DRM_ERROR("Failed to disable pixel clocks. ret=%d\n", ret);
+
+       ret = dp_power_clk_enable(ctrl->power, DP_CTRL_PM, false);
+       if (ret)
+               DRM_ERROR("Failed to disable link clocks. ret=%d\n", ret);
+
+       phy_power_off(phy);
+       phy_exit(phy);
+
+       DRM_DEBUG_DP("DP off done\n");
+       return ret;
+}
+
+void dp_ctrl_isr(struct dp_ctrl *dp_ctrl)
+{
+       struct dp_ctrl_private *ctrl;
+       u32 isr;
+
+       if (!dp_ctrl)
+               return;
+
+       ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl);
+
+       isr = dp_catalog_ctrl_get_interrupt(ctrl->catalog);
+
+       if (isr & DP_CTRL_INTR_READY_FOR_VIDEO) {
+               DRM_DEBUG_DP("dp_video_ready\n");
+               complete(&ctrl->video_comp);
+       }
+
+       if (isr & DP_CTRL_INTR_IDLE_PATTERN_SENT) {
+               DRM_DEBUG_DP("idle_patterns_sent\n");
+               complete(&ctrl->idle_comp);
+       }
+}
+
+struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
+                       struct dp_panel *panel, struct drm_dp_aux *aux,
+                       struct dp_power *power, struct dp_catalog *catalog,
+                       struct dp_parser *parser)
+{
+       struct dp_ctrl_private *ctrl;
+
+       if (!dev || !panel || !aux ||
+           !link || !catalog) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+       if (!ctrl) {
+               DRM_ERROR("Mem allocation failure\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       init_completion(&ctrl->idle_comp);
+       init_completion(&ctrl->video_comp);
+
+       /* in parameters */
+       ctrl->parser   = parser;
+       ctrl->panel    = panel;
+       ctrl->power    = power;
+       ctrl->aux      = aux;
+       ctrl->link     = link;
+       ctrl->catalog  = catalog;
+       ctrl->dev      = dev;
+
+       return &ctrl->dp_ctrl;
+}
+
+void dp_ctrl_put(struct dp_ctrl *dp_ctrl)
+{
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h
new file mode 100644 (file)
index 0000000..f60ba93
--- /dev/null
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_CTRL_H_
+#define _DP_CTRL_H_
+
+#include "dp_aux.h"
+#include "dp_panel.h"
+#include "dp_link.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+
+struct dp_ctrl {
+       bool orientation;
+       atomic_t aborted;
+       u32 pixel_rate;
+};
+
+int dp_ctrl_host_init(struct dp_ctrl *dp_ctrl, bool flip);
+void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl);
+int dp_ctrl_off(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_isr(struct dp_ctrl *dp_ctrl);
+void dp_ctrl_handle_sink_request(struct dp_ctrl *dp_ctrl);
+struct dp_ctrl *dp_ctrl_get(struct device *dev, struct dp_link *link,
+                       struct dp_panel *panel, struct drm_dp_aux *aux,
+                       struct dp_power *power, struct dp_catalog *catalog,
+                       struct dp_parser *parser);
+void dp_ctrl_put(struct dp_ctrl *dp_ctrl);
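+
+/*
+ * Rough call order, as exercised by dp_ctrl_process_phy_test_request() in
+ * dp_ctrl.c: dp_ctrl_get() -> dp_ctrl_host_init() -> dp_ctrl_on_link() ->
+ * dp_ctrl_on_stream() -> ... -> dp_ctrl_off() -> dp_ctrl_put().
+ */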
+
+#endif /* _DP_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
new file mode 100644 (file)
index 0000000..84670bc
--- /dev/null
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)"[drm-dp] %s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_file.h>
+
+#include "dp_parser.h"
+#include "dp_catalog.h"
+#include "dp_aux.h"
+#include "dp_ctrl.h"
+#include "dp_debug.h"
+#include "dp_display.h"
+
+#define DEBUG_NAME "msm_dp"
+
+struct dp_debug_private {
+       struct dentry *root;
+
+       struct dp_usbpd *usbpd;
+       struct dp_link *link;
+       struct dp_panel *panel;
+       struct drm_connector **connector;
+       struct device *dev;
+       struct drm_device *drm_dev;
+
+       struct dp_debug dp_debug;
+};
+
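+/*
+ * Accumulate snprintf() output into the 4K buffer: advance *len, shrink
+ * *max_size to the space remaining, and fail once the output would have
+ * been truncated.
+ */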
+static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len)
+{
+       if (rc >= *max_size) {
+               DRM_ERROR("buffer overflow\n");
+               return -EINVAL;
+       }
+       *len += rc;
+       *max_size = SZ_4K - *len;
+
+       return 0;
+}
+
+static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff,
+               size_t count, loff_t *ppos)
+{
+       struct dp_debug_private *debug = file->private_data;
+       char *buf;
+       u32 len = 0, rc = 0;
+       u64 lclk = 0;
+       u32 max_size = SZ_4K;
+       u32 link_params_rate;
+       struct drm_display_mode *drm_mode;
+
+       if (!debug)
+               return -ENODEV;
+
+       if (*ppos)
+               return 0;
+
+       buf = kzalloc(SZ_4K, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       drm_mode = &debug->panel->dp_mode.drm_mode;
+
+       rc = snprintf(buf + len, max_size, "\tname = %s\n", DEBUG_NAME);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\tdp_panel\n\t\tmax_pclk_khz = %d\n",
+                       debug->panel->max_pclk_khz);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\tdrm_dp_link\n\t\trate = %u\n",
+                       debug->panel->link_info.rate);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                        "\t\tnum_lanes = %u\n",
+                       debug->panel->link_info.num_lanes);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tcapabilities = %lu\n",
+                       debug->panel->link_info.capabilities);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\tdp_panel_info:\n\t\tactive = %dx%d\n",
+                       drm_mode->hdisplay,
+                       drm_mode->vdisplay);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tback_porch = %dx%d\n",
+                       drm_mode->htotal - drm_mode->hsync_end,
+                       drm_mode->vtotal - drm_mode->vsync_end);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tfront_porch = %dx%d\n",
+                       drm_mode->hsync_start - drm_mode->hdisplay,
+                       drm_mode->vsync_start - drm_mode->vdisplay);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tsync_width = %dx%d\n",
+                       drm_mode->hsync_end - drm_mode->hsync_start,
+                       drm_mode->vsync_end - drm_mode->vsync_start);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tactive_low = %dx%d\n",
+                       debug->panel->dp_mode.h_active_low,
+                       debug->panel->dp_mode.v_active_low);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\th_skew = %d\n",
+                       drm_mode->hskew);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\trefresh rate = %d\n",
+                       drm_mode_vrefresh(drm_mode));
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tpixel clock khz = %d\n",
+                       drm_mode->clock);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tbpp = %d\n",
+                       debug->panel->dp_mode.bpp);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       /* Link Information */
+       rc = snprintf(buf + len, max_size,
+                       "\tdp_link:\n\t\ttest_requested = %d\n",
+                       debug->link->sink_request);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tnum_lanes = %d\n",
+                       debug->link->link_params.num_lanes);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       link_params_rate = debug->link->link_params.rate;
+       rc = snprintf(buf + len, max_size,
+                       "\t\tbw_code = %d\n",
+                       drm_dp_link_rate_to_bw_code(link_params_rate));
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       lclk = debug->link->link_params.rate * 1000;
+       rc = snprintf(buf + len, max_size,
+                       "\t\tlclk = %lld\n", lclk);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tv_level = %d\n",
+                       debug->link->phy_params.v_level);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       rc = snprintf(buf + len, max_size,
+                       "\t\tp_level = %d\n",
+                       debug->link->phy_params.p_level);
+       if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
+               goto error;
+
+       if (copy_to_user(user_buff, buf, len))
+               goto error;
+
+       *ppos += len;
+
+       kfree(buf);
+       return len;
+ error:
+       kfree(buf);
+       return -EINVAL;
+}
+
+static int dp_test_data_show(struct seq_file *m, void *data)
+{
+       struct drm_device *dev;
+       struct dp_debug_private *debug;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+       u32 bpc;
+
+       debug = m->private;
+       dev = debug->drm_dev;
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
+
+               if (connector->connector_type !=
+                       DRM_MODE_CONNECTOR_DisplayPort)
+                       continue;
+
+               if (connector->status == connector_status_connected) {
+                       bpc = debug->link->test_video.test_bit_depth;
+                       seq_printf(m, "hdisplay: %d\n",
+                                       debug->link->test_video.test_h_width);
+                       seq_printf(m, "vdisplay: %d\n",
+                                       debug->link->test_video.test_v_height);
+                                       seq_printf(m, "bpc: %u\n",
+                                       dp_link_bit_depth_to_bpc(bpc));
+               } else
+                       seq_puts(m, "0");
+       }
+
+       drm_connector_list_iter_end(&conn_iter);
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dp_test_data);
+
+static int dp_test_type_show(struct seq_file *m, void *data)
+{
+       struct dp_debug_private *debug = m->private;
+       struct drm_device *dev = debug->drm_dev;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
+
+               if (connector->connector_type !=
+                       DRM_MODE_CONNECTOR_DisplayPort)
+                       continue;
+
+               if (connector->status == connector_status_connected)
+                       seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN);
+               else
+                       seq_puts(m, "0");
+       }
+       drm_connector_list_iter_end(&conn_iter);
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(dp_test_type);
+
+static ssize_t dp_test_active_write(struct file *file,
+               const char __user *ubuf,
+               size_t len, loff_t *offp)
+{
+       char *input_buffer;
+       int status = 0;
+       struct dp_debug_private *debug;
+       struct drm_device *dev;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+       int val = 0;
+
+       debug = ((struct seq_file *)file->private_data)->private;
+       dev = debug->drm_dev;
+
+       if (len == 0)
+               return 0;
+
+       input_buffer = memdup_user_nul(ubuf, len);
+       if (IS_ERR(input_buffer))
+               return PTR_ERR(input_buffer);
+
+       DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
+
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
+               if (connector->connector_type !=
+                       DRM_MODE_CONNECTOR_DisplayPort)
+                       continue;
+
+               if (connector->status == connector_status_connected) {
+                       status = kstrtoint(input_buffer, 10, &val);
+                       if (status < 0)
+                               break;
+                       DRM_DEBUG_DRIVER("Got %d for test active\n", val);
+                       /*
+                        * To prevent erroneous activation of the compliance
+                        * testing code, only accept an actual value of 1
+                        * here.
+                        */
+                       if (val == 1)
+                               debug->panel->video_test = true;
+                       else
+                               debug->panel->video_test = false;
+               }
+       }
+       drm_connector_list_iter_end(&conn_iter);
+       kfree(input_buffer);
+       if (status < 0)
+               return status;
+
+       *offp += len;
+       return len;
+}
+
+static int dp_test_active_show(struct seq_file *m, void *data)
+{
+       struct dp_debug_private *debug = m->private;
+       struct drm_device *dev = debug->drm_dev;
+       struct drm_connector *connector;
+       struct drm_connector_list_iter conn_iter;
+
+       drm_connector_list_iter_begin(dev, &conn_iter);
+       drm_for_each_connector_iter(connector, &conn_iter) {
+               if (connector->connector_type !=
+                       DRM_MODE_CONNECTOR_DisplayPort)
+                       continue;
+
+               if (connector->status == connector_status_connected) {
+                       if (debug->panel->video_test)
+                               seq_puts(m, "1");
+                       else
+                               seq_puts(m, "0");
+               } else {
+                       seq_puts(m, "0");
+               }
+       }
+       drm_connector_list_iter_end(&conn_iter);
+
+       return 0;
+}
+
+static int dp_test_active_open(struct inode *inode,
+               struct file *file)
+{
+       return single_open(file, dp_test_active_show,
+                       inode->i_private);
+}
+
+static const struct file_operations dp_debug_fops = {
+       .open = simple_open,
+       .read = dp_debug_read_info,
+};
+
+static const struct file_operations test_active_fops = {
+       .owner = THIS_MODULE,
+       .open = dp_test_active_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .write = dp_test_active_write
+};
+
+static int dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
+{
+       int rc = 0;
+       struct dp_debug_private *debug = container_of(dp_debug,
+                       struct dp_debug_private, dp_debug);
+       struct dentry *file;
+       struct dentry *test_active;
+       struct dentry *test_data, *test_type;
+
+       file = debugfs_create_file("dp_debug", 0444, minor->debugfs_root,
+                       debug, &dp_debug_fops);
+       if (IS_ERR_OR_NULL(file)) {
+               rc = PTR_ERR(file);
+               DRM_ERROR("[%s] debugfs create file failed, rc=%d\n",
+                                 DEBUG_NAME, rc);
+       }
+
+       test_active = debugfs_create_file("msm_dp_test_active", 0644,
+                       minor->debugfs_root,
+                       debug, &test_active_fops);
+       if (IS_ERR_OR_NULL(test_active)) {
+               rc = PTR_ERR(test_active);
+               DRM_ERROR("[%s] debugfs test_active failed, rc=%d\n",
+                                 DEBUG_NAME, rc);
+       }
+
+       test_data = debugfs_create_file("msm_dp_test_data", 0444,
+                       minor->debugfs_root,
+                       debug, &dp_test_data_fops);
+       if (IS_ERR_OR_NULL(test_data)) {
+               rc = PTR_ERR(test_data);
+               DRM_ERROR("[%s] debugfs test_data failed, rc=%d\n",
+                                 DEBUG_NAME, rc);
+       }
+
+       test_type = debugfs_create_file("msm_dp_test_type", 0444,
+                       minor->debugfs_root,
+                       debug, &dp_test_type_fops);
+       if (IS_ERR_OR_NULL(test_type)) {
+               rc = PTR_ERR(test_type);
+               DRM_ERROR("[%s] debugfs test_type failed, rc=%d\n",
+                                 DEBUG_NAME, rc);
+       }
+
+       debug->root = minor->debugfs_root;
+
+       return rc;
+}
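
Since test_active_fops registers a .write hook, the node is created writable (0644 above) and the compliance flag can be flipped from userspace. A hedged userspace sketch; the /sys/kernel/debug/dri/0/ path is an assumption about where the DRM minor's debugfs directory lands on the target system:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* path is an assumption: the DRM minor's debugfs directory */
            const char *path = "/sys/kernel/debug/dri/0/msm_dp_test_active";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* only the literal value 1 arms video test mode */
            if (write(fd, "1", 1) != 1)
                    perror("write");
            close(fd);
            return 0;
    }
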
+
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+               struct dp_usbpd *usbpd, struct dp_link *link,
+               struct drm_connector **connector, struct drm_minor *minor)
+{
+       int rc = 0;
+       struct dp_debug_private *debug;
+       struct dp_debug *dp_debug;
+
+       if (!dev || !panel || !usbpd || !link || !minor) {
+               DRM_ERROR("invalid input\n");
+               rc = -EINVAL;
+               goto error;
+       }
+
+       debug = devm_kzalloc(dev, sizeof(*debug), GFP_KERNEL);
+       if (!debug) {
+               rc = -ENOMEM;
+               goto error;
+       }
+
+       debug->dp_debug.debug_en = false;
+       debug->usbpd = usbpd;
+       debug->link = link;
+       debug->panel = panel;
+       debug->dev = dev;
+       debug->drm_dev = minor->dev;
+       debug->connector = connector;
+
+       dp_debug = &debug->dp_debug;
+       dp_debug->vdisplay = 0;
+       dp_debug->hdisplay = 0;
+       dp_debug->vrefresh = 0;
+
+       rc = dp_debug_init(dp_debug, minor);
+       if (rc) {
+               devm_kfree(dev, debug);
+               goto error;
+       }
+
+       return dp_debug;
+ error:
+       return ERR_PTR(rc);
+}
+
+static int dp_debug_deinit(struct dp_debug *dp_debug)
+{
+       struct dp_debug_private *debug;
+
+       if (!dp_debug)
+               return -EINVAL;
+
+       debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
+
+       debugfs_remove_recursive(debug->root);
+
+       return 0;
+}
+
+void dp_debug_put(struct dp_debug *dp_debug)
+{
+       struct dp_debug_private *debug;
+
+       if (!dp_debug)
+               return;
+
+       debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
+
+       dp_debug_deinit(dp_debug);
+
+       devm_kfree(debug->dev, debug);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
new file mode 100644 (file)
index 0000000..7eaedfb
--- /dev/null
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_DEBUG_H_
+#define _DP_DEBUG_H_
+
+#include "dp_panel.h"
+#include "dp_link.h"
+
+/**
+ * struct dp_debug
+ * @debug_en: specifies whether debug mode is enabled
+ * @aspect_ratio: used to filter out the aspect_ratio value
+ * @vdisplay: used to filter out the vdisplay value
+ * @hdisplay: used to filter out the hdisplay value
+ * @vrefresh: used to filter out the vrefresh value
+ */
+struct dp_debug {
+       bool debug_en;
+       int aspect_ratio;
+       int vdisplay;
+       int hdisplay;
+       int vrefresh;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dp_debug_get() - configure and get the DisplayPort debug module data
+ *
+ * @dev: device instance of the caller
+ * @panel: instance of panel module
+ * @usbpd: instance of usbpd module
+ * @link: instance of link module
+ * @connector: double pointer to display connector
+ * @minor: pointer to the drm minor acquired after device registration
+ * return: pointer to allocated debug module data
+ *
+ * This function sets up the debug module and provides a way
+ * for debugfs input to be communicated to the existing modules.
+ */
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+               struct dp_usbpd *usbpd, struct dp_link *link,
+               struct drm_connector **connector,
+               struct drm_minor *minor);
+
+/**
+ * dp_debug_put()
+ *
+ * Cleans up dp_debug instance
+ *
+ * @dp_debug: instance of dp_debug
+ */
+void dp_debug_put(struct dp_debug *dp_debug);
+
+#else
+
+static inline
+struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
+               struct dp_usbpd *usbpd, struct dp_link *link,
+               struct drm_connector **connector, struct drm_minor *minor)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+static inline void dp_debug_put(struct dp_debug *dp_debug)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* _DP_DEBUG_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
new file mode 100644 (file)
index 0000000..e175aa3
--- /dev/null
@@ -0,0 +1,1463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/component.h>
+#include <linux/of_irq.h>
+#include <linux/delay.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "dp_hpd.h"
+#include "dp_parser.h"
+#include "dp_power.h"
+#include "dp_catalog.h"
+#include "dp_aux.h"
+#include "dp_reg.h"
+#include "dp_link.h"
+#include "dp_panel.h"
+#include "dp_ctrl.h"
+#include "dp_display.h"
+#include "dp_drm.h"
+#include "dp_audio.h"
+#include "dp_debug.h"
+
+static struct msm_dp *g_dp_display;
+#define HPD_STRING_SIZE 30
+
+enum {
+       ISR_DISCONNECTED,
+       ISR_CONNECT_PENDING,
+       ISR_CONNECTED,
+       ISR_HPD_REPLUG_COUNT,
+       ISR_IRQ_HPD_PULSE_COUNT,
+       ISR_HPD_LO_GLITCH_COUNT,
+};
+
+/* event thread connection state */
+enum {
+       ST_DISCONNECTED,
+       ST_CONNECT_PENDING,
+       ST_CONNECTED,
+       ST_DISCONNECT_PENDING,
+       ST_SUSPEND_PENDING,
+       ST_SUSPENDED,
+};
+
+enum {
+       EV_NO_EVENT,
+       /* hpd events */
+       EV_HPD_INIT_SETUP,
+       EV_HPD_PLUG_INT,
+       EV_IRQ_HPD_INT,
+       EV_HPD_REPLUG_INT,
+       EV_HPD_UNPLUG_INT,
+       EV_USER_NOTIFICATION,
+       EV_CONNECT_PENDING_TIMEOUT,
+       EV_DISCONNECT_PENDING_TIMEOUT,
+};
+
+#define EVENT_TIMEOUT  (HZ/10) /* 100ms */
+#define DP_EVENT_Q_MAX 8
+
+#define DP_TIMEOUT_5_SECOND    (5000/EVENT_TIMEOUT)
+#define DP_TIMEOUT_NONE                0
+
+#define WAIT_FOR_RESUME_TIMEOUT_JIFFIES (HZ / 2)
+
+struct dp_event {
+       u32 event_id;
+       u32 data;
+       u32 delay;
+};
+
+struct dp_display_private {
+       char *name;
+       int irq;
+
+       /* state variables */
+       bool core_initialized;
+       bool hpd_irq_on;
+       bool audio_supported;
+
+       struct platform_device *pdev;
+       struct dentry *root;
+
+       struct dp_usbpd   *usbpd;
+       struct dp_parser  *parser;
+       struct dp_power   *power;
+       struct dp_catalog *catalog;
+       struct drm_dp_aux *aux;
+       struct dp_link    *link;
+       struct dp_panel   *panel;
+       struct dp_ctrl    *ctrl;
+       struct dp_debug   *debug;
+
+       struct dp_usbpd_cb usbpd_cb;
+       struct dp_display_mode dp_mode;
+       struct msm_dp dp_display;
+
+       /* wait for audio signaling */
+       struct completion audio_comp;
+
+       /* event related only access by event thread */
+       struct mutex event_mutex;
+       wait_queue_head_t event_q;
+       atomic_t hpd_state;
+       u32 event_pndx;
+       u32 event_gndx;
+       struct dp_event event_list[DP_EVENT_Q_MAX];
+       spinlock_t event_lock;
+
+       struct completion resume_comp;
+
+       struct dp_audio *audio;
+};
+
+static const struct of_device_id dp_dt_match[] = {
+       {.compatible = "qcom,sc7180-dp"},
+       {}
+};
+
+static int dp_add_event(struct dp_display_private *dp_priv, u32 event,
+                                               u32 data, u32 delay)
+{
+       unsigned long flag;
+       struct dp_event *todo;
+       int pndx;
+
+       spin_lock_irqsave(&dp_priv->event_lock, flag);
+       pndx = dp_priv->event_pndx + 1;
+       pndx %= DP_EVENT_Q_MAX;
+       if (pndx == dp_priv->event_gndx) {
+               pr_err("event_q is full: pndx=%d gndx=%d\n",
+                       dp_priv->event_pndx, dp_priv->event_gndx);
+               spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+               return -EPERM;
+       }
+       todo = &dp_priv->event_list[dp_priv->event_pndx++];
+       dp_priv->event_pndx %= DP_EVENT_Q_MAX;
+       todo->event_id = event;
+       todo->data = data;
+       todo->delay = delay;
+       wake_up(&dp_priv->event_q);
+       spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+
+       return 0;
+}
+
+static int dp_del_event(struct dp_display_private *dp_priv, u32 event)
+{
+       unsigned long flag;
+       struct dp_event *todo;
+       u32     gndx;
+
+       spin_lock_irqsave(&dp_priv->event_lock, flag);
+       if (dp_priv->event_pndx == dp_priv->event_gndx) {
+               spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+               return -ENOENT;
+       }
+
+       gndx = dp_priv->event_gndx;
+       while (dp_priv->event_pndx != gndx) {
+               todo = &dp_priv->event_list[gndx];
+               if (todo->event_id == event) {
+                       todo->event_id = EV_NO_EVENT;   /* deleted */
+                       todo->delay = 0;
+               }
+               gndx++;
+               gndx %= DP_EVENT_Q_MAX;
+       }
+       spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+
+       return 0;
+}
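
dp_add_event() and dp_del_event() implement a fixed-size ring: event_pndx is the producer index, event_gndx the consumer index, both advanced modulo DP_EVENT_Q_MAX, and the queue is reported full when advancing the producer would land on the consumer, so at most DP_EVENT_Q_MAX - 1 entries are usable. A standalone sketch of just the index arithmetic:

    /* Standalone sketch of the pndx/gndx ring arithmetic used above;
     * with Q_MAX slots, at most Q_MAX - 1 entries can be queued.
     */
    #include <stdio.h>

    #define Q_MAX 8

    static unsigned int pndx, gndx;

    static int ring_push(void)
    {
            if ((pndx + 1) % Q_MAX == gndx)
                    return -1;      /* full: producer would catch consumer */
            pndx = (pndx + 1) % Q_MAX;
            return 0;
    }

    static int ring_pop(void)
    {
            if (pndx == gndx)
                    return -1;      /* empty */
            gndx = (gndx + 1) % Q_MAX;
            return 0;
    }

    int main(void)
    {
            int pushed = 0;

            while (ring_push() == 0)
                    pushed++;
            printf("capacity = %d (Q_MAX - 1 = %d)\n", pushed, Q_MAX - 1);
            ring_pop();
            printf("after one pop, push %s\n",
                   ring_push() == 0 ? "succeeds" : "fails");
            return 0;
    }
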
+
+void dp_display_signal_audio_complete(struct msm_dp *dp_display)
+{
+       struct dp_display_private *dp;
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       complete_all(&dp->audio_comp);
+}
+
+static int dp_display_bind(struct device *dev, struct device *master,
+                          void *data)
+{
+       int rc = 0;
+       struct dp_display_private *dp;
+       struct drm_device *drm;
+       struct msm_drm_private *priv;
+
+       drm = dev_get_drvdata(master);
+
+       dp = container_of(g_dp_display,
+                       struct dp_display_private, dp_display);
+       if (!dp) {
+               DRM_ERROR("DP driver bind failed. Invalid driver data\n");
+               return -EINVAL;
+       }
+
+       dp->dp_display.drm_dev = drm;
+       priv = drm->dev_private;
+       priv->dp = &(dp->dp_display);
+
+       rc = dp->parser->parse(dp->parser);
+       if (rc) {
+               DRM_ERROR("device tree parsing failed\n");
+               goto end;
+       }
+
+       rc = dp_aux_register(dp->aux);
+       if (rc) {
+               DRM_ERROR("DRM DP AUX register failed\n");
+               goto end;
+       }
+
+       rc = dp_power_client_init(dp->power);
+       if (rc) {
+               DRM_ERROR("Power client create failed\n");
+               goto end;
+       }
+
+       rc = dp_register_audio_driver(dev, dp->audio);
+       if (rc)
+               DRM_ERROR("DP audio registration failed\n");
+
+end:
+       return rc;
+}
+
+static void dp_display_unbind(struct device *dev, struct device *master,
+                             void *data)
+{
+       struct dp_display_private *dp;
+       struct drm_device *drm = dev_get_drvdata(master);
+       struct msm_drm_private *priv = drm->dev_private;
+
+       dp = container_of(g_dp_display,
+                       struct dp_display_private, dp_display);
+       if (!dp) {
+               DRM_ERROR("Invalid DP driver data\n");
+               return;
+       }
+
+       dp_power_client_deinit(dp->power);
+       dp_aux_unregister(dp->aux);
+       priv->dp = NULL;
+}
+
+static const struct component_ops dp_display_comp_ops = {
+       .bind = dp_display_bind,
+       .unbind = dp_display_unbind,
+};
+
+static bool dp_display_is_ds_bridge(struct dp_panel *panel)
+{
+       return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
+               DP_DWN_STRM_PORT_PRESENT);
+}
+
+static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
+{
+       return dp_display_is_ds_bridge(dp->panel) &&
+               (dp->link->sink_count == 0);
+}
+
+static void dp_display_send_hpd_event(struct msm_dp *dp_display)
+{
+       struct dp_display_private *dp;
+       struct drm_connector *connector;
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       connector = dp->dp_display.connector;
+       drm_helper_hpd_irq_event(connector->dev);
+}
+
+static int dp_display_send_hpd_notification(struct dp_display_private *dp,
+                                           bool hpd)
+{
+       static bool encoder_mode_set;
+       struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private;
+       struct msm_kms *kms = priv->kms;
+
+       if ((hpd && dp->dp_display.is_connected) ||
+                       (!hpd && !dp->dp_display.is_connected)) {
+               DRM_DEBUG_DP("HPD already %s\n", (hpd ? "on" : "off"));
+               return 0;
+       }
+
+       /* reset video pattern flag on disconnect */
+       if (!hpd)
+               dp->panel->video_test = false;
+
+       dp->dp_display.is_connected = hpd;
+
+       if (dp->dp_display.is_connected && dp->dp_display.encoder
+                               && !encoder_mode_set
+                               && kms->funcs->set_encoder_mode) {
+               kms->funcs->set_encoder_mode(kms,
+                               dp->dp_display.encoder, false);
+               DRM_DEBUG_DP("set_encoder_mode() Completed\n");
+               encoder_mode_set = true;
+       }
+
+       dp_display_send_hpd_event(&dp->dp_display);
+
+       return 0;
+}
+
+static int dp_display_process_hpd_high(struct dp_display_private *dp)
+{
+       int rc = 0;
+       struct edid *edid;
+
+       dp->panel->max_dp_lanes = dp->parser->max_dp_lanes;
+
+       rc = dp_panel_read_sink_caps(dp->panel, dp->dp_display.connector);
+       if (rc)
+               goto end;
+
+       dp_link_process_request(dp->link);
+
+       edid = dp->panel->edid;
+
+       dp->audio_supported = drm_detect_monitor_audio(edid);
+       dp_panel_handle_sink_request(dp->panel);
+
+       dp->dp_display.max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
+       dp->dp_display.max_dp_lanes = dp->parser->max_dp_lanes;
+
+       rc = dp_ctrl_on_link(dp->ctrl);
+       if (rc) {
+               DRM_ERROR("failed to complete DP link training\n");
+               goto end;
+       }
+
+       dp_add_event(dp, EV_USER_NOTIFICATION, true, 0);
+
+end:
+       return rc;
+}
+
+static void dp_display_host_init(struct dp_display_private *dp)
+{
+       bool flip = false;
+
+       if (dp->core_initialized) {
+               DRM_DEBUG_DP("DP core already initialized\n");
+               return;
+       }
+
+       if (dp->usbpd->orientation == ORIENTATION_CC2)
+               flip = true;
+
+       dp_power_init(dp->power, flip);
+       dp_ctrl_host_init(dp->ctrl, flip);
+       dp_aux_init(dp->aux);
+       dp->core_initialized = true;
+}
+
+static int dp_display_usbpd_configure_cb(struct device *dev)
+{
+       int rc = 0;
+       struct dp_display_private *dp;
+
+       if (!dev) {
+               DRM_ERROR("invalid dev\n");
+               rc = -EINVAL;
+               goto end;
+       }
+
+       dp = container_of(g_dp_display,
+                       struct dp_display_private, dp_display);
+       if (!dp) {
+               DRM_ERROR("no driver data found\n");
+               rc = -ENODEV;
+               goto end;
+       }
+
+       dp_display_host_init(dp);
+
+       /*
+        * Set the sink to normal operation mode (D0)
+        * before the DPCD read.
+        */
+       dp_link_psm_config(dp->link, &dp->panel->link_info, false);
+       rc = dp_display_process_hpd_high(dp);
+end:
+       return rc;
+}
+
+static int dp_display_usbpd_disconnect_cb(struct device *dev)
+{
+       int rc = 0;
+       struct dp_display_private *dp;
+
+       if (!dev) {
+               DRM_ERROR("invalid dev\n");
+               rc = -EINVAL;
+               return rc;
+       }
+
+       dp = container_of(g_dp_display,
+                       struct dp_display_private, dp_display);
+       if (!dp) {
+               DRM_ERROR("no driver data found\n");
+               rc = -ENODEV;
+               return rc;
+       }
+
+       dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+
+       return rc;
+}
+
+static void dp_display_handle_video_request(struct dp_display_private *dp)
+{
+       if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
+               dp->panel->video_test = true;
+               dp_link_send_test_response(dp->link);
+       }
+}
+
+static int dp_display_handle_irq_hpd(struct dp_display_private *dp)
+{
+       u32 sink_request;
+
+       sink_request = dp->link->sink_request;
+
+       if (sink_request & DS_PORT_STATUS_CHANGED) {
+               dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
+               if (dp_display_is_sink_count_zero(dp)) {
+                       DRM_DEBUG_DP("sink count is zero, nothing to do\n");
+                       return 0;
+               }
+
+               return dp_display_process_hpd_high(dp);
+       }
+
+       dp_ctrl_handle_sink_request(dp->ctrl);
+
+       if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN)
+               dp_display_handle_video_request(dp);
+
+       return 0;
+}
+
+static int dp_display_usbpd_attention_cb(struct device *dev)
+{
+       int rc = 0;
+       struct dp_display_private *dp;
+
+       if (!dev) {
+               DRM_ERROR("invalid dev\n");
+               return -EINVAL;
+       }
+
+       dp = container_of(g_dp_display,
+                       struct dp_display_private, dp_display);
+       if (!dp) {
+               DRM_ERROR("no driver data found\n");
+               return -ENODEV;
+       }
+
+       /* check for any test request issued by sink */
+       rc = dp_link_process_request(dp->link);
+       if (!rc)
+               dp_display_handle_irq_hpd(dp);
+
+       return rc;
+}
+
+static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
+{
+       struct dp_usbpd *hpd = dp->usbpd;
+       u32 state;
+       u32 tout = DP_TIMEOUT_5_SECOND;
+       int ret;
+
+       if (!hpd)
+               return 0;
+
+       mutex_lock(&dp->event_mutex);
+
+       state =  atomic_read(&dp->hpd_state);
+       if (state == ST_SUSPEND_PENDING) {
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
+       if (state == ST_CONNECT_PENDING || state == ST_CONNECTED) {
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
+       if (state == ST_DISCONNECT_PENDING) {
+               /* wait until ST_DISCONNECTED */
+               dp_add_event(dp, EV_HPD_PLUG_INT, 0, 1); /* delay = 1 */
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
+       if (state == ST_SUSPENDED)
+               tout = DP_TIMEOUT_NONE;
+
+       atomic_set(&dp->hpd_state, ST_CONNECT_PENDING);
+
+       hpd->hpd_high = 1;
+
+       ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
+       if (ret) {      /* failed */
+               hpd->hpd_high = 0;
+               atomic_set(&dp->hpd_state, ST_DISCONNECTED);
+       }
+
+       /* arm the connect-pending sanity timeout */
+       dp_add_event(dp, EV_CONNECT_PENDING_TIMEOUT, 0, tout);
+
+       mutex_unlock(&dp->event_mutex);
+
+       /* uevent will complete connection part */
+       return 0;
+}
+
+static int dp_display_enable(struct dp_display_private *dp, u32 data);
+static int dp_display_disable(struct dp_display_private *dp, u32 data);
+
+static int dp_connect_pending_timeout(struct dp_display_private *dp, u32 data)
+{
+       u32 state;
+
+       mutex_lock(&dp->event_mutex);
+
+       state =  atomic_read(&dp->hpd_state);
+       if (state == ST_CONNECT_PENDING) {
+               dp_display_enable(dp, 0);
+               atomic_set(&dp->hpd_state, ST_CONNECTED);
+       }
+
+       mutex_unlock(&dp->event_mutex);
+
+       return 0;
+}
+
+static void dp_display_handle_plugged_change(struct msm_dp *dp_display,
+               bool plugged)
+{
+       if (dp_display->plugged_cb && dp_display->codec_dev)
+               dp_display->plugged_cb(dp_display->codec_dev, plugged);
+}
+
+static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
+{
+       struct dp_usbpd *hpd = dp->usbpd;
+       u32 state;
+
+       if (!hpd)
+               return 0;
+
+       mutex_lock(&dp->event_mutex);
+
+       state = atomic_read(&dp->hpd_state);
+       if (state == ST_SUSPEND_PENDING) {
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
+       if (state == ST_DISCONNECT_PENDING || state == ST_DISCONNECTED) {
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
+       if (state == ST_CONNECT_PENDING) {
+               /* wait until CONNECTED */
+               dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 1); /* delay = 1 */
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
+       atomic_set(&dp->hpd_state, ST_DISCONNECT_PENDING);
+
+       /* disable HPD plug interrupt until disconnect is done */
+       dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK
+                               | DP_DP_IRQ_HPD_INT_MASK, false);
+
+       hpd->hpd_high = 0;
+
+       /*
+        * We don't need separate work for disconnect as
+        * connect/attention interrupts are disabled
+        */
+       dp_display_usbpd_disconnect_cb(&dp->pdev->dev);
+
+       /* arm the disconnect-pending sanity timeout */
+       dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
+
+       /* signal the disconnect event early to ensure proper teardown */
+       dp_display_handle_plugged_change(g_dp_display, false);
+       reinit_completion(&dp->audio_comp);
+
+       dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK |
+                                       DP_DP_IRQ_HPD_INT_MASK, true);
+
+       /* uevent will complete disconnection part */
+       mutex_unlock(&dp->event_mutex);
+       return 0;
+}
+
+static int dp_disconnect_pending_timeout(struct dp_display_private *dp, u32 data)
+{
+       u32 state;
+
+       mutex_lock(&dp->event_mutex);
+
+       state =  atomic_read(&dp->hpd_state);
+       if (state == ST_DISCONNECT_PENDING) {
+               dp_display_disable(dp, 0);
+               atomic_set(&dp->hpd_state, ST_DISCONNECTED);
+       }
+
+       mutex_unlock(&dp->event_mutex);
+
+       return 0;
+}
+
+static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
+{
+       u32 state;
+
+       mutex_lock(&dp->event_mutex);
+
+       /* irq_hpd can happen at either connected or disconnected state */
+       state =  atomic_read(&dp->hpd_state);
+       if (state == ST_SUSPEND_PENDING) {
+               mutex_unlock(&dp->event_mutex);
+               return 0;
+       }
+
+       dp_display_usbpd_attention_cb(&dp->pdev->dev);
+
+       mutex_unlock(&dp->event_mutex);
+
+       return 0;
+}
+
+static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
+{
+       dp_debug_put(dp->debug);
+       dp_ctrl_put(dp->ctrl);
+       dp_panel_put(dp->panel);
+       dp_aux_put(dp->aux);
+       dp_audio_put(dp->audio);
+}
+
+static int dp_init_sub_modules(struct dp_display_private *dp)
+{
+       int rc = 0;
+       struct device *dev = &dp->pdev->dev;
+       struct dp_usbpd_cb *cb = &dp->usbpd_cb;
+       struct dp_panel_in panel_in = {
+               .dev = dev,
+       };
+
+       /* Callback APIs used for cable status change event */
+       cb->configure  = dp_display_usbpd_configure_cb;
+       cb->disconnect = dp_display_usbpd_disconnect_cb;
+       cb->attention  = dp_display_usbpd_attention_cb;
+
+       dp->usbpd = dp_hpd_get(dev, cb);
+       if (IS_ERR(dp->usbpd)) {
+               rc = PTR_ERR(dp->usbpd);
+               DRM_ERROR("failed to initialize hpd, rc = %d\n", rc);
+               dp->usbpd = NULL;
+               goto error;
+       }
+
+       dp->parser = dp_parser_get(dp->pdev);
+       if (IS_ERR(dp->parser)) {
+               rc = PTR_ERR(dp->parser);
+               DRM_ERROR("failed to initialize parser, rc = %d\n", rc);
+               dp->parser = NULL;
+               goto error;
+       }
+
+       dp->catalog = dp_catalog_get(dev, &dp->parser->io);
+       if (IS_ERR(dp->catalog)) {
+               rc = PTR_ERR(dp->catalog);
+               DRM_ERROR("failed to initialize catalog, rc = %d\n", rc);
+               dp->catalog = NULL;
+               goto error;
+       }
+
+       dp->power = dp_power_get(dp->parser);
+       if (IS_ERR(dp->power)) {
+               rc = PTR_ERR(dp->power);
+               DRM_ERROR("failed to initialize power, rc = %d\n", rc);
+               dp->power = NULL;
+               goto error;
+       }
+
+       dp->aux = dp_aux_get(dev, dp->catalog);
+       if (IS_ERR(dp->aux)) {
+               rc = PTR_ERR(dp->aux);
+               DRM_ERROR("failed to initialize aux, rc = %d\n", rc);
+               dp->aux = NULL;
+               goto error;
+       }
+
+       dp->link = dp_link_get(dev, dp->aux);
+       if (IS_ERR(dp->link)) {
+               rc = PTR_ERR(dp->link);
+               DRM_ERROR("failed to initialize link, rc = %d\n", rc);
+               dp->link = NULL;
+               goto error_link;
+       }
+
+       panel_in.aux = dp->aux;
+       panel_in.catalog = dp->catalog;
+       panel_in.link = dp->link;
+
+       dp->panel = dp_panel_get(&panel_in);
+       if (IS_ERR(dp->panel)) {
+               rc = PTR_ERR(dp->panel);
+               DRM_ERROR("failed to initialize panel, rc = %d\n", rc);
+               dp->panel = NULL;
+               goto error_link;
+       }
+
+       dp->ctrl = dp_ctrl_get(dev, dp->link, dp->panel, dp->aux,
+                              dp->power, dp->catalog, dp->parser);
+       if (IS_ERR(dp->ctrl)) {
+               rc = PTR_ERR(dp->ctrl);
+               DRM_ERROR("failed to initialize ctrl, rc = %d\n", rc);
+               dp->ctrl = NULL;
+               goto error_ctrl;
+       }
+
+       dp->audio = dp_audio_get(dp->pdev, dp->panel, dp->catalog);
+       if (IS_ERR(dp->audio)) {
+               rc = PTR_ERR(dp->audio);
+               DRM_ERROR("failed to initialize audio, rc = %d\n", rc);
+               dp->audio = NULL;
+               goto error_audio;
+       }
+
+       return rc;
+
+error_audio:
+       dp_ctrl_put(dp->ctrl);
+error_ctrl:
+       dp_panel_put(dp->panel);
+error_link:
+       dp_aux_put(dp->aux);
+error:
+       return rc;
+}
+
+static int dp_display_set_mode(struct msm_dp *dp_display,
+                              struct dp_display_mode *mode)
+{
+       struct dp_display_private *dp;
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       dp->panel->dp_mode.drm_mode = mode->drm_mode;
+       dp->panel->dp_mode.bpp = mode->bpp;
+       dp->panel->dp_mode.capabilities = mode->capabilities;
+       dp_panel_init_panel_info(dp->panel);
+       return 0;
+}
+
+static int dp_display_prepare(struct msm_dp *dp)
+{
+       return 0;
+}
+
+static int dp_display_enable(struct dp_display_private *dp, u32 data)
+{
+       int rc = 0;
+       struct msm_dp *dp_display;
+
+       dp_display = g_dp_display;
+
+       if (dp_display->power_on) {
+               DRM_DEBUG_DP("Link already set up, return\n");
+               return 0;
+       }
+
+       rc = dp_ctrl_on_stream(dp->ctrl);
+       if (!rc)
+               dp_display->power_on = true;
+
+       /* complete resume_comp regardless of whether it is armed */
+       complete(&dp->resume_comp);
+       return rc;
+}
+
+static int dp_display_post_enable(struct msm_dp *dp_display)
+{
+       struct dp_display_private *dp;
+       u32 rate;
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       rate = dp->link->link_params.rate;
+
+       if (dp->audio_supported) {
+               dp->audio->bw_code = drm_dp_link_rate_to_bw_code(rate);
+               dp->audio->lane_count = dp->link->link_params.num_lanes;
+       }
+
+       /* signal the connect event late to synchronize video and display */
+       dp_display_handle_plugged_change(dp_display, true);
+       return 0;
+}
+
+static int dp_display_disable(struct dp_display_private *dp, u32 data)
+{
+       struct msm_dp *dp_display;
+
+       dp_display = g_dp_display;
+
+       if (!dp_display->power_on)
+               return -EINVAL;
+
+       /* wait only if audio was enabled */
+       if (dp_display->audio_enabled) {
+               if (!wait_for_completion_timeout(&dp->audio_comp,
+                               HZ * 5))
+                       DRM_ERROR("audio comp timeout\n");
+       }
+
+       dp_display->audio_enabled = false;
+
+       dp_ctrl_off(dp->ctrl);
+
+       dp->core_initialized = false;
+
+       dp_display->power_on = false;
+
+       return 0;
+}
+
+static int dp_display_unprepare(struct msm_dp *dp)
+{
+       return 0;
+}
+
+int dp_display_set_plugged_cb(struct msm_dp *dp_display,
+               hdmi_codec_plugged_cb fn, struct device *codec_dev)
+{
+       bool plugged;
+
+       dp_display->plugged_cb = fn;
+       dp_display->codec_dev = codec_dev;
+       plugged = dp_display->is_connected;
+       dp_display_handle_plugged_change(dp_display, plugged);
+
+       return 0;
+}
+
+int dp_display_validate_mode(struct msm_dp *dp, u32 mode_pclk_khz)
+{
+       const u32 num_components = 3, default_bpp = 24;
+       struct dp_display_private *dp_display;
+       struct dp_link_info *link_info;
+       u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
+
+       if (!dp || !mode_pclk_khz || !dp->connector) {
+               DRM_ERROR("invalid params\n");
+               return -EINVAL;
+       }
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+       link_info = &dp_display->panel->link_info;
+
+       mode_bpp = dp->connector->display_info.bpc * num_components;
+       if (!mode_bpp)
+               mode_bpp = default_bpp;
+
+       mode_bpp = dp_panel_get_mode_bpp(dp_display->panel,
+                       mode_bpp, mode_pclk_khz);
+
+       mode_rate_khz = mode_pclk_khz * mode_bpp;
+       supported_rate_khz = link_info->num_lanes * link_info->rate * 8;
+
+       if (mode_rate_khz > supported_rate_khz)
+               return MODE_BAD;
+
+       return MODE_OK;
+}
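
The validation multiplies the pixel clock by the per-pixel bits to get the stream rate, and compares it against lanes × link symbol clock × 8, since 8b/10b coding delivers 8 data bits per lane per link symbol. A worked example with assumed numbers, 1080p60 at 24 bpp against 2 lanes of HBR (270000 kHz):

    /* Worked example of the bandwidth check above; the mode and link
     * numbers are assumptions chosen for illustration.
     */
    #include <stdio.h>

    int main(void)
    {
            unsigned int mode_pclk_khz = 148500;    /* 1080p60 */
            unsigned int mode_bpp = 24;             /* 8 bpc x 3 components */
            unsigned int num_lanes = 2;
            unsigned int rate = 270000;             /* HBR link symbol clock, kHz */

            unsigned long mode_rate_khz = (unsigned long)mode_pclk_khz * mode_bpp;
            unsigned long supported_rate_khz = (unsigned long)num_lanes * rate * 8;

            printf("mode needs %lu kbps, link carries %lu kbps -> %s\n",
                   mode_rate_khz, supported_rate_khz,
                   mode_rate_khz > supported_rate_khz ? "MODE_BAD" : "MODE_OK");
            return 0;
    }

With these numbers the stream needs 3,564,000 kbps against 4,320,000 kbps of link payload, so the mode passes.
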
+
+int dp_display_get_modes(struct msm_dp *dp,
+                               struct dp_display_mode *dp_mode)
+{
+       struct dp_display_private *dp_display;
+       int ret = 0;
+
+       if (!dp) {
+               DRM_ERROR("invalid params\n");
+               return 0;
+       }
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+       ret = dp_panel_get_modes(dp_display->panel,
+               dp->connector, dp_mode);
+       if (dp_mode->drm_mode.clock)
+               dp->max_pclk_khz = dp_mode->drm_mode.clock;
+       return ret;
+}
+
+bool dp_display_check_video_test(struct msm_dp *dp)
+{
+       struct dp_display_private *dp_display;
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+       return dp_display->panel->video_test;
+}
+
+int dp_display_get_test_bpp(struct msm_dp *dp)
+{
+       struct dp_display_private *dp_display;
+
+       if (!dp) {
+               DRM_ERROR("invalid params\n");
+               return 0;
+       }
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+       return dp_link_bit_depth_to_bpp(
+               dp_display->link->test_video.test_bit_depth);
+}
+
+static void dp_display_config_hpd(struct dp_display_private *dp)
+{
+       dp_display_host_init(dp);
+       dp_catalog_ctrl_hpd_config(dp->catalog);
+
+       /*
+        * Enable the interrupt once, here: DP clocks are left on
+        * across disconnect and the interrupt is never disabled.
+        */
+       enable_irq(dp->irq);
+}
+
+static int hpd_event_thread(void *data)
+{
+       struct dp_display_private *dp_priv;
+       unsigned long flag;
+       struct dp_event *todo;
+       int timeout_mode = 0;
+
+       dp_priv = (struct dp_display_private *)data;
+
+       while (1) {
+               if (timeout_mode) {
+                       wait_event_timeout(dp_priv->event_q,
+                               (dp_priv->event_pndx == dp_priv->event_gndx),
+                                               EVENT_TIMEOUT);
+               } else {
+                       wait_event_interruptible(dp_priv->event_q,
+                               (dp_priv->event_pndx != dp_priv->event_gndx));
+               }
+               spin_lock_irqsave(&dp_priv->event_lock, flag);
+               todo = &dp_priv->event_list[dp_priv->event_gndx];
+               if (todo->delay) {
+                       struct dp_event *todo_next;
+
+                       dp_priv->event_gndx++;
+                       dp_priv->event_gndx %= DP_EVENT_Q_MAX;
+
+                       /* re-queue the delayed event */
+                       todo_next = &dp_priv->event_list[dp_priv->event_pndx++];
+                       dp_priv->event_pndx %= DP_EVENT_Q_MAX;
+                       todo_next->event_id = todo->event_id;
+                       todo_next->data = todo->data;
+                       todo_next->delay = todo->delay - 1;
+
+                       /* clean up older event */
+                       todo->event_id = EV_NO_EVENT;
+                       todo->delay = 0;
+
+                       /* switch to timeout mode */
+                       timeout_mode = 1;
+                       spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+                       continue;
+               }
+
+               /* timeout with no events in q */
+               if (dp_priv->event_pndx == dp_priv->event_gndx) {
+                       spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+                       continue;
+               }
+
+               dp_priv->event_gndx++;
+               dp_priv->event_gndx %= DP_EVENT_Q_MAX;
+               timeout_mode = 0;
+               spin_unlock_irqrestore(&dp_priv->event_lock, flag);
+
+               switch (todo->event_id) {
+               case EV_HPD_INIT_SETUP:
+                       dp_display_config_hpd(dp_priv);
+                       break;
+               case EV_HPD_PLUG_INT:
+                       dp_hpd_plug_handle(dp_priv, todo->data);
+                       break;
+               case EV_HPD_UNPLUG_INT:
+                       dp_hpd_unplug_handle(dp_priv, todo->data);
+                       break;
+               case EV_IRQ_HPD_INT:
+                       dp_irq_hpd_handle(dp_priv, todo->data);
+                       break;
+               case EV_HPD_REPLUG_INT:
+                       /* do nothing */
+                       break;
+               case EV_USER_NOTIFICATION:
+                       dp_display_send_hpd_notification(dp_priv,
+                                               todo->data);
+                       break;
+               case EV_CONNECT_PENDING_TIMEOUT:
+                       dp_connect_pending_timeout(dp_priv,
+                                               todo->data);
+                       break;
+               case EV_DISCONNECT_PENDING_TIMEOUT:
+                       dp_disconnect_pending_timeout(dp_priv,
+                                               todo->data);
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       return 0;
+}
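
A non-zero delay is a tick count, not a jiffy value: each pass pops the delayed event, re-queues it with delay - 1, and in timeout mode the thread then sleeps EVENT_TIMEOUT (HZ/10, about 100 ms), so an event queued with delay N is dispatched roughly N × 100 ms later. Under that reading, the EV_HPD_INIT_SETUP event queued later in this file with delay 100 fires around ten seconds in, and DP_TIMEOUT_5_SECOND = 5000/EVENT_TIMEOUT divides milliseconds by jiffies, so it only works out to about 5 s when HZ is 1000. A small sketch of the countdown, with the tick length as an assumption:

    /* Sketch of the delay-requeue countdown in hpd_event_thread();
     * TICK_MS models EVENT_TIMEOUT and is an assumption (~100 ms).
     */
    #include <stdio.h>

    #define TICK_MS 100

    int main(void)
    {
            unsigned int delay = 100;       /* as queued for EV_HPD_INIT_SETUP */
            unsigned int elapsed_ms = 0;

            while (delay) {
                    delay--;                /* one requeue per EVENT_TIMEOUT pass */
                    elapsed_ms += TICK_MS;
            }
            printf("event dispatched after ~%u ms\n", elapsed_ms);
            return 0;
    }
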
+
+static void dp_hpd_event_setup(struct dp_display_private *dp_priv)
+{
+       init_waitqueue_head(&dp_priv->event_q);
+       spin_lock_init(&dp_priv->event_lock);
+
+       kthread_run(hpd_event_thread, dp_priv, "dp_hpd_handler");
+}
+
+static irqreturn_t dp_display_irq_handler(int irq, void *dev_id)
+{
+       struct dp_display_private *dp = dev_id;
+       irqreturn_t ret = IRQ_HANDLED;
+       u32 hpd_isr_status;
+
+       if (!dp) {
+               DRM_ERROR("invalid data\n");
+               return IRQ_NONE;
+       }
+
+       hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog);
+
+       if (hpd_isr_status & 0x0F) {
+               /* hpd related interrupts */
+               if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK ||
+                       hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
+                       dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
+               }
+
+               if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
+                       /* delete connect pending event first */
+                       dp_del_event(dp, EV_CONNECT_PENDING_TIMEOUT);
+                       dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
+               }
+
+               if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK)
+                       dp_add_event(dp, EV_HPD_REPLUG_INT, 0, 0);
+
+               if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
+                       dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+       }
+
+       /* DP controller isr */
+       dp_ctrl_isr(dp->ctrl);
+
+       /* DP aux isr */
+       dp_aux_isr(dp->aux);
+
+       return ret;
+}
+
+int dp_display_request_irq(struct msm_dp *dp_display)
+{
+       int rc = 0;
+       struct dp_display_private *dp;
+
+       if (!dp_display) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
+       if (!dp->irq) {
+               DRM_ERROR("failed to get irq\n");
+               return -EINVAL;
+       }
+
+       rc = devm_request_irq(&dp->pdev->dev, dp->irq,
+                       dp_display_irq_handler,
+                       IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
+       if (rc < 0) {
+               DRM_ERROR("failed to request IRQ%u: %d\n",
+                               dp->irq, rc);
+               return rc;
+       }
+       disable_irq(dp->irq);
+
+       return 0;
+}
+
+static int dp_display_probe(struct platform_device *pdev)
+{
+       int rc = 0;
+       struct dp_display_private *dp;
+
+       if (!pdev || !pdev->dev.of_node) {
+               DRM_ERROR("pdev not found\n");
+               return -ENODEV;
+       }
+
+       dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
+       if (!dp)
+               return -ENOMEM;
+
+       dp->pdev = pdev;
+       dp->name = "drm_dp";
+
+       rc = dp_init_sub_modules(dp);
+       if (rc) {
+               DRM_ERROR("init sub module failed\n");
+               return -EPROBE_DEFER;
+       }
+
+       mutex_init(&dp->event_mutex);
+
+       init_completion(&dp->resume_comp);
+
+       g_dp_display = &dp->dp_display;
+
+       /* Store DP audio handle inside DP display */
+       g_dp_display->dp_audio = dp->audio;
+
+       init_completion(&dp->audio_comp);
+
+       platform_set_drvdata(pdev, g_dp_display);
+
+       rc = component_add(&pdev->dev, &dp_display_comp_ops);
+       if (rc) {
+               DRM_ERROR("component add failed, rc=%d\n", rc);
+               dp_display_deinit_sub_modules(dp);
+       }
+
+       return rc;
+}
+
+static int dp_display_remove(struct platform_device *pdev)
+{
+       struct dp_display_private *dp;
+
+       dp = container_of(g_dp_display,
+                       struct dp_display_private, dp_display);
+
+       dp_display_deinit_sub_modules(dp);
+
+       component_del(&pdev->dev, &dp_display_comp_ops);
+       platform_set_drvdata(pdev, NULL);
+
+       return 0;
+}
+
+static int dp_pm_resume(struct device *dev)
+{
+       return 0;
+}
+
+static int dp_pm_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct dp_display_private *dp = platform_get_drvdata(pdev);
+
+       if (!dp) {
+               DRM_ERROR("DP driver bind failed. Invalid driver data\n");
+               return -EINVAL;
+       }
+
+       atomic_set(&dp->hpd_state, ST_SUSPENDED);
+
+       return 0;
+}
+
+static int dp_pm_prepare(struct device *dev)
+{
+       return 0;
+}
+
+static void dp_pm_complete(struct device *dev)
+{
+}
+
+static const struct dev_pm_ops dp_pm_ops = {
+       .suspend = dp_pm_suspend,
+       .resume =  dp_pm_resume,
+       .prepare = dp_pm_prepare,
+       .complete = dp_pm_complete,
+};
+
+static struct platform_driver dp_display_driver = {
+       .probe  = dp_display_probe,
+       .remove = dp_display_remove,
+       .driver = {
+               .name = "msm-dp-display",
+               .of_match_table = dp_dt_match,
+               .suppress_bind_attrs = true,
+               .pm = &dp_pm_ops,
+       },
+};
+
+int __init msm_dp_register(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&dp_display_driver);
+       if (ret)
+               DRM_ERROR("DP display driver register failed\n");
+
+       return ret;
+}
+
+void __exit msm_dp_unregister(void)
+{
+       platform_driver_unregister(&dp_display_driver);
+}
+
+void msm_dp_irq_postinstall(struct msm_dp *dp_display)
+{
+       struct dp_display_private *dp;
+
+       if (!dp_display)
+               return;
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+       dp_hpd_event_setup(dp);
+
+       dp_add_event(dp, EV_HPD_INIT_SETUP, 0, 100);
+}
+
+void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
+{
+       struct dp_display_private *dp;
+       struct device *dev;
+       int rc;
+
+       dp = container_of(dp_display, struct dp_display_private, dp_display);
+       dev = &dp->pdev->dev;
+
+       dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd,
+                                       dp->link, &dp->dp_display.connector,
+                                       minor);
+       if (IS_ERR(dp->debug)) {
+               rc = PTR_ERR(dp->debug);
+               DRM_ERROR("failed to initialize debug, rc = %d\n", rc);
+               dp->debug = NULL;
+       }
+}
+
+int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
+                       struct drm_encoder *encoder)
+{
+       struct msm_drm_private *priv;
+       int ret;
+
+       if (WARN_ON(!encoder) || WARN_ON(!dp_display) || WARN_ON(!dev))
+               return -EINVAL;
+
+       priv = dev->dev_private;
+       dp_display->drm_dev = dev;
+
+       ret = dp_display_request_irq(dp_display);
+       if (ret) {
+               DRM_ERROR("request_irq failed, ret=%d\n", ret);
+               return ret;
+       }
+
+       dp_display->encoder = encoder;
+
+       dp_display->connector = dp_drm_connector_init(dp_display);
+       if (IS_ERR(dp_display->connector)) {
+               ret = PTR_ERR(dp_display->connector);
+               DRM_DEV_ERROR(dev->dev,
+                       "failed to create dp connector: %d\n", ret);
+               dp_display->connector = NULL;
+               return ret;
+       }
+
+       priv->connectors[priv->num_connectors++] = dp_display->connector;
+       return 0;
+}
+
+static int dp_display_wait4resume_done(struct dp_display_private *dp)
+{
+       int ret = 0;
+
+       reinit_completion(&dp->resume_comp);
+       if (!wait_for_completion_timeout(&dp->resume_comp,
+                               WAIT_FOR_RESUME_TIMEOUT_JIFFIES)) {
+               DRM_ERROR("wait4resume_done timed out\n");
+               ret = -ETIMEDOUT;
+       }
+       return ret;
+}
+
+int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder)
+{
+       int rc = 0;
+       struct dp_display_private *dp_display;
+       u32 state;
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+       if (!dp_display->dp_mode.drm_mode.clock) {
+               DRM_ERROR("invalid params\n");
+               return -EINVAL;
+       }
+
+       mutex_lock(&dp_display->event_mutex);
+
+       rc = dp_display_set_mode(dp, &dp_display->dp_mode);
+       if (rc) {
+               DRM_ERROR("Failed to perform a mode set, rc=%d\n", rc);
+               mutex_unlock(&dp_display->event_mutex);
+               return rc;
+       }
+
+       rc = dp_display_prepare(dp);
+       if (rc) {
+               DRM_ERROR("DP display prepare failed, rc=%d\n", rc);
+               mutex_unlock(&dp_display->event_mutex);
+               return rc;
+       }
+
+       state =  atomic_read(&dp_display->hpd_state);
+       if (state == ST_SUSPENDED) {
+               /* start link training */
+               dp_add_event(dp_display, EV_HPD_PLUG_INT, 0, 0);
+               mutex_unlock(&dp_display->event_mutex);
+
+               /* wait until dp interface is up */
+               goto resume_done;
+       }
+
+       dp_display_enable(dp_display, 0);
+
+       rc = dp_display_post_enable(dp);
+       if (rc) {
+               DRM_ERROR("DP display post enable failed, rc=%d\n", rc);
+               dp_display_disable(dp_display, 0);
+               dp_display_unprepare(dp);
+       }
+
+       dp_del_event(dp_display, EV_CONNECT_PENDING_TIMEOUT);
+
+       if (state == ST_SUSPEND_PENDING)
+               dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0);
+
+       /* completed connection */
+       atomic_set(&dp_display->hpd_state, ST_CONNECTED);
+
+       mutex_unlock(&dp_display->event_mutex);
+
+       return rc;
+
+resume_done:
+       dp_display_wait4resume_done(dp_display);
+       return rc;
+}
+
+int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder)
+{
+       struct dp_display_private *dp_display;
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+       dp_ctrl_push_idle(dp_display->ctrl);
+
+       return 0;
+}
+
+int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder)
+{
+       int rc = 0;
+       u32 state;
+       struct dp_display_private *dp_display;
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+       mutex_lock(&dp_display->event_mutex);
+
+       dp_display_disable(dp_display, 0);
+
+       rc = dp_display_unprepare(dp);
+       if (rc)
+               DRM_ERROR("DP display unprepare failed, rc=%d\n", rc);
+
+       dp_del_event(dp_display, EV_DISCONNECT_PENDING_TIMEOUT);
+
+       state =  atomic_read(&dp_display->hpd_state);
+       if (state == ST_DISCONNECT_PENDING) {
+               /* completed disconnection */
+               atomic_set(&dp_display->hpd_state, ST_DISCONNECTED);
+       } else {
+               atomic_set(&dp_display->hpd_state, ST_SUSPEND_PENDING);
+       }
+
+       mutex_unlock(&dp_display->event_mutex);
+       return rc;
+}
+
+void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+       struct dp_display_private *dp_display;
+
+       dp_display = container_of(dp, struct dp_display_private, dp_display);
+
+       memset(&dp_display->dp_mode, 0x0, sizeof(struct dp_display_mode));
+
+       if (dp_display_check_video_test(dp))
+               dp_display->dp_mode.bpp = dp_display_get_test_bpp(dp);
+       else /* Default num_components per pixel = 3 */
+               dp_display->dp_mode.bpp = dp->connector->display_info.bpc * 3;
+
+       if (!dp_display->dp_mode.bpp)
+               dp_display->dp_mode.bpp = 24; /* Default bpp */
+
+       drm_mode_copy(&dp_display->dp_mode.drm_mode, adjusted_mode);
+
+       dp_display->dp_mode.v_active_low =
+               !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NVSYNC);
+
+       dp_display->dp_mode.h_active_low =
+               !!(dp_display->dp_mode.drm_mode.flags & DRM_MODE_FLAG_NHSYNC);
+}
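
msm_dp_display_mode_set() above picks the stream bpp in a fixed order: the compliance-test bpp when a video test is armed, otherwise bpc × 3 components from the connector, with 24 bpp as the final fallback. A small sketch of that selection order; the inputs are illustrative assumptions:

    /* Sketch of the bpp selection order above; inputs are assumptions. */
    #include <stdio.h>

    static unsigned int pick_bpp(int video_test, unsigned int test_bpp,
                                 unsigned int bpc)
    {
            unsigned int bpp = video_test ? test_bpp : bpc * 3;

            return bpp ? bpp : 24;  /* default when nothing usable is reported */
    }

    int main(void)
    {
            printf("%u\n", pick_bpp(0, 0, 8));      /* 24: 8 bpc x 3 */
            printf("%u\n", pick_bpp(0, 0, 0));      /* 24: fallback */
            printf("%u\n", pick_bpp(1, 18, 8));     /* 18: compliance test bpp */
            return 0;
    }
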
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
new file mode 100644 (file)
index 0000000..6092ba1
--- /dev/null
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_DISPLAY_H_
+#define _DP_DISPLAY_H_
+
+#include "dp_panel.h"
+#include <sound/hdmi-codec.h>
+
+struct msm_dp {
+       struct drm_device *drm_dev;
+       struct device *codec_dev;
+       struct drm_connector *connector;
+       struct drm_encoder *encoder;
+       bool is_connected;
+       bool audio_enabled;
+       bool power_on;
+
+       hdmi_codec_plugged_cb plugged_cb;
+
+       u32 max_pclk_khz;
+
+       u32 max_dp_lanes;
+       struct dp_audio *dp_audio;
+};
+
+int dp_display_set_plugged_cb(struct msm_dp *dp_display,
+               hdmi_codec_plugged_cb fn, struct device *codec_dev);
+int dp_display_validate_mode(struct msm_dp *dp_display, u32 mode_pclk_khz);
+int dp_display_get_modes(struct msm_dp *dp_display,
+               struct dp_display_mode *dp_mode);
+int dp_display_request_irq(struct msm_dp *dp_display);
+bool dp_display_check_video_test(struct msm_dp *dp_display);
+int dp_display_get_test_bpp(struct msm_dp *dp_display);
+void dp_display_signal_audio_complete(struct msm_dp *dp_display);
+
+#endif /* _DP_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
new file mode 100644 (file)
index 0000000..764f4b8
--- /dev/null
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "dp_drm.h"
+
+struct dp_connector {
+       struct drm_connector base;
+       struct msm_dp *dp_display;
+};
+#define to_dp_connector(x) container_of(x, struct dp_connector, base)
+
+/**
+ * dp_connector_detect - callback to determine if connector is connected
+ * @conn: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * Returns: Connector 'is connected' status
+ */
+static enum drm_connector_status dp_connector_detect(struct drm_connector *conn,
+               bool force)
+{
+       struct msm_dp *dp;
+
+       dp = to_dp_connector(conn)->dp_display;
+
+       DRM_DEBUG_DP("is_connected = %s\n",
+               (dp->is_connected) ? "true" : "false");
+
+       return (dp->is_connected) ? connector_status_connected :
+                                       connector_status_disconnected;
+}
+
+/**
+ * dp_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * Returns: Number of modes added
+ */
+static int dp_connector_get_modes(struct drm_connector *connector)
+{
+       int rc = 0;
+       struct msm_dp *dp;
+       struct dp_display_mode *dp_mode = NULL;
+       struct drm_display_mode *m, drm_mode;
+
+       if (!connector)
+               return 0;
+
+       dp = to_dp_connector(connector)->dp_display;
+
+       dp_mode = kzalloc(sizeof(*dp_mode),  GFP_KERNEL);
+       if (!dp_mode)
+               return 0;
+
+       /* the pluggable case assumes the EDID is read at HPD time */
+       if (dp->is_connected) {
+               /*
+                * The get_modes() function might return one mode that is stored
+                * in dp_mode when a compliance test is in progress. If not, the
+                * return value is equal to the total number of modes supported
+                * by the sink.
+                */
+               rc = dp_display_get_modes(dp, dp_mode);
+               if (rc <= 0) {
+                       DRM_ERROR("failed to get DP sink modes, rc=%d\n", rc);
+                       kfree(dp_mode);
+                       return rc;
+               }
+               if (dp_mode->drm_mode.clock) { /* valid DP mode */
+                       memset(&drm_mode, 0x0, sizeof(drm_mode));
+                       drm_mode_copy(&drm_mode, &dp_mode->drm_mode);
+                       m = drm_mode_duplicate(connector->dev, &drm_mode);
+                       if (!m) {
+                               DRM_ERROR("failed to add mode %ux%u\n",
+                                      drm_mode.hdisplay,
+                                      drm_mode.vdisplay);
+                               kfree(dp_mode);
+                               return 0;
+                       }
+                       drm_mode_probed_add(connector, m);
+               }
+       } else {
+               DRM_DEBUG_DP("No sink connected\n");
+       }
+       kfree(dp_mode);
+       return rc;
+}
+
+/**
+ * dp_connector_mode_valid - callback to determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * Returns: Validity status for specified mode
+ */
+static enum drm_mode_status dp_connector_mode_valid(
+               struct drm_connector *connector,
+               struct drm_display_mode *mode)
+{
+       struct msm_dp *dp_disp;
+
+       dp_disp = to_dp_connector(connector)->dp_display;
+
+       if ((dp_disp->max_pclk_khz <= 0) ||
+                       (dp_disp->max_pclk_khz > DP_MAX_PIXEL_CLK_KHZ) ||
+                       (mode->clock > dp_disp->max_pclk_khz))
+               return MODE_BAD;
+
+       return dp_display_validate_mode(dp_disp, mode->clock);
+}
+
+static const struct drm_connector_funcs dp_connector_funcs = {
+       .detect = dp_connector_detect,
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = drm_connector_cleanup,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const struct drm_connector_helper_funcs dp_connector_helper_funcs = {
+       .get_modes = dp_connector_get_modes,
+       .mode_valid = dp_connector_mode_valid,
+};
+
+/* connector initialization */
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
+{
+       struct drm_connector *connector = NULL;
+       struct dp_connector *dp_connector;
+       int ret;
+
+       dp_connector = devm_kzalloc(dp_display->drm_dev->dev,
+                                       sizeof(*dp_connector),
+                                       GFP_KERNEL);
+       if (!dp_connector)
+               return ERR_PTR(-ENOMEM);
+
+       dp_connector->dp_display = dp_display;
+
+       connector = &dp_connector->base;
+
+       ret = drm_connector_init(dp_display->drm_dev, connector,
+                       &dp_connector_funcs,
+                       DRM_MODE_CONNECTOR_DisplayPort);
+       if (ret)
+               return ERR_PTR(ret);
+
+       drm_connector_helper_add(connector, &dp_connector_helper_funcs);
+
+       /*
+        * Enable HPD so that the hpd event is handled when the cable
+        * is connected.
+        */
+       connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+       drm_connector_attach_encoder(connector, dp_display->encoder);
+
+       return connector;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.h b/drivers/gpu/drm/msm/dp/dp_drm.h
new file mode 100644 (file)
index 0000000..c27bfce
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_DRM_H_
+#define _DP_DRM_H_
+
+#include <linux/types.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "msm_drv.h"
+#include "dp_display.h"
+
+struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display);
+
+#endif /* _DP_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
new file mode 100644 (file)
index 0000000..5b8fe32
--- /dev/null
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)    "[drm-dp] %s: " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include "dp_hpd.h"
+
+/* DP specific VDM commands */
+#define DP_USBPD_VDM_STATUS    0x10
+#define DP_USBPD_VDM_CONFIGURE 0x11
+
+/* USBPD-TypeC specific Macros */
+#define VDM_VERSION            0x0
+#define USB_C_DP_SID           0xFF01
+
+struct dp_hpd_private {
+       struct device *dev;
+       struct dp_usbpd_cb *dp_cb;
+       struct dp_usbpd dp_usbpd;
+};
+
+int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
+{
+       int rc = 0;
+       struct dp_hpd_private *hpd_priv;
+
+       hpd_priv = container_of(dp_usbpd, struct dp_hpd_private,
+                                       dp_usbpd);
+
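+       /*
+        * Record the new HPD level, then route the event to the client:
+        * configure() on plug-in, disconnect() on removal.
+        */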
+       dp_usbpd->hpd_high = hpd;
+
+       if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
+                               || !hpd_priv->dp_cb->disconnect) {
+               pr_err("hpd dp_cb not initialized\n");
+               return -EINVAL;
+       }
+       if (hpd)
+               hpd_priv->dp_cb->configure(hpd_priv->dev);
+       else
+               hpd_priv->dp_cb->disconnect(hpd_priv->dev);
+
+       return rc;
+}
+
+struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb)
+{
+       struct dp_hpd_private *dp_hpd;
+
+       if (!cb) {
+               pr_err("invalid cb data\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       dp_hpd = devm_kzalloc(dev, sizeof(*dp_hpd), GFP_KERNEL);
+       if (!dp_hpd)
+               return ERR_PTR(-ENOMEM);
+
+       dp_hpd->dev = dev;
+       dp_hpd->dp_cb = cb;
+
+       dp_hpd->dp_usbpd.connect = dp_hpd_connect;
+
+       return &dp_hpd->dp_usbpd;
+}
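+
+/*
+ * A minimal usage sketch (hypothetical caller and helper callbacks, not
+ * part of this patch): register the callbacks, then drive a simulated
+ * plug-in through the returned handle.
+ *
+ *     static struct dp_usbpd_cb cb = {
+ *             .configure  = my_configure,
+ *             .disconnect = my_disconnect,
+ *     };
+ *     struct dp_usbpd *pd = dp_hpd_get(dev, &cb);
+ *
+ *     if (!IS_ERR(pd))
+ *             pd->connect(pd, true);
+ */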
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.h b/drivers/gpu/drm/msm/dp/dp_hpd.h
new file mode 100644 (file)
index 0000000..5bc5bb6
--- /dev/null
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_HPD_H_
+#define _DP_HPD_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+enum plug_orientation {
+       ORIENTATION_NONE,
+       ORIENTATION_CC1,
+       ORIENTATION_CC2,
+};
+
+/**
+ * struct dp_usbpd - DisplayPort status
+ *
+ * @orientation: plug orientation configuration
+ * @low_pow_st: low power state
+ * @adaptor_dp_en: adaptor functionality enabled
+ * @multi_func: multi-function preferred
+ * @usb_config_req: request to switch to usb
+ * @exit_dp_mode: request exit from displayport mode
+ * @hpd_high: Hot Plug Detect signal is high
+ * @hpd_irq: Change in the status since last message
+ * @alt_mode_cfg_done: bool to specify alt mode status
+ * @debug_en: bool to specify debug mode
+ * @connect: simulate disconnect or connect for debug mode
+ */
+struct dp_usbpd {
+       enum plug_orientation orientation;
+       bool low_pow_st;
+       bool adaptor_dp_en;
+       bool multi_func;
+       bool usb_config_req;
+       bool exit_dp_mode;
+       bool hpd_high;
+       bool hpd_irq;
+       bool alt_mode_cfg_done;
+       bool debug_en;
+
+       int (*connect)(struct dp_usbpd *dp_usbpd, bool hpd);
+};
+
+/**
+ * struct dp_usbpd_cb - callback functions provided by the client
+ *
+ * @configure: called by the usbpd module when PD communication has
+ * completed and the usb peripheral has been configured in DP mode.
+ * @disconnect: notify the cable disconnect issued by usb.
+ * @attention: notify any attention message issued by usb.
+ */
+struct dp_usbpd_cb {
+       int (*configure)(struct device *dev);
+       int (*disconnect)(struct device *dev);
+       int (*attention)(struct device *dev);
+};
+
+/**
+ * dp_hpd_get() - setup hpd module
+ *
+ * @dev: device instance of the caller
+ * @cb: struct containing callback function pointers.
+ *
+ * This function allows the client to initialize the usbpd
+ * module, which relays connect and disconnect events to the
+ * client's callbacks.
+ */
+struct dp_usbpd *dp_hpd_get(struct device *dev, struct dp_usbpd_cb *cb);
+
+int dp_hpd_register(struct dp_usbpd *dp_usbpd);
+void dp_hpd_unregister(struct dp_usbpd *dp_usbpd);
+int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd);
+
+#endif /* _DP_HPD_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
new file mode 100644 (file)
index 0000000..c811da5
--- /dev/null
@@ -0,0 +1,1210 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)    "[drm-dp] %s: " fmt, __func__
+
+#include <drm/drm_print.h>
+
+#include "dp_link.h"
+#include "dp_panel.h"
+
+#define DP_TEST_REQUEST_MASK           0x7F
+
+enum audio_sample_rate {
+       AUDIO_SAMPLE_RATE_32_KHZ        = 0x00,
+       AUDIO_SAMPLE_RATE_44_1_KHZ      = 0x01,
+       AUDIO_SAMPLE_RATE_48_KHZ        = 0x02,
+       AUDIO_SAMPLE_RATE_88_2_KHZ      = 0x03,
+       AUDIO_SAMPLE_RATE_96_KHZ        = 0x04,
+       AUDIO_SAMPLE_RATE_176_4_KHZ     = 0x05,
+       AUDIO_SAMPLE_RATE_192_KHZ       = 0x06,
+};
+
+enum audio_pattern_type {
+       AUDIO_TEST_PATTERN_OPERATOR_DEFINED     = 0x00,
+       AUDIO_TEST_PATTERN_SAWTOOTH             = 0x01,
+};
+
+struct dp_link_request {
+       u32 test_requested;
+       u32 test_link_rate;
+       u32 test_lane_count;
+};
+
+struct dp_link_private {
+       u32 prev_sink_count;
+       struct device *dev;
+       struct drm_dp_aux *aux;
+       struct dp_link dp_link;
+
+       struct dp_link_request request;
+       struct mutex psm_mutex;
+       u8 link_status[DP_LINK_STATUS_SIZE];
+};
+
+static int dp_aux_link_power_up(struct drm_dp_aux *aux,
+                                       struct dp_link_info *link)
+{
+       u8 value;
+       int err;
+
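+       /* the DP_SET_POWER register is only defined for DPCD 1.1 and above */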
+       if (link->revision < 0x11)
+               return 0;
+
+       err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+       if (err < 0)
+               return err;
+
+       value &= ~DP_SET_POWER_MASK;
+       value |= DP_SET_POWER_D0;
+
+       err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+       if (err < 0)
+               return err;
+
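+       /*
+        * Per the DP spec a sink must exit the power saving state within
+        * 1 ms, so give it a moment before driving the link.
+        */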
+       usleep_range(1000, 2000);
+
+       return 0;
+}
+
+static int dp_aux_link_power_down(struct drm_dp_aux *aux,
+                                       struct dp_link_info *link)
+{
+       u8 value;
+       int err;
+
+       if (link->revision < 0x11)
+               return 0;
+
+       err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value);
+       if (err < 0)
+               return err;
+
+       value &= ~DP_SET_POWER_MASK;
+       value |= DP_SET_POWER_D3;
+
+       err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static int dp_link_get_period(struct dp_link_private *link, int const addr)
+{
+       int ret = 0;
+       u8 data;
+       u32 const max_audio_period = 0xA;
+
+       /* TEST_AUDIO_PERIOD_CH_XX */
+       if (drm_dp_dpcd_readb(link->aux, addr, &data) < 0) {
+               DRM_ERROR("failed to read test_audio_period (0x%x)\n", addr);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       /* Period - Bits 3:0 */
+       data = data & 0xF;
+       if ((int)data > max_audio_period) {
+               DRM_ERROR("invalid test_audio_period_ch_1 = 0x%x\n", data);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       ret = data;
+exit:
+       return ret;
+}
+
+static int dp_link_parse_audio_channel_period(struct dp_link_private *link)
+{
+       int ret = 0;
+       struct dp_link_test_audio *req = &link->dp_link.test_audio;
+
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH1);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_1 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_1 = 0x%x\n", ret);
+
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH2);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_2 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_2 = 0x%x\n", ret);
+
+       /* TEST_AUDIO_PERIOD_CH_3 (Byte 0x275) */
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH3);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_3 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_3 = 0x%x\n", ret);
+
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH4);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_4 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_4 = 0x%x\n", ret);
+
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH5);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_5 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_5 = 0x%x\n", ret);
+
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH6);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_6 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_6 = 0x%x\n", ret);
+
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH7);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_7 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_7 = 0x%x\n", ret);
+
+       ret = dp_link_get_period(link, DP_TEST_AUDIO_PERIOD_CH8);
+       if (ret == -EINVAL)
+               goto exit;
+
+       req->test_audio_period_ch_8 = ret;
+       DRM_DEBUG_DP("test_audio_period_ch_8 = 0x%x\n", ret);
+exit:
+       return ret;
+}
+
+static int dp_link_parse_audio_pattern_type(struct dp_link_private *link)
+{
+       int ret = 0;
+       u8 data;
+       ssize_t rlen;
+       int const max_audio_pattern_type = 0x1;
+
+       rlen = drm_dp_dpcd_readb(link->aux,
+                               DP_TEST_AUDIO_PATTERN_TYPE, &data);
+       if (rlen < 0) {
+               DRM_ERROR("failed to read link audio mode. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       /* Audio Pattern Type - Bits 7:0 */
+       if ((int)data > max_audio_pattern_type) {
+               DRM_ERROR("invalid audio pattern type = 0x%x\n", data);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       link->dp_link.test_audio.test_audio_pattern_type = data;
+       DRM_DEBUG_DP("audio pattern type = 0x%x\n", data);
+exit:
+       return ret;
+}
+
+static int dp_link_parse_audio_mode(struct dp_link_private *link)
+{
+       int ret = 0;
+       u8 data;
+       ssize_t rlen;
+       int const max_audio_sampling_rate = 0x6;
+       int const max_audio_channel_count = 0x8;
+       int sampling_rate = 0x0;
+       int channel_count = 0x0;
+
+       rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_AUDIO_MODE, &data);
+       if (rlen < 0) {
+               DRM_ERROR("failed to read link audio mode. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
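+       /*
+        * DP_TEST_AUDIO_MODE packs the request into one byte: bits 3:0
+        * hold the sampling rate code and bits 7:4 hold (channel count -
+        * 1), e.g. data = 0x12 decodes to 48 kHz with 2 channels.
+        */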
+       /* Sampling Rate - Bits 3:0 */
+       sampling_rate = data & 0xF;
+       if (sampling_rate > max_audio_sampling_rate) {
+               DRM_ERROR("sampling rate (0x%x) greater than max (0x%x)\n",
+                               sampling_rate, max_audio_sampling_rate);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       /* Channel Count - Bits 7:4 */
+       channel_count = ((data & 0xF0) >> 4) + 1;
+       if (channel_count > max_audio_channel_count) {
+               DRM_ERROR("channel_count (0x%x) greater than max (0x%x)\n",
+                               channel_count, max_audio_channel_count);
+               ret = -EINVAL;
+               goto exit;
+       }
+
+       link->dp_link.test_audio.test_audio_sampling_rate = sampling_rate;
+       link->dp_link.test_audio.test_audio_channel_count = channel_count;
+       DRM_DEBUG_DP("sampling_rate = 0x%x, channel_count = 0x%x\n",
+                                       sampling_rate, channel_count);
+exit:
+       return ret;
+}
+
+static int dp_link_parse_audio_pattern_params(struct dp_link_private *link)
+{
+       int ret = 0;
+
+       ret = dp_link_parse_audio_mode(link);
+       if (ret)
+               goto exit;
+
+       ret = dp_link_parse_audio_pattern_type(link);
+       if (ret)
+               goto exit;
+
+       ret = dp_link_parse_audio_channel_period(link);
+
+exit:
+       return ret;
+}
+
+static bool dp_link_is_video_pattern_valid(u32 pattern)
+{
+       switch (pattern) {
+       case DP_NO_TEST_PATTERN:
+       case DP_COLOR_RAMP:
+       case DP_BLACK_AND_WHITE_VERTICAL_LINES:
+       case DP_COLOR_SQUARE:
+               return true;
+       default:
+               return false;
+       }
+}
+
+/**
+ * dp_link_is_bit_depth_valid() - validates the bit depth requested
+ * @tbd: bit depth requested by the sink
+ *
+ * Returns true if the requested bit depth is supported.
+ */
+static bool dp_link_is_bit_depth_valid(u32 tbd)
+{
+       /* DP_TEST_VIDEO_PATTERN_NONE is treated as invalid */
+       switch (tbd) {
+       case DP_TEST_BIT_DEPTH_6:
+       case DP_TEST_BIT_DEPTH_8:
+       case DP_TEST_BIT_DEPTH_10:
+               return true;
+       default:
+               return false;
+       }
+}
+
+static int dp_link_parse_timing_params1(struct dp_link_private *link,
+                                       int addr, int len, u32 *val)
+{
+       u8 bp[2];
+       int rlen;
+
+       if (len != 2)
+               return -EINVAL;
+
+       /* Read the requested 16-bit timing parameter from the DPCD. */
+       rlen = drm_dp_dpcd_read(link->aux, addr, bp, len);
+       if (rlen < len) {
+               DRM_ERROR("failed to read 0x%x\n", addr);
+               return -EINVAL;
+       }
+
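+       /* DPCD stores the pair HI byte first: value = (HI << 8) | LO */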
+       *val = bp[1] | (bp[0] << 8);
+
+       return 0;
+}
+
+static int dp_link_parse_timing_params2(struct dp_link_private *link,
+                                       int addr, int len,
+                                       u32 *val1, u32 *val2)
+{
+       u8 bp[2];
+       int rlen;
+
+       if (len != 2)
+               return -EINVAL;
+
+       /* Read the requested 16-bit timing parameter from the DPCD. */
+       rlen = drm_dp_dpcd_read(link->aux, addr, bp, len);
+       if (rlen < len) {
+               DRM_ERROR("failed to read 0x%x\n", addr);
+               return -EINVAL;
+       }
+
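+       /*
+        * The pair packs a 1-bit flag and a 15-bit value: bit 7 of the HI
+        * byte is the sync polarity, the remaining 15 bits are the width,
+        * e.g. bp = {0x80, 0x20} decodes to polarity 1, width 0x20.
+        */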
+       *val1 = (bp[0] & BIT(7)) >> 7;
+       *val2 = bp[1] | ((bp[0] & 0x7F) << 8);
+
+       return 0;
+}
+
+static int dp_link_parse_timing_params3(struct dp_link_private *link,
+                                       int addr, u32 *val)
+{
+       u8 bp;
+       u32 len = 1;
+       int rlen;
+
+       rlen = drm_dp_dpcd_read(link->aux, addr, &bp, len);
+       if (rlen < 1) {
+               DRM_ERROR("failed to read 0x%x\n", addr);
+               return -EINVAL;
+       }
+       *val = bp;
+
+       return 0;
+}
+
+/**
+ * dp_link_parse_video_pattern_params() - parses video pattern parameters from DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the video link pattern and the link
+ * bit depth requested by the sink, and if the values parsed are valid.
+ */
+static int dp_link_parse_video_pattern_params(struct dp_link_private *link)
+{
+       int ret = 0;
+       ssize_t rlen;
+       u8 bp;
+
+       rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_PATTERN, &bp);
+       if (rlen < 0) {
+               DRM_ERROR("failed to read link video pattern. rlen=%zd\n",
+                       rlen);
+               return rlen;
+       }
+
+       if (!dp_link_is_video_pattern_valid(bp)) {
+               DRM_ERROR("invalid link video pattern = 0x%x\n", bp);
+               ret = -EINVAL;
+               return ret;
+       }
+
+       link->dp_link.test_video.test_video_pattern = bp;
+
+       /* Read the requested color bit depth and dynamic range (Byte 0x232) */
+       rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_MISC0, &bp);
+       if (rlen < 0) {
+               DRM_ERROR("failed to read link bit depth. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       /* Dynamic Range */
+       link->dp_link.test_video.test_dyn_range =
+                       (bp & DP_TEST_DYNAMIC_RANGE_CEA);
+
+       /* Color bit depth */
+       bp &= DP_TEST_BIT_DEPTH_MASK;
+       if (!dp_link_is_bit_depth_valid(bp)) {
+               DRM_ERROR("invalid link bit depth = 0x%x\n", bp);
+               ret = -EINVAL;
+               return ret;
+       }
+
+       link->dp_link.test_video.test_bit_depth = bp;
+
+       /* resolution timing params */
+       ret = dp_link_parse_timing_params1(link, DP_TEST_H_TOTAL_HI, 2,
+                       &link->dp_link.test_video.test_h_total);
+       if (ret) {
+               DRM_ERROR("failed to parse test_htotal(DP_TEST_H_TOTAL_HI)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params1(link, DP_TEST_V_TOTAL_HI, 2,
+                       &link->dp_link.test_video.test_v_total);
+       if (ret) {
+               DRM_ERROR("failed to parse test_v_total(DP_TEST_V_TOTAL_HI)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params1(link, DP_TEST_H_START_HI, 2,
+                       &link->dp_link.test_video.test_h_start);
+       if (ret) {
+               DRM_ERROR("failed to parse test_h_start(DP_TEST_H_START_HI)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params1(link, DP_TEST_V_START_HI, 2,
+                       &link->dp_link.test_video.test_v_start);
+       if (ret) {
+               DRM_ERROR("failed to parse test_v_start(DP_TEST_V_START_HI)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params2(link, DP_TEST_HSYNC_HI, 2,
+                       &link->dp_link.test_video.test_hsync_pol,
+                       &link->dp_link.test_video.test_hsync_width);
+       if (ret) {
+               DRM_ERROR("failed to parse (DP_TEST_HSYNC_HI)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params2(link, DP_TEST_VSYNC_HI, 2,
+                       &link->dp_link.test_video.test_vsync_pol,
+                       &link->dp_link.test_video.test_vsync_width);
+       if (ret) {
+               DRM_ERROR("failed to parse (DP_TEST_VSYNC_HI)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params1(link, DP_TEST_H_WIDTH_HI, 2,
+                       &link->dp_link.test_video.test_h_width);
+       if (ret) {
+               DRM_ERROR("failed to parse test_h_width(DP_TEST_H_WIDTH_HI)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params1(link, DP_TEST_V_HEIGHT_HI, 2,
+                       &link->dp_link.test_video.test_v_height);
+       if (ret) {
+               DRM_ERROR("failed to parse test_v_height\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params3(link, DP_TEST_MISC1,
+               &link->dp_link.test_video.test_rr_d);
+       link->dp_link.test_video.test_rr_d &= DP_TEST_REFRESH_DENOMINATOR;
+       if (ret) {
+               DRM_ERROR("failed to parse test_rr_d (DP_TEST_MISC1)\n");
+               return ret;
+       }
+
+       ret = dp_link_parse_timing_params3(link, DP_TEST_REFRESH_RATE_NUMERATOR,
+               &link->dp_link.test_video.test_rr_n);
+       if (ret) {
+               DRM_ERROR("failed to parse test_rr_n\n");
+               return ret;
+       }
+
+       DRM_DEBUG_DP("link video pattern = 0x%x\n"
+               "link dynamic range = 0x%x\n"
+               "link bit depth = 0x%x\n"
+               "TEST_H_TOTAL = %d, TEST_V_TOTAL = %d\n"
+               "TEST_H_START = %d, TEST_V_START = %d\n"
+               "TEST_HSYNC_POL = %d\n"
+               "TEST_HSYNC_WIDTH = %d\n"
+               "TEST_VSYNC_POL = %d\n"
+               "TEST_VSYNC_WIDTH = %d\n"
+               "TEST_H_WIDTH = %d\n"
+               "TEST_V_HEIGHT = %d\n"
+               "TEST_REFRESH_DENOMINATOR = %d\n"
+                "TEST_REFRESH_NUMERATOR = %d\n",
+               link->dp_link.test_video.test_video_pattern,
+               link->dp_link.test_video.test_dyn_range,
+               link->dp_link.test_video.test_bit_depth,
+               link->dp_link.test_video.test_h_total,
+               link->dp_link.test_video.test_v_total,
+               link->dp_link.test_video.test_h_start,
+               link->dp_link.test_video.test_v_start,
+               link->dp_link.test_video.test_hsync_pol,
+               link->dp_link.test_video.test_hsync_width,
+               link->dp_link.test_video.test_vsync_pol,
+               link->dp_link.test_video.test_vsync_width,
+               link->dp_link.test_video.test_h_width,
+               link->dp_link.test_video.test_v_height,
+               link->dp_link.test_video.test_rr_d,
+               link->dp_link.test_video.test_rr_n);
+
+       return ret;
+}
+
+/**
+ * dp_link_parse_link_training_params() - parses link training parameters from
+ * DPCD
+ * @link: Display Port Driver data
+ *
+ * Returns 0 if it successfully parses the link rate (Byte 0x219) and lane
+ * count (Byte 0x220), and if these parsed values are valid.
+ */
+static int dp_link_parse_link_training_params(struct dp_link_private *link)
+{
+       u8 bp;
+       ssize_t rlen;
+
+       rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LINK_RATE,  &bp);
+       if (rlen < 0) {
+               DRM_ERROR("failed to read link rate. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       if (!is_link_rate_valid(bp)) {
+               DRM_ERROR("invalid link rate = 0x%x\n", bp);
+               return -EINVAL;
+       }
+
+       link->request.test_link_rate = bp;
+       DRM_DEBUG_DP("link rate = 0x%x\n", link->request.test_link_rate);
+
+       rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_LANE_COUNT, &bp);
+       if (rlen < 0) {
+               DRM_ERROR("failed to read lane count. rlen=%zd\n", rlen);
+               return rlen;
+       }
+       bp &= DP_MAX_LANE_COUNT_MASK;
+
+       if (!is_lane_count_valid(bp)) {
+               DRM_ERROR("invalid lane count = 0x%x\n", bp);
+               return -EINVAL;
+       }
+
+       link->request.test_lane_count = bp;
+       DRM_DEBUG_DP("lane count = 0x%x\n", link->request.test_lane_count);
+       return 0;
+}
+
+/**
+ * dp_link_parse_phy_test_params() - parses the phy test pattern parameters
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD (Byte 0x248) for the DP PHY test pattern that is being
+ * requested.
+ */
+static int dp_link_parse_phy_test_params(struct dp_link_private *link)
+{
+       u8 data;
+       ssize_t rlen;
+
+       rlen = drm_dp_dpcd_readb(link->aux, DP_PHY_TEST_PATTERN,
+                                       &data);
+       if (rlen < 0) {
+               DRM_ERROR("failed to read phy link pattern. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       link->dp_link.phy_params.phy_test_pattern_sel = data & 0x07;
+
+       DRM_DEBUG_DP("phy_test_pattern_sel = 0x%x\n", data);
+
+       switch (data) {
+       case DP_PHY_TEST_PATTERN_SEL_MASK:
+       case DP_PHY_TEST_PATTERN_NONE:
+       case DP_PHY_TEST_PATTERN_D10_2:
+       case DP_PHY_TEST_PATTERN_ERROR_COUNT:
+       case DP_PHY_TEST_PATTERN_PRBS7:
+       case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
+       case DP_PHY_TEST_PATTERN_CP2520:
+               return 0;
+       default:
+               return -EINVAL;
+       }
+}
+
+/**
+ * dp_link_is_video_audio_test_requested() - checks for audio/video test request
+ * @link: test request bitmask set by the sink
+ *
+ * Returns true if the requested test is a permitted audio/video test.
+ */
+static bool dp_link_is_video_audio_test_requested(u32 link)
+{
+       u8 video_audio_test = (DP_TEST_LINK_VIDEO_PATTERN |
+                               DP_TEST_LINK_AUDIO_PATTERN |
+                               DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
+
+       return ((link & video_audio_test) &&
+               !(link & ~video_audio_test));
+}
+
+/**
+ * dp_link_parse_request() - parses link request parameters from sink
+ * @link: Display Port Driver data
+ *
+ * Parses the DPCD to check if an automated test is requested (Byte 0x201),
+ * and what type of test automation is being requested (Byte 0x218).
+ */
+static int dp_link_parse_request(struct dp_link_private *link)
+{
+       int ret = 0;
+       u8 data;
+       ssize_t rlen;
+
+       /*
+        * Read the device service IRQ vector (Byte 0x201) to determine
+        * whether an automated test has been requested by the sink.
+        */
+       rlen = drm_dp_dpcd_readb(link->aux,
+                               DP_DEVICE_SERVICE_IRQ_VECTOR, &data);
+       if (rlen < 0) {
+               DRM_ERROR("aux read failed. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       DRM_DEBUG_DP("device service irq vector = 0x%x\n", data);
+
+       if (!(data & DP_AUTOMATED_TEST_REQUEST)) {
+               DRM_DEBUG_DP("no test requested\n");
+               return 0;
+       }
+
+       /*
+        * Read the test request byte (Byte 0x218) to determine what type
+        * of automated test has been requested by the sink.
+        */
+       rlen = drm_dp_dpcd_readb(link->aux, DP_TEST_REQUEST, &data);
+       if (rlen < 0) {
+               DRM_ERROR("aux read failed. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       if (!data || (data == DP_TEST_LINK_FAUX_PATTERN)) {
+               DRM_DEBUG_DP("link 0x%x not supported\n", data);
+               goto end;
+       }
+
+       DRM_DEBUG_DP("Test:(0x%x) requested\n", data);
+       link->request.test_requested = data;
+       if (link->request.test_requested == DP_TEST_LINK_PHY_TEST_PATTERN) {
+               ret = dp_link_parse_phy_test_params(link);
+               if (ret)
+                       goto end;
+               ret = dp_link_parse_link_training_params(link);
+               if (ret)
+                       goto end;
+       }
+
+       if (link->request.test_requested == DP_TEST_LINK_TRAINING) {
+               ret = dp_link_parse_link_training_params(link);
+               if (ret)
+                       goto end;
+       }
+
+       if (dp_link_is_video_audio_test_requested(
+                       link->request.test_requested)) {
+               ret = dp_link_parse_video_pattern_params(link);
+               if (ret)
+                       goto end;
+
+               ret = dp_link_parse_audio_pattern_params(link);
+       }
+end:
+       /*
+        * Send a DP_TEST_ACK if all link parameters are valid, otherwise send
+        * a DP_TEST_NAK.
+        */
+       if (ret) {
+               link->dp_link.test_response = DP_TEST_NAK;
+       } else {
+               if (link->request.test_requested != DP_TEST_LINK_EDID_READ)
+                       link->dp_link.test_response = DP_TEST_ACK;
+               else
+                       link->dp_link.test_response =
+                               DP_TEST_EDID_CHECKSUM_WRITE;
+       }
+
+       return ret;
+}
+
+/**
+ * dp_link_parse_sink_count() - parses the sink count
+ * @dp_link: pointer to link module data
+ *
+ * Parses the DPCD to check if there is an update to the sink count
+ * (Byte 0x200), and whether all the sink devices connected have Content
+ * Protection enabled.
+ */
+static int dp_link_parse_sink_count(struct dp_link *dp_link)
+{
+       ssize_t rlen;
+       bool cp_ready;
+
+       struct dp_link_private *link = container_of(dp_link,
+                       struct dp_link_private, dp_link);
+
+       rlen = drm_dp_dpcd_readb(link->aux, DP_SINK_COUNT,
+                                &link->dp_link.sink_count);
+       if (rlen < 0) {
+               DRM_ERROR("sink count read failed. rlen=%zd\n", rlen);
+               return rlen;
+       }
+
+       cp_ready = link->dp_link.sink_count & DP_SINK_CP_READY;
+
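+       /* bit 6 of DP_SINK_COUNT is CP_READY; DP_GET_SINK_COUNT() strips it */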
+       link->dp_link.sink_count =
+               DP_GET_SINK_COUNT(link->dp_link.sink_count);
+
+       DRM_DEBUG_DP("sink_count = 0x%x, cp_ready = 0x%x\n",
+               link->dp_link.sink_count, cp_ready);
+       return 0;
+}
+
+static void dp_link_parse_sink_status_field(struct dp_link_private *link)
+{
+       int len = 0;
+
+       link->prev_sink_count = link->dp_link.sink_count;
+       dp_link_parse_sink_count(&link->dp_link);
+
+       len = drm_dp_dpcd_read_link_status(link->aux,
+               link->link_status);
+       if (len < DP_LINK_STATUS_SIZE)
+               DRM_ERROR("DP link status read failed\n");
+       dp_link_parse_request(link);
+}
+
+/**
+ * dp_link_process_link_training_request() - processes new training requests
+ * @link: Display Port link data
+ *
+ * This function will handle new link training requests that are initiated by
+ * the sink. In particular, it will update the requested lane count and link
+ * rate, and then trigger the link retraining procedure.
+ *
+ * The function will return 0 if a link training request has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_training_request(struct dp_link_private *link)
+{
+       if (link->request.test_requested != DP_TEST_LINK_TRAINING)
+               return -EINVAL;
+
+       DRM_DEBUG_DP("Test:0x%x link rate = 0x%x, lane count = 0x%x\n",
+                       DP_TEST_LINK_TRAINING,
+                       link->request.test_link_rate,
+                       link->request.test_lane_count);
+
+       link->dp_link.link_params.num_lanes = link->request.test_lane_count;
+       link->dp_link.link_params.rate = link->request.test_link_rate;
+
+       return 0;
+}
+
+bool dp_link_send_test_response(struct dp_link *dp_link)
+{
+       struct dp_link_private *link = NULL;
+       int ret = 0;
+
+       if (!dp_link) {
+               DRM_ERROR("invalid input\n");
+               return false;
+       }
+
+       link = container_of(dp_link, struct dp_link_private, dp_link);
+
+       ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_RESPONSE,
+                       dp_link->test_response);
+
+       return ret == 1;
+}
+
+int dp_link_psm_config(struct dp_link *dp_link,
+                             struct dp_link_info *link_info, bool enable)
+{
+       struct dp_link_private *link = NULL;
+       int ret = 0;
+
+       if (!dp_link) {
+               DRM_ERROR("invalid params\n");
+               return -EINVAL;
+       }
+
+       link = container_of(dp_link, struct dp_link_private, dp_link);
+
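+       /* enable == true puts the sink into D3 (power down); false restores D0 */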
+       mutex_lock(&link->psm_mutex);
+       if (enable)
+               ret = dp_aux_link_power_down(link->aux, link_info);
+       else
+               ret = dp_aux_link_power_up(link->aux, link_info);
+
+       if (ret)
+               DRM_ERROR("Failed to %s low power mode\n", enable ?
+                                                       "enter" : "exit");
+       else
+               dp_link->psm_enabled = enable;
+
+       mutex_unlock(&link->psm_mutex);
+       return ret;
+}
+
+bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum)
+{
+       struct dp_link_private *link = NULL;
+       int ret = 0;
+
+       if (!dp_link) {
+               DRM_ERROR("invalid input\n");
+               return false;
+       }
+
+       link = container_of(dp_link, struct dp_link_private, dp_link);
+
+       ret = drm_dp_dpcd_writeb(link->aux, DP_TEST_EDID_CHECKSUM,
+                                               checksum);
+       return ret == 1;
+}
+
+static int dp_link_parse_vx_px(struct dp_link_private *link)
+{
+       int ret = 0;
+
+       DRM_DEBUG_DP("vx: 0=%d, 1=%d, 2=%d, 3=%d\n",
+               drm_dp_get_adjust_request_voltage(link->link_status, 0),
+               drm_dp_get_adjust_request_voltage(link->link_status, 1),
+               drm_dp_get_adjust_request_voltage(link->link_status, 2),
+               drm_dp_get_adjust_request_voltage(link->link_status, 3));
+
+       DRM_DEBUG_DP("px: 0=%d, 1=%d, 2=%d, 3=%d\n",
+               drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0),
+               drm_dp_get_adjust_request_pre_emphasis(link->link_status, 1),
+               drm_dp_get_adjust_request_pre_emphasis(link->link_status, 2),
+               drm_dp_get_adjust_request_pre_emphasis(link->link_status, 3));
+
+       /*
+        * Update the voltage and pre-emphasis levels as per DPCD request
+        * vector.
+        */
+       DRM_DEBUG_DP("Current: v_level = 0x%x, p_level = 0x%x\n",
+                       link->dp_link.phy_params.v_level,
+                       link->dp_link.phy_params.p_level);
+       link->dp_link.phy_params.v_level =
+               drm_dp_get_adjust_request_voltage(link->link_status, 0);
+       link->dp_link.phy_params.p_level =
+               drm_dp_get_adjust_request_pre_emphasis(link->link_status, 0);
+       DRM_DEBUG_DP("Requested: v_level = 0x%x, p_level = 0x%x\n",
+                       link->dp_link.phy_params.v_level,
+                       link->dp_link.phy_params.p_level);
+
+       return ret;
+}
+
+/**
+ * dp_link_process_phy_test_pattern_request() - process new phy link requests
+ * @link: Display Port Driver data
+ *
+ * This function will handle new phy link pattern requests that are initiated
+ * by the sink. The function will return 0 if a phy link pattern has been
+ * processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_phy_test_pattern_request(
+               struct dp_link_private *link)
+{
+       int ret = 0;
+
+       if (!(link->request.test_requested & DP_TEST_LINK_PHY_TEST_PATTERN)) {
+               DRM_DEBUG_DP("no phy test\n");
+               return -EINVAL;
+       }
+
+       if (!is_link_rate_valid(link->request.test_link_rate) ||
+               !is_lane_count_valid(link->request.test_lane_count)) {
+               DRM_ERROR("Invalid: link rate = 0x%x,lane count = 0x%x\n",
+                               link->request.test_link_rate,
+                               link->request.test_lane_count);
+               return -EINVAL;
+       }
+
+       DRM_DEBUG_DP("Current: rate = 0x%x, lane count = 0x%x\n",
+                       link->dp_link.link_params.rate,
+                       link->dp_link.link_params.num_lanes);
+
+       DRM_DEBUG_DP("Requested: rate = 0x%x, lane count = 0x%x\n",
+                       link->request.test_link_rate,
+                       link->request.test_lane_count);
+
+       link->dp_link.link_params.num_lanes = link->request.test_lane_count;
+       link->dp_link.link_params.rate = link->request.test_link_rate;
+
+       ret = dp_link_parse_vx_px(link);
+
+       if (ret)
+               DRM_ERROR("parse_vx_px failed. ret=%d\n", ret);
+
+       return ret;
+}
+
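+/*
+ * link_status[] holds the six DPCD bytes starting at DP_LANE0_1_STATUS
+ * (0x202), so a register address indexes it relative to that base.
+ */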
+static u8 get_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+       return link_status[r - DP_LANE0_1_STATUS];
+}
+
+/**
+ * dp_link_process_link_status_update() - processes link status updates
+ * @link: Display Port link module data
+ *
+ * This function will check for changes in the link status, e.g. clock
+ * recovery done on all lanes, and trigger link training if there is a
+ * failure/error on the link.
+ *
+ * The function will return 0 if a link status update has been processed,
+ * otherwise it will return -EINVAL.
+ */
+static int dp_link_process_link_status_update(struct dp_link_private *link)
+{
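+       /*
+        * Act only when the sink flagged a status change and the link has
+        * actually degraded, i.e. clock recovery or channel equalization
+        * is no longer okay on the active lanes.
+        */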
+       if (!(get_link_status(link->link_status,
+                               DP_LANE_ALIGN_STATUS_UPDATED) &
+                               DP_LINK_STATUS_UPDATED) ||
+                       (drm_dp_clock_recovery_ok(link->link_status,
+                                       link->dp_link.link_params.num_lanes) &&
+                       drm_dp_channel_eq_ok(link->link_status,
+                                       link->dp_link.link_params.num_lanes)))
+               return -EINVAL;
+
+       DRM_DEBUG_DP("channel_eq_done = %d, clock_recovery_done = %d\n",
+                       drm_dp_channel_eq_ok(link->link_status,
+                       link->dp_link.link_params.num_lanes),
+                       drm_dp_clock_recovery_ok(link->link_status,
+                       link->dp_link.link_params.num_lanes));
+
+       return 0;
+}
+
+/**
+ * dp_link_process_downstream_port_status_change() - process port status changes
+ * @link: Display Port Driver data
+ *
+ * This function will handle downstream port updates that are initiated by
+ * the sink. If the downstream port status has changed, the EDID is read via
+ * AUX.
+ *
+ * The function will return 0 if a downstream port update has been
+ * processed, otherwise it will return -EINVAL.
+ */
+static int dp_link_process_ds_port_status_change(struct dp_link_private *link)
+{
+       if (get_link_status(link->link_status, DP_LANE_ALIGN_STATUS_UPDATED) &
+                                       DP_DOWNSTREAM_PORT_STATUS_CHANGED)
+               goto reset;
+
+       if (link->prev_sink_count == link->dp_link.sink_count)
+               return -EINVAL;
+
+reset:
+       /* reset prev_sink_count */
+       link->prev_sink_count = link->dp_link.sink_count;
+
+       return 0;
+}
+
+static bool dp_link_is_video_pattern_requested(struct dp_link_private *link)
+{
+       return (link->request.test_requested & DP_TEST_LINK_VIDEO_PATTERN)
+               && !(link->request.test_requested &
+               DP_TEST_LINK_AUDIO_DISABLED_VIDEO);
+}
+
+static bool dp_link_is_audio_pattern_requested(struct dp_link_private *link)
+{
+       return (link->request.test_requested & DP_TEST_LINK_AUDIO_PATTERN);
+}
+
+static void dp_link_reset_data(struct dp_link_private *link)
+{
+       link->request = (const struct dp_link_request){ 0 };
+       link->dp_link.test_video = (const struct dp_link_test_video){ 0 };
+       link->dp_link.test_video.test_bit_depth = DP_TEST_BIT_DEPTH_UNKNOWN;
+       link->dp_link.test_audio = (const struct dp_link_test_audio){ 0 };
+       link->dp_link.phy_params.phy_test_pattern_sel = 0;
+       link->dp_link.sink_request = 0;
+       link->dp_link.test_response = 0;
+}
+
+/**
+ * dp_link_process_request() - handle HPD IRQ transition to HIGH
+ * @dp_link: pointer to link module data
+ *
+ * This function will handle the HPD IRQ state transitions from LOW to HIGH
+ * (including cases when there are back to back HPD IRQ HIGH) indicating
+ * the start of a new link training request or sink status update.
+ */
+int dp_link_process_request(struct dp_link *dp_link)
+{
+       int ret = 0;
+       struct dp_link_private *link;
+
+       if (!dp_link) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       link = container_of(dp_link, struct dp_link_private, dp_link);
+
+       dp_link_reset_data(link);
+
+       dp_link_parse_sink_status_field(link);
+
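+       /*
+        * Service the possible sink requests in priority order: EDID
+        * read, downstream port change, link training, PHY test pattern,
+        * link status update, then video/audio pattern tests.
+        */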
+       if (link->request.test_requested == DP_TEST_LINK_EDID_READ) {
+               dp_link->sink_request |= DP_TEST_LINK_EDID_READ;
+               return ret;
+       }
+
+       ret = dp_link_process_ds_port_status_change(link);
+       if (!ret) {
+               dp_link->sink_request |= DS_PORT_STATUS_CHANGED;
+               return ret;
+       }
+
+       ret = dp_link_process_link_training_request(link);
+       if (!ret) {
+               dp_link->sink_request |= DP_TEST_LINK_TRAINING;
+               return ret;
+       }
+
+       ret = dp_link_process_phy_test_pattern_request(link);
+       if (!ret) {
+               dp_link->sink_request |= DP_TEST_LINK_PHY_TEST_PATTERN;
+               return ret;
+       }
+
+       ret = dp_link_process_link_status_update(link);
+       if (!ret) {
+               dp_link->sink_request |= DP_LINK_STATUS_UPDATED;
+               return ret;
+       }
+
+       if (dp_link_is_video_pattern_requested(link)) {
+               ret = 0;
+               dp_link->sink_request |= DP_TEST_LINK_VIDEO_PATTERN;
+       }
+
+       if (dp_link_is_audio_pattern_requested(link)) {
+               dp_link->sink_request |= DP_TEST_LINK_AUDIO_PATTERN;
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
+int dp_link_get_colorimetry_config(struct dp_link *dp_link)
+{
+       u32 cc;
+       struct dp_link_private *link;
+
+       if (!dp_link) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       link = container_of(dp_link, struct dp_link_private, dp_link);
+
+       /*
+        * Unless a video pattern CTS test is ongoing, use RGB_VESA
+        * Only RGB_VESA and RGB_CEA supported for now
+        */
+       if (dp_link_is_video_pattern_requested(link))
+               cc = link->dp_link.test_video.test_dyn_range;
+       else
+               cc = DP_TEST_DYNAMIC_RANGE_VESA;
+
+       return cc;
+}
+
+int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status)
+{
+       int i;
+       int v_max = 0, p_max = 0;
+
+       if (!dp_link) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       /* use the max level across lanes */
+       for (i = 0; i < dp_link->link_params.num_lanes; i++) {
+               u8 data_v = drm_dp_get_adjust_request_voltage(link_status, i);
+               u8 data_p = drm_dp_get_adjust_request_pre_emphasis(link_status,
+                                                                        i);
+               DRM_DEBUG_DP("lane=%d req_vol_swing=%d req_pre_emphasis=%d\n",
+                               i, data_v, data_p);
+               if (v_max < data_v)
+                       v_max = data_v;
+               if (p_max < data_p)
+                       p_max = data_p;
+       }
+
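+       /*
+        * The drm helpers return each request field in its register
+        * position, so shift down by DP_TRAIN_*_SHIFT to get the plain
+        * numeric level before clamping.
+        */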
+       dp_link->phy_params.v_level = v_max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
+       dp_link->phy_params.p_level = p_max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+       /*
+        * Adjust the voltage swing and pre-emphasis level combination to within
+        * the allowable range.
+        */
+       if (dp_link->phy_params.v_level > DP_TRAIN_VOLTAGE_SWING_MAX) {
+               DRM_DEBUG_DP("Requested vSwingLevel=%d, change to %d\n",
+                       dp_link->phy_params.v_level,
+                       DP_TRAIN_VOLTAGE_SWING_MAX);
+               dp_link->phy_params.v_level = DP_TRAIN_VOLTAGE_SWING_MAX;
+       }
+
+       if (dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_MAX) {
+               DRM_DEBUG_DP("Requested preEmphasisLevel=%d, change to %d\n",
+                       dp_link->phy_params.p_level,
+                       DP_TRAIN_PRE_EMPHASIS_MAX);
+               dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_MAX;
+       }
+
+       if ((dp_link->phy_params.p_level > DP_TRAIN_PRE_EMPHASIS_LVL_1)
+               && (dp_link->phy_params.v_level ==
+                       DP_TRAIN_VOLTAGE_SWING_LVL_2)) {
+               DRM_DEBUG_DP("Requested preEmphasisLevel=%d, change to %d\n",
+                       dp_link->phy_params.p_level,
+                       DP_TRAIN_PRE_EMPHASIS_LVL_1);
+               dp_link->phy_params.p_level = DP_TRAIN_PRE_EMPHASIS_LVL_1;
+       }
+
+       DRM_DEBUG_DP("adjusted: v_level=%d, p_level=%d\n",
+               dp_link->phy_params.v_level, dp_link->phy_params.p_level);
+
+       return 0;
+}
+
+u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
+{
+       u32 tbd;
+
+       /*
+        * A few simplistic rules and assumptions are made here:
+        *    1. Test bit depth is bit depth per color component
+        *    2. Assume 3 color components
+        */
+       switch (bpp) {
+       case 18:
+               tbd = DP_TEST_BIT_DEPTH_6;
+               break;
+       case 24:
+               tbd = DP_TEST_BIT_DEPTH_8;
+               break;
+       case 30:
+               tbd = DP_TEST_BIT_DEPTH_10;
+               break;
+       default:
+               tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
+               break;
+       }
+
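+       /* strip the TEST_MISC0 register encoding so a plain code is returned */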
+       if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
+               tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
+
+       return tbd;
+}
+
+struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux)
+{
+       struct dp_link_private *link;
+       struct dp_link *dp_link;
+
+       if (!dev || !aux) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       link = devm_kzalloc(dev, sizeof(*link), GFP_KERNEL);
+       if (!link)
+               return ERR_PTR(-ENOMEM);
+
+       link->dev   = dev;
+       link->aux   = aux;
+
+       mutex_init(&link->psm_mutex);
+       dp_link = &link->dp_link;
+
+       return dp_link;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_link.h b/drivers/gpu/drm/msm/dp/dp_link.h
new file mode 100644 (file)
index 0000000..49811b6
--- /dev/null
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_LINK_H_
+#define _DP_LINK_H_
+
+#include "dp_aux.h"
+
+#define DS_PORT_STATUS_CHANGED 0x200
+#define DP_TEST_BIT_DEPTH_UNKNOWN 0xFFFFFFFF
+#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
+
+struct dp_link_info {
+       unsigned char revision;
+       unsigned int rate;
+       unsigned int num_lanes;
+       unsigned long capabilities;
+};
+
+enum dp_link_voltage_level {
+       DP_TRAIN_VOLTAGE_SWING_LVL_0    = 0,
+       DP_TRAIN_VOLTAGE_SWING_LVL_1    = 1,
+       DP_TRAIN_VOLTAGE_SWING_LVL_2    = 2,
+       DP_TRAIN_VOLTAGE_SWING_MAX      = DP_TRAIN_VOLTAGE_SWING_LVL_2,
+};
+
+enum dp_link_preemphasis_level {
+       DP_TRAIN_PRE_EMPHASIS_LVL_0     = 0,
+       DP_TRAIN_PRE_EMPHASIS_LVL_1     = 1,
+       DP_TRAIN_PRE_EMPHASIS_LVL_2     = 2,
+       DP_TRAIN_PRE_EMPHASIS_MAX       = DP_TRAIN_PRE_EMPHASIS_LVL_2,
+};
+
+struct dp_link_test_video {
+       u32 test_video_pattern;
+       u32 test_bit_depth;
+       u32 test_dyn_range;
+       u32 test_h_total;
+       u32 test_v_total;
+       u32 test_h_start;
+       u32 test_v_start;
+       u32 test_hsync_pol;
+       u32 test_hsync_width;
+       u32 test_vsync_pol;
+       u32 test_vsync_width;
+       u32 test_h_width;
+       u32 test_v_height;
+       u32 test_rr_d;
+       u32 test_rr_n;
+};
+
+struct dp_link_test_audio {
+       u32 test_audio_sampling_rate;
+       u32 test_audio_channel_count;
+       u32 test_audio_pattern_type;
+       u32 test_audio_period_ch_1;
+       u32 test_audio_period_ch_2;
+       u32 test_audio_period_ch_3;
+       u32 test_audio_period_ch_4;
+       u32 test_audio_period_ch_5;
+       u32 test_audio_period_ch_6;
+       u32 test_audio_period_ch_7;
+       u32 test_audio_period_ch_8;
+};
+
+struct dp_link_phy_params {
+       u32 phy_test_pattern_sel;
+       u8 v_level;
+       u8 p_level;
+};
+
+struct dp_link {
+       u32 sink_request;
+       u32 test_response;
+       bool psm_enabled;
+
+       u8 sink_count;
+       struct dp_link_test_video test_video;
+       struct dp_link_test_audio test_audio;
+       struct dp_link_phy_params phy_params;
+       struct dp_link_info link_params;
+};
+
+/**
+ * dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
+ * @tbd: test bit depth
+ *
+ * Returns the bits per pixel (bpp) to be used corresponding to the
+ * given bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpp(u32 tbd)
+{
+       /*
+        * A few simplistic rules and assumptions are made here:
+        *    1. Bit depth is per color component
+        *    2. If bit depth is unknown return 0
+        *    3. Assume 3 color components
+        */
+       switch (tbd) {
+       case DP_TEST_BIT_DEPTH_6:
+               return 18;
+       case DP_TEST_BIT_DEPTH_8:
+               return 24;
+       case DP_TEST_BIT_DEPTH_10:
+               return 30;
+       case DP_TEST_BIT_DEPTH_UNKNOWN:
+       default:
+               return 0;
+       }
+}
+
+/**
+ * dp_link_bit_depth_to_bpc() - convert test bit depth to bpc
+ * @tbd: test bit depth
+ *
+ * Returns the bits per component (bpc) to be used corresponding to the
+ * bit depth value. This function assumes that bit depth has
+ * already been validated.
+ */
+static inline u32 dp_link_bit_depth_to_bpc(u32 tbd)
+{
+       switch (tbd) {
+       case DP_TEST_BIT_DEPTH_6:
+               return 6;
+       case DP_TEST_BIT_DEPTH_8:
+               return 8;
+       case DP_TEST_BIT_DEPTH_10:
+               return 10;
+       case DP_TEST_BIT_DEPTH_UNKNOWN:
+       default:
+               return 0;
+       }
+}
+
+u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp);
+int dp_link_process_request(struct dp_link *dp_link);
+int dp_link_get_colorimetry_config(struct dp_link *dp_link);
+int dp_link_adjust_levels(struct dp_link *dp_link, u8 *link_status);
+bool dp_link_send_test_response(struct dp_link *dp_link);
+int dp_link_psm_config(struct dp_link *dp_link,
+               struct dp_link_info *link_info, bool enable);
+bool dp_link_send_edid_checksum(struct dp_link *dp_link, u8 checksum);
+
+/**
+ * dp_link_get() - get the functionalities of dp test module
+ * @dev: device instance of the caller
+ * @aux: DisplayPort AUX channel used for DPCD transactions
+ *
+ * Return: a pointer to the dp_link struct on success, an error pointer on
+ * failure
+ */
+struct dp_link *dp_link_get(struct device *dev, struct drm_dp_aux *aux);
+
+#endif /* _DP_LINK_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
new file mode 100644 (file)
index 0000000..18cec4f
--- /dev/null
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include "dp_panel.h"
+
+#include <drm/drm_connector.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_print.h>
+
+struct dp_panel_private {
+       struct device *dev;
+       struct dp_panel dp_panel;
+       struct drm_dp_aux *aux;
+       struct dp_link *link;
+       struct dp_catalog *catalog;
+       bool panel_on;
+       bool aux_cfg_update_done;
+};
+
+static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
+{
+       int rc = 0;
+       size_t len;
+       ssize_t rlen;
+       struct dp_panel_private *panel;
+       struct dp_link_info *link_info;
+       u8 *dpcd, major = 0, minor = 0, temp;
+       u32 offset = DP_DPCD_REV;
+
+       dpcd = dp_panel->dpcd;
+
+       panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+       link_info = &dp_panel->link_info;
+
+       rlen = drm_dp_dpcd_read(panel->aux, offset,
+                       dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+       if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+               DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
+               if (rlen == -ETIMEDOUT)
+                       rc = rlen;
+               else
+                       rc = -EINVAL;
+
+               goto end;
+       }
+
+       temp = dpcd[DP_TRAINING_AUX_RD_INTERVAL];
+
+       /* check for EXTENDED_RECEIVER_CAPABILITY_FIELD_PRESENT */
+       if (temp & BIT(7)) {
+               DRM_DEBUG_DP("using EXTENDED_RECEIVER_CAPABILITY_FIELD\n");
+               offset = DPRX_EXTENDED_DPCD_FIELD;
+       }
+
+       rlen = drm_dp_dpcd_read(panel->aux, offset,
+               dpcd, (DP_RECEIVER_CAP_SIZE + 1));
+       if (rlen < (DP_RECEIVER_CAP_SIZE + 1)) {
+               DRM_ERROR("dpcd read failed, rlen=%zd\n", rlen);
+               if (rlen == -ETIMEDOUT)
+                       rc = rlen;
+               else
+                       rc = -EINVAL;
+
+               goto end;
+       }
+
+       link_info->revision = dpcd[DP_DPCD_REV];
+       major = (link_info->revision >> 4) & 0x0f;
+       minor = link_info->revision & 0x0f;
+
+       link_info->rate = drm_dp_bw_code_to_link_rate(dpcd[DP_MAX_LINK_RATE]);
+       link_info->num_lanes = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+       if (link_info->num_lanes > dp_panel->max_dp_lanes)
+               link_info->num_lanes = dp_panel->max_dp_lanes;
+
+       /* Limit support up to HBR2 until HBR3 support is added */
+       if (link_info->rate >= (drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4)))
+               link_info->rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
+
+       DRM_DEBUG_DP("version: %d.%d\n", major, minor);
+       DRM_DEBUG_DP("link_rate=%d\n", link_info->rate);
+       DRM_DEBUG_DP("lane_count=%d\n", link_info->num_lanes);
+
+       if (drm_dp_enhanced_frame_cap(dpcd))
+               link_info->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING;
+
+       dp_panel->dfp_present = dpcd[DP_DOWNSTREAMPORT_PRESENT];
+       dp_panel->dfp_present &= DP_DWN_STRM_PORT_PRESENT;
+
+       if (dp_panel->dfp_present && (dpcd[DP_DPCD_REV] > 0x10)) {
+               dp_panel->ds_port_cnt = dpcd[DP_DOWN_STREAM_PORT_COUNT];
+               dp_panel->ds_port_cnt &= DP_PORT_COUNT_MASK;
+               len = DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE;
+
+               rlen = drm_dp_dpcd_read(panel->aux,
+                       DP_DOWNSTREAM_PORT_0, dp_panel->ds_cap_info, len);
+               if (rlen < len) {
+                       DRM_ERROR("ds port status failed, rlen=%zd\n", rlen);
+                       rc = -EINVAL;
+                       goto end;
+               }
+       }
+
+end:
+       return rc;
+}
+
+static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
+               u32 mode_edid_bpp, u32 mode_pclk_khz)
+{
+       struct dp_link_info *link_info;
+       const u32 max_supported_bpp = 30, min_supported_bpp = 18;
+       u32 bpp = 0, data_rate_khz = 0;
+
+       bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
+
+       link_info = &dp_panel->link_info;
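+       /*
+        * Link capacity in kbps: each 8b/10b symbol carries 8 data bits
+        * per lane per link-clock cycle, hence lanes * rate(kHz) * 8,
+        * e.g. a 4-lane HBR2 link (rate = 540000) gives 17,280,000 kbps,
+        * enough for a 594000 kHz pixel clock at 24 bpp (14,256,000 kbps).
+        */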
+       data_rate_khz = link_info->num_lanes * link_info->rate * 8;
+
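+       /* step down in whole bit-depth increments: 3 components * 2 bits */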
+       while (bpp > min_supported_bpp) {
+               if (mode_pclk_khz * bpp <= data_rate_khz)
+                       break;
+               bpp -= 6;
+       }
+
+       return bpp;
+}
+
+static int dp_panel_update_modes(struct drm_connector *connector,
+       struct edid *edid)
+{
+       int rc = 0;
+
+       if (edid) {
+               rc = drm_connector_update_edid_property(connector, edid);
+               if (rc) {
+                       DRM_ERROR("failed to update edid property %d\n", rc);
+                       return rc;
+               }
+               rc = drm_add_edid_modes(connector, edid);
+               DRM_DEBUG_DP("%s -", __func__);
+               return rc;
+       }
+
+       rc = drm_connector_update_edid_property(connector, NULL);
+       if (rc)
+               DRM_ERROR("failed to update edid property %d\n", rc);
+
+       return rc;
+}
+
+int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+       struct drm_connector *connector)
+{
+       int rc = 0, bw_code;
+       int rlen;
+       u8 count;
+       struct dp_panel_private *panel;
+
+       if (!dp_panel || !connector) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+       rc = dp_panel_read_dpcd(dp_panel);
+       bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
+       if (rc || !is_link_rate_valid(bw_code) ||
+                       !is_lane_count_valid(dp_panel->link_info.num_lanes) ||
+                       (bw_code > dp_panel->max_bw_code)) {
+               DRM_ERROR("read dpcd failed %d\n", rc);
+               return rc ? rc : -EINVAL;
+       }
+
+       if (dp_panel->dfp_present) {
+               rlen = drm_dp_dpcd_read(panel->aux, DP_SINK_COUNT,
+                               &count, 1);
+               if (rlen == 1) {
+                       count = DP_GET_SINK_COUNT(count);
+                       if (!count) {
+                               DRM_ERROR("no downstream ports connected\n");
+                               panel->link->sink_count = 0;
+                               rc = -ENOTCONN;
+                               goto end;
+                       }
+               }
+       }
+
+       kfree(dp_panel->edid);
+       dp_panel->edid = NULL;
+
+       dp_panel->edid = drm_get_edid(connector,
+                                             &panel->aux->ddc);
+       if (!dp_panel->edid) {
+               DRM_ERROR("panel edid read failed\n");
+
+               /* fail safe edid */
+               mutex_lock(&connector->dev->mode_config.mutex);
+               if (drm_add_modes_noedid(connector, 640, 480))
+                       drm_set_preferred_mode(connector, 640, 480);
+               mutex_unlock(&connector->dev->mode_config.mutex);
+       }
+
+       if (panel->aux_cfg_update_done) {
+               DRM_DEBUG_DP("read DPCD with updated AUX config\n");
+               rc = dp_panel_read_dpcd(dp_panel);
+               bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
+               if (rc || !is_link_rate_valid(bw_code) ||
+                       !is_lane_count_valid(dp_panel->link_info.num_lanes)
+                       || (bw_code > dp_panel->max_bw_code)) {
+                       DRM_ERROR("read dpcd failed %d\n", rc);
+                       return rc ? rc : -EINVAL;
+               }
+               panel->aux_cfg_update_done = false;
+       }
+end:
+       return rc;
+}
+
+u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
+               u32 mode_edid_bpp, u32 mode_pclk_khz)
+{
+       struct dp_panel_private *panel;
+       u32 bpp = mode_edid_bpp;
+
+       if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
+               DRM_ERROR("invalid input\n");
+               return 0;
+       }
+
+       panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+       if (dp_panel->video_test)
+               bpp = dp_link_bit_depth_to_bpp(
+                               panel->link->test_video.test_bit_depth);
+       else
+               bpp = dp_panel_get_supported_bpp(dp_panel, mode_edid_bpp,
+                               mode_pclk_khz);
+
+       return bpp;
+}
+
+int dp_panel_get_modes(struct dp_panel *dp_panel,
+       struct drm_connector *connector, struct dp_display_mode *mode)
+{
+       if (!dp_panel) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       if (dp_panel->edid)
+               return dp_panel_update_modes(connector, dp_panel->edid);
+
+       return 0;
+}
+
+static u8 dp_panel_get_edid_checksum(struct edid *edid)
+{
+       struct edid *last_block;
+       u8 *raw_edid;
+       bool is_edid_corrupt;
+
+       if (!edid) {
+               DRM_ERROR("invalid edid input\n");
+               return 0;
+       }
+
+       raw_edid = (u8 *)edid;
+       raw_edid += (edid->extensions * EDID_LENGTH);
+       last_block = (struct edid *)raw_edid;
+
+       /* block type extension */
+       drm_edid_block_valid(raw_edid, 1, false, &is_edid_corrupt);
+       if (!is_edid_corrupt)
+               return last_block->checksum;
+
+       DRM_ERROR("Invalid block, no checksum\n");
+       return 0;
+}
+
+void dp_panel_handle_sink_request(struct dp_panel *dp_panel)
+{
+       struct dp_panel_private *panel;
+
+       if (!dp_panel) {
+               DRM_ERROR("invalid input\n");
+               return;
+       }
+
+       panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+
+       if (panel->link->sink_request & DP_TEST_LINK_EDID_READ) {
+               u8 checksum = dp_panel_get_edid_checksum(dp_panel->edid);
+
+               dp_link_send_edid_checksum(panel->link, checksum);
+               dp_link_send_test_response(panel->link);
+       }
+}
+
+void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable)
+{
+       struct dp_catalog *catalog;
+       struct dp_panel_private *panel;
+
+       if (!dp_panel) {
+               DRM_ERROR("invalid input\n");
+               return;
+       }
+
+       panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+       catalog = panel->catalog;
+
+       if (!panel->panel_on) {
+               DRM_DEBUG_DP("DP panel not enabled, handle TPG on next on\n");
+               return;
+       }
+
+       if (!enable) {
+               dp_catalog_panel_tpg_disable(catalog);
+               return;
+       }
+
+       DRM_DEBUG_DP("%s: calling catalog tpg_enable\n", __func__);
+       dp_catalog_panel_tpg_enable(catalog, &panel->dp_panel.dp_mode.drm_mode);
+}
+
+void dp_panel_dump_regs(struct dp_panel *dp_panel)
+{
+       struct dp_catalog *catalog;
+       struct dp_panel_private *panel;
+
+       panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+       catalog = panel->catalog;
+
+       dp_catalog_dump_regs(catalog);
+}
+
+int dp_panel_timing_cfg(struct dp_panel *dp_panel)
+{
+       int rc = 0;
+       u32 data, total_ver, total_hor;
+       struct dp_catalog *catalog;
+       struct dp_panel_private *panel;
+       struct drm_display_mode *drm_mode;
+
+       panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
+       catalog = panel->catalog;
+       drm_mode = &panel->dp_panel.dp_mode.drm_mode;
+
+       DRM_DEBUG_DP("width=%d hporch= %d %d %d\n",
+               drm_mode->hdisplay, drm_mode->htotal - drm_mode->hsync_end,
+               drm_mode->hsync_start - drm_mode->hdisplay,
+               drm_mode->hsync_end - drm_mode->hsync_start);
+
+       DRM_DEBUG_DP("height=%d vporch= %d %d %d\n",
+               drm_mode->vdisplay, drm_mode->vtotal - drm_mode->vsync_end,
+               drm_mode->vsync_start - drm_mode->vdisplay,
+               drm_mode->vsync_end - drm_mode->vsync_start);
+
+       total_hor = drm_mode->htotal;
+
+       total_ver = drm_mode->vtotal;
+
+       data = total_ver;
+       data <<= 16;
+       data |= total_hor;
+
+       catalog->total = data;
+
+       data = (drm_mode->vtotal - drm_mode->vsync_start);
+       data <<= 16;
+       data |= (drm_mode->htotal - drm_mode->hsync_start);
+
+       catalog->sync_start = data;
+
+       data = drm_mode->vsync_end - drm_mode->vsync_start;
+       data <<= 16;
+       data |= (panel->dp_panel.dp_mode.v_active_low << 31);
+       data |= drm_mode->hsync_end - drm_mode->hsync_start;
+       data |= (panel->dp_panel.dp_mode.h_active_low << 15);
+
+       catalog->width_blanking = data;
+
+       data = drm_mode->vdisplay;
+       data <<= 16;
+       data |= drm_mode->hdisplay;
+
+       catalog->dp_active = data;
+
+       dp_catalog_panel_timing_cfg(catalog);
+       panel->panel_on = true;
+
+       return rc;
+}
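+
+/*
+ * Worked example (illustrative, not part of the original submission):
+ * for CEA 1080p60 (1920x1080, htotal 2200, hsync 2008-2052, vtotal 1125,
+ * vsync 1084-1089, active-high syncs) the packing above produces:
+ *
+ *     total          = (1125 << 16) | 2200 = 0x04650898
+ *     sync_start     = (41 << 16) | 192    = 0x002900c0
+ *     width_blanking = (5 << 16) | 44      = 0x0005002c
+ *     dp_active      = (1080 << 16) | 1920 = 0x04380780
+ */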
+
+int dp_panel_init_panel_info(struct dp_panel *dp_panel)
+{
+       int rc = 0;
+       struct drm_display_mode *drm_mode;
+
+       drm_mode = &dp_panel->dp_mode.drm_mode;
+
+       /*
+        * print resolution info as this is a result
+        * of user initiated action of cable connection
+        */
+       DRM_DEBUG_DP("SET NEW RESOLUTION:\n");
+       DRM_DEBUG_DP("%dx%d@%dfps\n", drm_mode->hdisplay,
+               drm_mode->vdisplay, drm_mode_vrefresh(drm_mode));
+       DRM_DEBUG_DP("h_porches(back|front|width) = (%d|%d|%d)\n",
+                       drm_mode->htotal - drm_mode->hsync_end,
+                       drm_mode->hsync_start - drm_mode->hdisplay,
+                       drm_mode->hsync_end - drm_mode->hsync_start);
+       DRM_DEBUG_DP("v_porches(back|front|width) = (%d|%d|%d)\n",
+                       drm_mode->vtotal - drm_mode->vsync_end,
+                       drm_mode->vsync_start - drm_mode->vdisplay,
+                       drm_mode->vsync_end - drm_mode->vsync_start);
+       DRM_DEBUG_DP("pixel clock (kHz) = %d\n", drm_mode->clock);
+       DRM_DEBUG_DP("bpp = %d\n", dp_panel->dp_mode.bpp);
+
+       dp_panel->dp_mode.bpp = max_t(u32, 18,
+                                       min_t(u32, dp_panel->dp_mode.bpp, 30));
+       DRM_DEBUG_DP("updated bpp = %d\n", dp_panel->dp_mode.bpp);
+
+       return rc;
+}
+
+struct dp_panel *dp_panel_get(struct dp_panel_in *in)
+{
+       struct dp_panel_private *panel;
+       struct dp_panel *dp_panel;
+
+       if (!in->dev || !in->catalog || !in->aux || !in->link) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       panel = devm_kzalloc(in->dev, sizeof(*panel), GFP_KERNEL);
+       if (!panel)
+               return ERR_PTR(-ENOMEM);
+
+       panel->dev = in->dev;
+       panel->aux = in->aux;
+       panel->catalog = in->catalog;
+       panel->link = in->link;
+
+       dp_panel = &panel->dp_panel;
+       dp_panel->max_bw_code = DP_LINK_BW_8_1;
+       panel->aux_cfg_update_done = false;
+
+       return dp_panel;
+}
+
+void dp_panel_put(struct dp_panel *dp_panel)
+{
+       if (!dp_panel)
+               return;
+
+       kfree(dp_panel->edid);
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
new file mode 100644 (file)
index 0000000..9023e5b
--- /dev/null
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_PANEL_H_
+#define _DP_PANEL_H_
+
+#include <drm/msm_drm.h>
+
+#include "dp_aux.h"
+#include "dp_link.h"
+#include "dp_hpd.h"
+
+struct edid;
+
+#define DPRX_EXTENDED_DPCD_FIELD       0x2200
+
+#define DP_DOWNSTREAM_PORTS            4
+#define DP_DOWNSTREAM_CAP_SIZE         4
+
+struct dp_display_mode {
+       struct drm_display_mode drm_mode;
+       u32 capabilities;
+       u32 bpp;
+       u32 h_active_low;
+       u32 v_active_low;
+};
+
+struct dp_panel_in {
+       struct device *dev;
+       struct drm_dp_aux *aux;
+       struct dp_link *link;
+       struct dp_catalog *catalog;
+};
+
+struct dp_panel {
+       /* dpcd raw data */
+       u8 dpcd[DP_RECEIVER_CAP_SIZE + 1];
+       u8 ds_cap_info[DP_DOWNSTREAM_PORTS * DP_DOWNSTREAM_CAP_SIZE];
+       u32 ds_port_cnt;
+       u32 dfp_present;
+
+       struct dp_link_info link_info;
+       struct drm_dp_desc desc;
+       struct edid *edid;
+       struct drm_connector *connector;
+       struct dp_display_mode dp_mode;
+       bool video_test;
+
+       u32 vic;
+       u32 max_pclk_khz;
+       u32 max_dp_lanes;
+
+       u32 max_bw_code;
+};
+
+int dp_panel_init_panel_info(struct dp_panel *dp_panel);
+int dp_panel_deinit(struct dp_panel *dp_panel);
+int dp_panel_timing_cfg(struct dp_panel *dp_panel);
+void dp_panel_dump_regs(struct dp_panel *dp_panel);
+int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
+               struct drm_connector *connector);
+u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel, u32 mode_max_bpp,
+                       u32 mode_pclk_khz);
+int dp_panel_get_modes(struct dp_panel *dp_panel,
+               struct drm_connector *connector, struct dp_display_mode *mode);
+void dp_panel_handle_sink_request(struct dp_panel *dp_panel);
+void dp_panel_tpg_config(struct dp_panel *dp_panel, bool enable);
+
+/**
+ * is_link_rate_valid() - validates the link rate
+ * @bw_code: link rate (as a DP bandwidth code) requested by the sink
+ *
+ * Returns true if the requested link rate is supported.
+ */
+static inline bool is_link_rate_valid(u32 bw_code)
+{
+       return (bw_code == DP_LINK_BW_1_62 ||
+               bw_code == DP_LINK_BW_2_7 ||
+               bw_code == DP_LINK_BW_5_4 ||
+               bw_code == DP_LINK_BW_8_1);
+}
+
+/**
+ * is_lane_count_valid() - validates the lane count
+ * @lane_count: lane count requested by the sink
+ *
+ * Returns true if the requested lane count is supported.
+ */
+static inline bool is_lane_count_valid(u32 lane_count)
+{
+       return (lane_count == 1 ||
+               lane_count == 2 ||
+               lane_count == 4);
+}
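+
+/*
+ * Example (illustrative only): both helpers are typically used together
+ * to sanity-check the link capabilities read from DPCD:
+ *
+ *     bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
+ *     if (!is_link_rate_valid(bw_code) ||
+ *         !is_lane_count_valid(dp_panel->link_info.num_lanes))
+ *             return -EINVAL;
+ */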
+
+struct dp_panel *dp_panel_get(struct dp_panel_in *in);
+void dp_panel_put(struct dp_panel *dp_panel);
+#endif /* _DP_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
new file mode 100644 (file)
index 0000000..0519dd3
--- /dev/null
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/of_gpio.h>
+#include <linux/phy/phy.h>
+
+#include <drm/drm_print.h>
+
+#include "dp_parser.h"
+#include "dp_reg.h"
+
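+/*
+ * The enable/disable load values below are in microamps, as handed to
+ * regulator_set_load() by dp_power.c; the inline comments give the
+ * nominal supply voltages.
+ */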
+static const struct dp_regulator_cfg sdm845_dp_reg_cfg = {
+       .num = 2,
+       .regs = {
+               {"vdda-1p2", 21800, 4 },        /* 1.2 V */
+               {"vdda-0p9", 36000, 32 },       /* 0.9 V */
+       },
+};
+
+static int msm_dss_ioremap(struct platform_device *pdev,
+                               struct dss_io_data *io_data)
+{
+       struct resource *res = NULL;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (!res) {
+               DRM_ERROR("%pS->%s: platform_get_resource failed\n",
+                       __builtin_return_address(0), __func__);
+               return -ENODEV;
+       }
+
+       io_data->len = (u32)resource_size(res);
+       io_data->base = ioremap(res->start, io_data->len);
+       if (!io_data->base) {
+               DRM_ERROR("%pS->%s: ioremap failed\n",
+                       __builtin_return_address(0), __func__);
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static void msm_dss_iounmap(struct dss_io_data *io_data)
+{
+       if (io_data->base) {
+               iounmap(io_data->base);
+               io_data->base = NULL;
+       }
+       io_data->len = 0;
+}
+
+static void dp_parser_unmap_io_resources(struct dp_parser *parser)
+{
+       struct dp_io *io = &parser->io;
+
+       msm_dss_iounmap(&io->dp_controller);
+}
+
+static int dp_parser_ctrl_res(struct dp_parser *parser)
+{
+       int rc = 0;
+       struct platform_device *pdev = parser->pdev;
+       struct dp_io *io = &parser->io;
+
+       rc = msm_dss_ioremap(pdev, &io->dp_controller);
+       if (rc) {
+               DRM_ERROR("unable to remap dp io resources, rc=%d\n", rc);
+               goto err;
+       }
+
+       io->phy = devm_phy_get(&pdev->dev, "dp");
+       if (IS_ERR(io->phy)) {
+               rc = PTR_ERR(io->phy);
+               goto err;
+       }
+
+       return 0;
+err:
+       dp_parser_unmap_io_resources(parser);
+       return rc;
+}
+
+static int dp_parser_misc(struct dp_parser *parser)
+{
+       struct device_node *of_node = parser->pdev->dev.of_node;
+       int len = 0;
+       const char *data_lane_property = "data-lanes";
+
+       len = of_property_count_elems_of_size(of_node,
+                        data_lane_property, sizeof(u32));
+       if (len < 0) {
+               DRM_WARN("Invalid property %s, default max DP lanes = %d\n",
+                               data_lane_property, DP_MAX_NUM_DP_LANES);
+               len = DP_MAX_NUM_DP_LANES;
+       }
+
+       parser->max_dp_lanes = len;
+       return 0;
+}
+
+static inline bool dp_parser_check_prefix(const char *clk_prefix,
+                                               const char *clk_name)
+{
+       return !strncmp(clk_prefix, clk_name, strlen(clk_prefix));
+}
+
+static int dp_parser_init_clk_data(struct dp_parser *parser)
+{
+       int num_clk, i, rc;
+       int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
+       const char *clk_name;
+       struct device *dev = &parser->pdev->dev;
+       struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
+       struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
+       struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
+
+       num_clk = of_property_count_strings(dev->of_node, "clock-names");
+       if (num_clk <= 0) {
+               DRM_ERROR("no clocks are defined\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < num_clk; i++) {
+               rc = of_property_read_string_index(dev->of_node,
+                               "clock-names", i, &clk_name);
+               if (rc < 0)
+                       return rc;
+
+               if (dp_parser_check_prefix("core", clk_name))
+                       core_clk_count++;
+
+               if (dp_parser_check_prefix("ctrl", clk_name))
+                       ctrl_clk_count++;
+
+               if (dp_parser_check_prefix("stream", clk_name))
+                       stream_clk_count++;
+       }
+
+       /* Initialize the CORE power module */
+       if (core_clk_count == 0) {
+               DRM_ERROR("no core clocks are defined\n");
+               return -EINVAL;
+       }
+
+       core_power->num_clk = core_clk_count;
+       core_power->clk_config = devm_kzalloc(dev,
+                       sizeof(struct dss_clk) * core_power->num_clk,
+                       GFP_KERNEL);
+       if (!core_power->clk_config)
+               return -ENOMEM;
+
+       /* Initialize the CTRL power module */
+       if (ctrl_clk_count == 0) {
+               DRM_ERROR("no ctrl clocks are defined\n");
+               return -EINVAL;
+       }
+
+       ctrl_power->num_clk = ctrl_clk_count;
+       ctrl_power->clk_config = devm_kzalloc(dev,
+                       sizeof(struct dss_clk) * ctrl_power->num_clk,
+                       GFP_KERNEL);
+       if (!ctrl_power->clk_config) {
+               ctrl_power->num_clk = 0;
+               return -ENOMEM;
+       }
+
+       /* Initialize the STREAM power module */
+       if (stream_clk_count == 0) {
+               DRM_ERROR("no stream (pixel) clocks are defined\n");
+               return -EINVAL;
+       }
+
+       stream_power->num_clk = stream_clk_count;
+       stream_power->clk_config = devm_kzalloc(dev,
+                       sizeof(struct dss_clk) * stream_power->num_clk,
+                       GFP_KERNEL);
+       if (!stream_power->clk_config) {
+               stream_power->num_clk = 0;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+static int dp_parser_clock(struct dp_parser *parser)
+{
+       int rc = 0, i = 0;
+       int num_clk = 0;
+       int core_clk_index = 0, ctrl_clk_index = 0, stream_clk_index = 0;
+       int core_clk_count = 0, ctrl_clk_count = 0, stream_clk_count = 0;
+       const char *clk_name;
+       struct device *dev = &parser->pdev->dev;
+       struct dss_module_power *core_power = &parser->mp[DP_CORE_PM];
+       struct dss_module_power *ctrl_power = &parser->mp[DP_CTRL_PM];
+       struct dss_module_power *stream_power = &parser->mp[DP_STREAM_PM];
+
+       rc =  dp_parser_init_clk_data(parser);
+       if (rc) {
+               DRM_ERROR("failed to initialize power data %d\n", rc);
+               return -EINVAL;
+       }
+
+       core_clk_count = core_power->num_clk;
+       ctrl_clk_count = ctrl_power->num_clk;
+       stream_clk_count = stream_power->num_clk;
+
+       num_clk = core_clk_count + ctrl_clk_count + stream_clk_count;
+
+       for (i = 0; i < num_clk; i++) {
+               rc = of_property_read_string_index(dev->of_node, "clock-names",
+                               i, &clk_name);
+               if (rc) {
+                       DRM_ERROR("error reading clock-names %d\n", rc);
+                       return rc;
+               }
+               if (dp_parser_check_prefix("core", clk_name) &&
+                               core_clk_index < core_clk_count) {
+                       struct dss_clk *clk =
+                               &core_power->clk_config[core_clk_index];
+                       strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+                       clk->type = DSS_CLK_AHB;
+                       core_clk_index++;
+               } else if (dp_parser_check_prefix("stream", clk_name) &&
+                               stream_clk_index < stream_clk_count) {
+                       struct dss_clk *clk =
+                               &stream_power->clk_config[stream_clk_index];
+                       strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+                       clk->type = DSS_CLK_PCLK;
+                       stream_clk_index++;
+               } else if (dp_parser_check_prefix("ctrl", clk_name) &&
+                          ctrl_clk_index < ctrl_clk_count) {
+                       struct dss_clk *clk =
+                               &ctrl_power->clk_config[ctrl_clk_index];
+                       strlcpy(clk->clk_name, clk_name, sizeof(clk->clk_name));
+                       ctrl_clk_index++;
+                       if (dp_parser_check_prefix("ctrl_link", clk_name) ||
+                           dp_parser_check_prefix("stream_pixel", clk_name))
+                               clk->type = DSS_CLK_PCLK;
+                       else
+                               clk->type = DSS_CLK_AHB;
+               }
+       }
+
+       DRM_DEBUG_DP("clock parsing successful\n");
+
+       return 0;
+}
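+
+/*
+ * Illustrative example (the clock names are hypothetical): given a
+ * device tree entry such as
+ *
+ *     clock-names = "core_iface_clk", "core_aux_clk",
+ *                   "ctrl_link_clk", "ctrl_link_iface_clk",
+ *                   "stream_pixel_clk";
+ *
+ * dp_parser_init_clk_data() counts two core, two ctrl and one stream
+ * clock, and dp_parser_clock() then marks the stream clock and any
+ * "ctrl_link*" clocks as DSS_CLK_PCLK (rate-settable) while the
+ * remaining clocks become DSS_CLK_AHB.
+ */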
+
+static int dp_parser_parse(struct dp_parser *parser)
+{
+       int rc = 0;
+
+       if (!parser) {
+               DRM_ERROR("invalid input\n");
+               return -EINVAL;
+       }
+
+       rc = dp_parser_ctrl_res(parser);
+       if (rc)
+               return rc;
+
+       rc = dp_parser_misc(parser);
+       if (rc)
+               return rc;
+
+       rc = dp_parser_clock(parser);
+       if (rc)
+               return rc;
+
+       /* Map the corresponding regulator information according to
+        * version. Currently, since we only have one supported platform,
+        * mapping the regulator directly.
+        */
+       parser->regulator_cfg = &sdm845_dp_reg_cfg;
+
+       return 0;
+}
+
+struct dp_parser *dp_parser_get(struct platform_device *pdev)
+{
+       struct dp_parser *parser;
+
+       parser = devm_kzalloc(&pdev->dev, sizeof(*parser), GFP_KERNEL);
+       if (!parser)
+               return ERR_PTR(-ENOMEM);
+
+       parser->parse = dp_parser_parse;
+       parser->pdev = pdev;
+
+       return parser;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
new file mode 100644 (file)
index 0000000..34b4962
--- /dev/null
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_PARSER_H_
+#define _DP_PARSER_H_
+
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/phy/phy-dp.h>
+
+#include "dpu_io_util.h"
+#include "msm_drv.h"
+
+#define DP_LABEL "MDSS DP DISPLAY"
+#define DP_MAX_PIXEL_CLK_KHZ   675000
+#define DP_MAX_NUM_DP_LANES    4
+
+enum dp_pm_type {
+       DP_CORE_PM,
+       DP_CTRL_PM,
+       DP_STREAM_PM,
+       DP_PHY_PM,
+       DP_MAX_PM
+};
+
+struct dss_io_data {
+       u32 len;
+       void __iomem *base;
+};
+
+static inline const char *dp_parser_pm_name(enum dp_pm_type module)
+{
+       switch (module) {
+       case DP_CORE_PM:        return "DP_CORE_PM";
+       case DP_CTRL_PM:        return "DP_CTRL_PM";
+       case DP_STREAM_PM:      return "DP_STREAM_PM";
+       case DP_PHY_PM:         return "DP_PHY_PM";
+       default:                return "???";
+       }
+}
+
+/**
+ * struct dp_display_data - display related device tree data.
+ *
+ * @ctrl_node: reference to controller device
+ * @phy_node:  reference to phy device
+ * @is_active: is the controller currently active
+ * @name: name of the display
+ * @display_type: type of the display
+ */
+struct dp_display_data {
+       struct device_node *ctrl_node;
+       struct device_node *phy_node;
+       bool is_active;
+       const char *name;
+       const char *display_type;
+};
+
+/**
+ * struct dp_io - controller's IO related data
+ *
+ * @dp_controller: Display Port controller mapped memory address
+ * @phy: handle to the DP PHY
+ * @phy_opts: configuration options passed to the DP PHY
+ */
+struct dp_io {
+       struct dss_io_data dp_controller;
+       struct phy *phy;
+       union phy_configure_opts phy_opts;
+};
+
+/**
+ * struct dp_pinctrl - DP's pin control
+ *
+ * @pin: pin-controller's instance
+ * @state_active: active state pin control
+ * @state_hpd_active: hpd active state pin control
+ * @state_suspend: suspend state pin control
+ */
+struct dp_pinctrl {
+       struct pinctrl *pin;
+       struct pinctrl_state *state_active;
+       struct pinctrl_state *state_hpd_active;
+       struct pinctrl_state *state_suspend;
+};
+
+#define DP_DEV_REGULATOR_MAX   4
+
+/* Regulators for DP devices */
+struct dp_reg_entry {
+       char name[32];
+       int enable_load;
+       int disable_load;
+};
+
+struct dp_regulator_cfg {
+       int num;
+       struct dp_reg_entry regs[DP_DEV_REGULATOR_MAX];
+};
+
+/**
+ * struct dp_parser - DP parser's data exposed to clients
+ *
+ * @pdev: platform data of the client
+ * @mp: gpio, regulator and clock related data
+ * @pinctrl: pin-control related data
+ * @io: controller's mapped memory and DP PHY handle
+ * @disp_data: controller's display related data
+ * @regulator_cfg: regulator configuration for the platform
+ * @max_dp_lanes: maximum number of DP lanes supported
+ * @parse: function to be called by client to parse device tree.
+ */
+struct dp_parser {
+       struct platform_device *pdev;
+       struct dss_module_power mp[DP_MAX_PM];
+       struct dp_pinctrl pinctrl;
+       struct dp_io io;
+       struct dp_display_data disp_data;
+       const struct dp_regulator_cfg *regulator_cfg;
+       u32 max_dp_lanes;
+
+       int (*parse)(struct dp_parser *parser);
+};
+
+/**
+ * dp_parser_get() - get the DP's device tree parser module
+ *
+ * @pdev: platform data of the client
+ * return: pointer to dp_parser structure.
+ *
+ * This function provides client capability to parse the
+ * device tree and populate the data structures. The data
+ * related to clock, regulators, pin-control and other
+ * can be parsed using this module.
+ */
+struct dp_parser *dp_parser_get(struct platform_device *pdev);
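+
+/*
+ * Typical usage (illustrative sketch; error handling abbreviated):
+ *
+ *     struct dp_parser *parser = dp_parser_get(pdev);
+ *
+ *     if (IS_ERR(parser))
+ *             return PTR_ERR(parser);
+ *     rc = parser->parse(parser);
+ */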
+
+#endif
diff --git a/drivers/gpu/drm/msm/dp/dp_power.c b/drivers/gpu/drm/msm/dp/dp_power.c
new file mode 100644 (file)
index 0000000..17c1fc6
--- /dev/null
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt)    "[drm-dp] %s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regulator/consumer.h>
+#include "dp_power.h"
+#include "msm_drv.h"
+
+struct dp_power_private {
+       struct dp_parser *parser;
+       struct platform_device *pdev;
+       struct clk *link_clk_src;
+       struct clk *pixel_provider;
+       struct clk *link_provider;
+       struct regulator_bulk_data supplies[DP_DEV_REGULATOR_MAX];
+
+       struct dp_power dp_power;
+};
+
+static void dp_power_regulator_disable(struct dp_power_private *power)
+{
+       struct regulator_bulk_data *s = power->supplies;
+       const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
+       int num = power->parser->regulator_cfg->num;
+       int i;
+
+       DBG("");
+       for (i = num - 1; i >= 0; i--)
+               if (regs[i].disable_load >= 0)
+                       regulator_set_load(s[i].consumer,
+                                          regs[i].disable_load);
+
+       regulator_bulk_disable(num, s);
+}
+
+static int dp_power_regulator_enable(struct dp_power_private *power)
+{
+       struct regulator_bulk_data *s = power->supplies;
+       const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
+       int num = power->parser->regulator_cfg->num;
+       int ret, i;
+
+       DBG("");
+       for (i = 0; i < num; i++) {
+               if (regs[i].enable_load >= 0) {
+                       ret = regulator_set_load(s[i].consumer,
+                                                regs[i].enable_load);
+                       if (ret < 0) {
+                               pr_err("regulator %d set op mode failed, %d\n",
+                                       i, ret);
+                               goto fail;
+                       }
+               }
+       }
+
+       ret = regulator_bulk_enable(num, s);
+       if (ret < 0) {
+               pr_err("regulator enable failed, %d\n", ret);
+               goto fail;
+       }
+
+       return 0;
+
+fail:
+       for (i--; i >= 0; i--)
+               regulator_set_load(s[i].consumer, regs[i].disable_load);
+       return ret;
+}
+
+static int dp_power_regulator_init(struct dp_power_private *power)
+{
+       struct regulator_bulk_data *s = power->supplies;
+       const struct dp_reg_entry *regs = power->parser->regulator_cfg->regs;
+       struct platform_device *pdev = power->pdev;
+       int num = power->parser->regulator_cfg->num;
+       int i, ret;
+
+       for (i = 0; i < num; i++)
+               s[i].supply = regs[i].name;
+
+       ret = devm_regulator_bulk_get(&pdev->dev, num, s);
+       if (ret < 0) {
+               pr_err("%s: failed to init regulator, ret=%d\n",
+                                               __func__, ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int dp_power_clk_init(struct dp_power_private *power)
+{
+       int rc = 0;
+       struct dss_module_power *core, *ctrl, *stream;
+       struct device *dev = &power->pdev->dev;
+
+       core = &power->parser->mp[DP_CORE_PM];
+       ctrl = &power->parser->mp[DP_CTRL_PM];
+       stream = &power->parser->mp[DP_STREAM_PM];
+
+       rc = msm_dss_get_clk(dev, core->clk_config, core->num_clk);
+       if (rc) {
+               DRM_ERROR("failed to get %s clk. err=%d\n",
+                       dp_parser_pm_name(DP_CORE_PM), rc);
+               return rc;
+       }
+
+       rc = msm_dss_get_clk(dev, ctrl->clk_config, ctrl->num_clk);
+       if (rc) {
+               DRM_ERROR("failed to get %s clk. err=%d\n",
+                       dp_parser_pm_name(DP_CTRL_PM), rc);
+               msm_dss_put_clk(core->clk_config, core->num_clk);
+               return -ENODEV;
+       }
+
+       rc = msm_dss_get_clk(dev, stream->clk_config, stream->num_clk);
+       if (rc) {
+               DRM_ERROR("failed to get %s clk. err=%d\n",
+                       dp_parser_pm_name(DP_STREAM_PM), rc);
+               msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk);
+               msm_dss_put_clk(core->clk_config, core->num_clk);
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int dp_power_clk_deinit(struct dp_power_private *power)
+{
+       struct dss_module_power *core, *ctrl, *stream;
+
+       core = &power->parser->mp[DP_CORE_PM];
+       ctrl = &power->parser->mp[DP_CTRL_PM];
+       stream = &power->parser->mp[DP_STREAM_PM];
+
+       if (!core || !ctrl || !stream) {
+               DRM_ERROR("invalid power_data\n");
+               return -EINVAL;
+       }
+
+       msm_dss_put_clk(ctrl->clk_config, ctrl->num_clk);
+       msm_dss_put_clk(core->clk_config, core->num_clk);
+       msm_dss_put_clk(stream->clk_config, stream->num_clk);
+       return 0;
+}
+
+static int dp_power_clk_set_rate(struct dp_power_private *power,
+               enum dp_pm_type module, bool enable)
+{
+       int rc = 0;
+       struct dss_module_power *mp = &power->parser->mp[module];
+
+       if (enable) {
+               rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+               if (rc) {
+                       DRM_ERROR("failed to set clks rate.\n");
+                       return rc;
+               }
+       }
+
+       rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
+       if (rc) {
+               DRM_ERROR("failed to %s clks, err: %d\n",
+                       enable ? "enable" : "disable", rc);
+               return rc;
+       }
+
+       return 0;
+}
+
+int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type)
+{
+       if (pm_type == DP_CORE_PM)
+               return dp_power->core_clks_on;
+
+       if (pm_type == DP_CTRL_PM)
+               return dp_power->link_clks_on;
+
+       if (pm_type == DP_STREAM_PM)
+               return dp_power->stream_clks_on;
+
+       return 0;
+}
+
+int dp_power_clk_enable(struct dp_power *dp_power,
+               enum dp_pm_type pm_type, bool enable)
+{
+       int rc = 0;
+       struct dp_power_private *power;
+
+       power = container_of(dp_power, struct dp_power_private, dp_power);
+
+       if (pm_type != DP_CORE_PM && pm_type != DP_CTRL_PM &&
+                       pm_type != DP_STREAM_PM) {
+               DRM_ERROR("unsupported power module: %s\n",
+                               dp_parser_pm_name(pm_type));
+               return -EINVAL;
+       }
+
+       if (enable) {
+               if (pm_type == DP_CORE_PM && dp_power->core_clks_on) {
+                       DRM_DEBUG_DP("core clks already enabled\n");
+                       return 0;
+               }
+
+               if (pm_type == DP_CTRL_PM && dp_power->link_clks_on) {
+                       DRM_DEBUG_DP("links clks already enabled\n");
+                       return 0;
+               }
+
+               if (pm_type == DP_STREAM_PM && dp_power->stream_clks_on) {
+                       DRM_DEBUG_DP("pixel clks already enabled\n");
+                       return 0;
+               }
+
+               if ((pm_type == DP_CTRL_PM) && (!dp_power->core_clks_on)) {
+                       DRM_DEBUG_DP("Enable core clks before link clks\n");
+
+                       rc = dp_power_clk_set_rate(power, DP_CORE_PM, enable);
+                       if (rc) {
+                               DRM_ERROR("fail to enable clks: %s. err=%d\n",
+                                       dp_parser_pm_name(DP_CORE_PM), rc);
+                               return rc;
+                       }
+                       dp_power->core_clks_on = true;
+               }
+       }
+
+       rc = dp_power_clk_set_rate(power, pm_type, enable);
+       if (rc) {
+               DRM_ERROR("failed to '%s' clks for: %s. err=%d\n",
+                       enable ? "enable" : "disable",
+                       dp_parser_pm_name(pm_type), rc);
+               return rc;
+       }
+
+       if (pm_type == DP_CORE_PM)
+               dp_power->core_clks_on = enable;
+       else if (pm_type == DP_STREAM_PM)
+               dp_power->stream_clks_on = enable;
+       else
+               dp_power->link_clks_on = enable;
+
+       DRM_DEBUG_DP("%s clocks for %s\n",
+                       enable ? "enable" : "disable",
+                       dp_parser_pm_name(pm_type));
+       DRM_DEBUG_DP("stream_clks:%s link_clks:%s core_clks:%s\n",
+               dp_power->stream_clks_on ? "on" : "off",
+               dp_power->link_clks_on ? "on" : "off",
+               dp_power->core_clks_on ? "on" : "off");
+
+       return 0;
+}
+
+int dp_power_client_init(struct dp_power *dp_power)
+{
+       int rc = 0;
+       struct dp_power_private *power;
+
+       if (!dp_power) {
+               DRM_ERROR("invalid power data\n");
+               return -EINVAL;
+       }
+
+       power = container_of(dp_power, struct dp_power_private, dp_power);
+
+       pm_runtime_enable(&power->pdev->dev);
+
+       rc = dp_power_regulator_init(power);
+       if (rc) {
+               DRM_ERROR("failed to init regulators %d\n", rc);
+               goto error;
+       }
+
+       rc = dp_power_clk_init(power);
+       if (rc) {
+               DRM_ERROR("failed to init clocks %d\n", rc);
+               goto error;
+       }
+       return 0;
+
+error:
+       pm_runtime_disable(&power->pdev->dev);
+       return rc;
+}
+
+void dp_power_client_deinit(struct dp_power *dp_power)
+{
+       struct dp_power_private *power;
+
+       if (!dp_power) {
+               DRM_ERROR("invalid power data\n");
+               return;
+       }
+
+       power = container_of(dp_power, struct dp_power_private, dp_power);
+
+       dp_power_clk_deinit(power);
+       pm_runtime_disable(&power->pdev->dev);
+}
+
+int dp_power_init(struct dp_power *dp_power, bool flip)
+{
+       int rc = 0;
+       struct dp_power_private *power = NULL;
+
+       if (!dp_power) {
+               DRM_ERROR("invalid power data\n");
+               return -EINVAL;
+       }
+
+       power = container_of(dp_power, struct dp_power_private, dp_power);
+
+       pm_runtime_get_sync(&power->pdev->dev);
+       rc = dp_power_regulator_enable(power);
+       if (rc) {
+               DRM_ERROR("failed to enable regulators, %d\n", rc);
+               goto exit;
+       }
+
+       rc = dp_power_clk_enable(dp_power, DP_CORE_PM, true);
+       if (rc) {
+               DRM_ERROR("failed to enable DP core clocks, %d\n", rc);
+               goto err_clk;
+       }
+
+       return 0;
+
+err_clk:
+       dp_power_regulator_disable(power);
+exit:
+       pm_runtime_put_sync(&power->pdev->dev);
+       return rc;
+}
+
+int dp_power_deinit(struct dp_power *dp_power)
+{
+       struct dp_power_private *power;
+
+       power = container_of(dp_power, struct dp_power_private, dp_power);
+
+       dp_power_clk_enable(dp_power, DP_CORE_PM, false);
+       dp_power_regulator_disable(power);
+       pm_runtime_put_sync(&power->pdev->dev);
+       return 0;
+}
+
+struct dp_power *dp_power_get(struct dp_parser *parser)
+{
+       struct dp_power_private *power;
+       struct dp_power *dp_power;
+
+       if (!parser) {
+               DRM_ERROR("invalid input\n");
+               return ERR_PTR(-EINVAL);
+       }
+
+       power = devm_kzalloc(&parser->pdev->dev, sizeof(*power), GFP_KERNEL);
+       if (!power)
+               return ERR_PTR(-ENOMEM);
+
+       power->parser = parser;
+       power->pdev = parser->pdev;
+
+       dp_power = &power->dp_power;
+
+       return dp_power;
+}
diff --git a/drivers/gpu/drm/msm/dp/dp_power.h b/drivers/gpu/drm/msm/dp/dp_power.h
new file mode 100644 (file)
index 0000000..76743d7
--- /dev/null
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2012-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_POWER_H_
+#define _DP_POWER_H_
+
+#include "dp_parser.h"
+
+/**
+ * struct dp_power - DisplayPort's power related data
+ *
+ * @core_clks_on: whether the core clocks are currently enabled
+ * @link_clks_on: whether the link (ctrl) clocks are currently enabled
+ * @stream_clks_on: whether the stream (pixel) clocks are currently enabled
+ */
+struct dp_power {
+       bool core_clks_on;
+       bool link_clks_on;
+       bool stream_clks_on;
+};
+
+/**
+ * dp_power_init() - enable power supplies for display controller
+ *
+ * @power: instance of power module
+ * @flip: bool for flipping gpio direction
+ * return: 0 if success or error if failure.
+ *
+ * This API will turn on the regulators and configure the
+ * aux/hpd GPIOs.
+ */
+int dp_power_init(struct dp_power *power, bool flip);
+
+/**
+ * dp_power_deinit() - turn off regulators and gpios.
+ *
+ * @power: instance of power module
+ * return: 0 for success
+ *
+ * This API turns off power and regulators.
+ */
+int dp_power_deinit(struct dp_power *power);
+
+/**
+ * dp_power_clk_status() - display controller clocks status
+ *
+ * @power: instance of power module
+ * @pm_type: type of pm, core/ctrl/stream
+ * return: status of power clocks
+ *
+ * This API returns the status of the DP clocks
+ */
+int dp_power_clk_status(struct dp_power *dp_power, enum dp_pm_type pm_type);
+
+/**
+ * dp_power_clk_enable() - enable display controller clocks
+ *
+ * @power: instance of power module
+ * @pm_type: type of pm, core/ctrl/stream
+ * @enable: enable or disable the clocks
+ * return: 0 for success, error for failure.
+ *
+ * This API will call set-rate and enable for the DP clocks
+ */
+int dp_power_clk_enable(struct dp_power *power, enum dp_pm_type pm_type,
+                               bool enable);
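+
+/*
+ * Illustrative sketch: enabling the link clocks implicitly brings up the
+ * core clocks first when they are not already running, e.g.
+ *
+ *     rc = dp_power_clk_enable(power, DP_CTRL_PM, true);
+ */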
+
+/**
+ * dp_power_client_init() - initialize clock and regulator modules
+ *
+ * @power: instance of power module
+ * return: 0 for success, error for failure.
+ *
+ * This API will configure the DisplayPort's clocks and regulator
+ * modules.
+ */
+int dp_power_client_init(struct dp_power *power);
+
+/**
+ * dp_power_client_deinit() - de-initialize clock and regulator modules
+ *
+ * @power: instance of power module
+ * return: 0 for success, error for failure.
+ *
+ * This API will de-initialize the DisplayPort's clocks and regulator
+ * modules.
+ */
+void dp_power_client_deinit(struct dp_power *power);
+
+/**
+ * dp_power_get() - configure and get the DisplayPort power module data
+ *
+ * @parser: instance of parser module
+ * return: pointer to allocated power module data
+ *
+ * This API will configure the DisplayPort's power module and provides
+ * methods to be called by the client to configure the power related
+ * modules.
+ */
+struct dp_power *dp_power_get(struct dp_parser *parser);
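+
+/*
+ * Typical lifecycle (illustrative sketch based on the calls above):
+ *
+ *     power = dp_power_get(parser);
+ *     rc = dp_power_client_init(power);
+ *     rc = dp_power_init(power, false);
+ *     ...
+ *     dp_power_deinit(power);
+ *     dp_power_client_deinit(power);
+ */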
+
+#endif /* _DP_POWER_H_ */
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
new file mode 100644 (file)
index 0000000..43042ff
--- /dev/null
@@ -0,0 +1,306 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DP_REG_H_
+#define _DP_REG_H_
+
+/* DP_TX Registers */
+#define REG_DP_HW_VERSION                      (0x00000000)
+
+#define REG_DP_SW_RESET                                (0x00000010)
+#define DP_SW_RESET                            (0x00000001)
+
+#define REG_DP_PHY_CTRL                                (0x00000014)
+#define DP_PHY_CTRL_SW_RESET_PLL               (0x00000001)
+#define DP_PHY_CTRL_SW_RESET                   (0x00000004)
+
+#define REG_DP_CLK_CTRL                                (0x00000018)
+#define REG_DP_CLK_ACTIVE                      (0x0000001C)
+#define REG_DP_INTR_STATUS                     (0x00000020)
+#define REG_DP_INTR_STATUS2                    (0x00000024)
+#define REG_DP_INTR_STATUS3                    (0x00000028)
+
+#define REG_DP_DP_HPD_CTRL                     (0x00000000)
+#define DP_DP_HPD_CTRL_HPD_EN                  (0x00000001)
+
+#define REG_DP_DP_HPD_INT_STATUS               (0x00000004)
+
+#define REG_DP_DP_HPD_INT_ACK                  (0x00000008)
+#define DP_DP_HPD_PLUG_INT_ACK                 (0x00000001)
+#define DP_DP_IRQ_HPD_INT_ACK                  (0x00000002)
+#define DP_DP_HPD_REPLUG_INT_ACK               (0x00000004)
+#define DP_DP_HPD_UNPLUG_INT_ACK               (0x00000008)
+
+#define REG_DP_DP_HPD_INT_MASK                 (0x0000000C)
+#define DP_DP_HPD_PLUG_INT_MASK                        (0x00000001)
+#define DP_DP_IRQ_HPD_INT_MASK                 (0x00000002)
+#define DP_DP_HPD_REPLUG_INT_MASK              (0x00000004)
+#define DP_DP_HPD_UNPLUG_INT_MASK              (0x00000008)
+#define DP_DP_HPD_INT_MASK                     (DP_DP_HPD_PLUG_INT_MASK | \
+                                               DP_DP_IRQ_HPD_INT_MASK | \
+                                               DP_DP_HPD_REPLUG_INT_MASK | \
+                                               DP_DP_HPD_UNPLUG_INT_MASK)
+#define DP_DP_HPD_STATE_STATUS_CONNECTED       (0x40000000)
+#define DP_DP_HPD_STATE_STATUS_PENDING         (0x20000000)
+#define DP_DP_HPD_STATE_STATUS_DISCONNECTED    (0x00000000)
+#define DP_DP_HPD_STATE_STATUS_MASK            (0xE0000000)
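+
+/*
+ * Illustrative decode (assuming "status" is the raw HPD interrupt
+ * status read): (status & DP_DP_HPD_STATE_STATUS_MASK) compares equal
+ * to DP_DP_HPD_STATE_STATUS_CONNECTED when a sink is attached.
+ */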
+
+#define REG_DP_DP_HPD_REFTIMER                 (0x00000018)
+#define DP_DP_HPD_REFTIMER_ENABLE              (1 << 16)
+
+#define REG_DP_DP_HPD_EVENT_TIME_0             (0x0000001C)
+#define REG_DP_DP_HPD_EVENT_TIME_1             (0x00000020)
+#define DP_DP_HPD_EVENT_TIME_0_VAL             (0x3E800FA)
+#define DP_DP_HPD_EVENT_TIME_1_VAL             (0x1F407D0)
+
+#define REG_DP_AUX_CTRL                                (0x00000030)
+#define DP_AUX_CTRL_ENABLE                     (0x00000001)
+#define DP_AUX_CTRL_RESET                      (0x00000002)
+
+#define REG_DP_AUX_DATA                                (0x00000034)
+#define DP_AUX_DATA_READ                       (0x00000001)
+#define DP_AUX_DATA_WRITE                      (0x00000000)
+#define DP_AUX_DATA_OFFSET                     (0x00000008)
+#define DP_AUX_DATA_INDEX_OFFSET               (0x00000010)
+#define DP_AUX_DATA_MASK                       (0x0000ff00)
+#define DP_AUX_DATA_INDEX_WRITE                        (0x80000000)
+
+#define REG_DP_AUX_TRANS_CTRL                  (0x00000038)
+#define DP_AUX_TRANS_CTRL_I2C                  (0x00000100)
+#define DP_AUX_TRANS_CTRL_GO                   (0x00000200)
+#define DP_AUX_TRANS_CTRL_NO_SEND_ADDR         (0x00000400)
+#define DP_AUX_TRANS_CTRL_NO_SEND_STOP         (0x00000800)
+
+#define REG_DP_TIMEOUT_COUNT                   (0x0000003C)
+#define REG_DP_AUX_LIMITS                      (0x00000040)
+#define REG_DP_AUX_STATUS                      (0x00000044)
+
+#define DP_DPCD_CP_IRQ                         (0x201)
+#define DP_DPCD_RXSTATUS                       (0x69493)
+
+#define DP_INTERRUPT_TRANS_NUM                 (0x000000A0)
+
+#define REG_DP_MAINLINK_CTRL                   (0x00000000)
+#define DP_MAINLINK_CTRL_ENABLE                        (0x00000001)
+#define DP_MAINLINK_CTRL_RESET                 (0x00000002)
+#define DP_MAINLINK_CTRL_SW_BYPASS_SCRAMBLER   (0x00000010)
+#define DP_MAINLINK_FB_BOUNDARY_SEL            (0x02000000)
+
+#define REG_DP_STATE_CTRL                      (0x00000004)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN1   (0x00000001)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN2   (0x00000002)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN3   (0x00000004)
+#define DP_STATE_CTRL_LINK_TRAINING_PATTERN4   (0x00000008)
+#define DP_STATE_CTRL_LINK_SYMBOL_ERR_MEASURE  (0x00000010)
+#define DP_STATE_CTRL_LINK_PRBS7               (0x00000020)
+#define DP_STATE_CTRL_LINK_TEST_CUSTOM_PATTERN (0x00000040)
+#define DP_STATE_CTRL_SEND_VIDEO               (0x00000080)
+#define DP_STATE_CTRL_PUSH_IDLE                        (0x00000100)
+
+#define REG_DP_CONFIGURATION_CTRL              (0x00000008)
+#define DP_CONFIGURATION_CTRL_SYNC_ASYNC_CLK   (0x00000001)
+#define DP_CONFIGURATION_CTRL_STATIC_DYNAMIC_CN (0x00000002)
+#define DP_CONFIGURATION_CTRL_P_INTERLACED     (0x00000004)
+#define DP_CONFIGURATION_CTRL_INTERLACED_BTF   (0x00000008)
+#define DP_CONFIGURATION_CTRL_NUM_OF_LANES     (0x00000010)
+#define DP_CONFIGURATION_CTRL_ENHANCED_FRAMING (0x00000040)
+#define DP_CONFIGURATION_CTRL_SEND_VSC         (0x00000080)
+#define DP_CONFIGURATION_CTRL_BPC              (0x00000100)
+#define DP_CONFIGURATION_CTRL_ASSR             (0x00000400)
+#define DP_CONFIGURATION_CTRL_RGB_YUV          (0x00000800)
+#define DP_CONFIGURATION_CTRL_LSCLK_DIV                (0x00002000)
+#define DP_CONFIGURATION_CTRL_NUM_OF_LANES_SHIFT       (0x04)
+#define DP_CONFIGURATION_CTRL_BPC_SHIFT                (0x08)
+#define DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT  (0x0D)
+
+#define REG_DP_SOFTWARE_MVID                   (0x00000010)
+#define REG_DP_SOFTWARE_NVID                   (0x00000018)
+#define REG_DP_TOTAL_HOR_VER                   (0x0000001C)
+#define REG_DP_START_HOR_VER_FROM_SYNC         (0x00000020)
+#define REG_DP_HSYNC_VSYNC_WIDTH_POLARITY      (0x00000024)
+#define REG_DP_ACTIVE_HOR_VER                  (0x00000028)
+
+#define REG_DP_MISC1_MISC0                     (0x0000002C)
+#define DP_MISC0_SYNCHRONOUS_CLK               (0x00000001)
+#define DP_MISC0_COLORIMETRY_CFG_SHIFT         (0x00000001)
+#define DP_MISC0_TEST_BITS_DEPTH_SHIFT         (0x00000005)
+
+#define REG_DP_VALID_BOUNDARY                  (0x00000030)
+#define REG_DP_VALID_BOUNDARY_2                        (0x00000034)
+
+#define REG_DP_LOGICAL2PHYSICAL_LANE_MAPPING   (0x00000038)
+#define LANE0_MAPPING_SHIFT                    (0x00000000)
+#define LANE1_MAPPING_SHIFT                    (0x00000002)
+#define LANE2_MAPPING_SHIFT                    (0x00000004)
+#define LANE3_MAPPING_SHIFT                    (0x00000006)
+
+#define REG_DP_MAINLINK_READY                  (0x00000040)
+#define DP_MAINLINK_READY_FOR_VIDEO            (0x00000001)
+#define DP_MAINLINK_READY_LINK_TRAINING_SHIFT  (0x00000003)
+
+#define REG_DP_MAINLINK_LEVELS                 (0x00000044)
+#define DP_MAINLINK_SAFE_TO_EXIT_LEVEL_2       (0x00000002)
+
+
+#define REG_DP_TU                              (0x0000004C)
+
+#define REG_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET (0x00000054)
+#define DP_HBR2_ERM_PATTERN                    (0x00010000)
+
+#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG0  (0x000000C0)
+#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG1  (0x000000C4)
+#define REG_DP_TEST_80BIT_CUSTOM_PATTERN_REG2  (0x000000C8)
+
+#define MMSS_DP_MISC1_MISC0                    (0x0000002C)
+#define MMSS_DP_AUDIO_TIMING_GEN               (0x00000080)
+#define MMSS_DP_AUDIO_TIMING_RBR_32            (0x00000084)
+#define MMSS_DP_AUDIO_TIMING_HBR_32            (0x00000088)
+#define MMSS_DP_AUDIO_TIMING_RBR_44            (0x0000008C)
+#define MMSS_DP_AUDIO_TIMING_HBR_44            (0x00000090)
+#define MMSS_DP_AUDIO_TIMING_RBR_48            (0x00000094)
+#define MMSS_DP_AUDIO_TIMING_HBR_48            (0x00000098)
+
+#define MMSS_DP_PSR_CRC_RG                     (0x00000154)
+#define MMSS_DP_PSR_CRC_B                      (0x00000158)
+
+#define REG_DP_COMPRESSION_MODE_CTRL           (0x00000180)
+
+#define MMSS_DP_AUDIO_CFG                      (0x00000200)
+#define MMSS_DP_AUDIO_STATUS                   (0x00000204)
+#define MMSS_DP_AUDIO_PKT_CTRL                 (0x00000208)
+#define MMSS_DP_AUDIO_PKT_CTRL2                        (0x0000020C)
+#define MMSS_DP_AUDIO_ACR_CTRL                 (0x00000210)
+#define MMSS_DP_AUDIO_CTRL_RESET               (0x00000214)
+
+#define MMSS_DP_SDP_CFG                                (0x00000228)
+#define MMSS_DP_SDP_CFG2                       (0x0000022C)
+#define MMSS_DP_AUDIO_TIMESTAMP_0              (0x00000230)
+#define MMSS_DP_AUDIO_TIMESTAMP_1              (0x00000234)
+
+#define MMSS_DP_AUDIO_STREAM_0                 (0x00000240)
+#define MMSS_DP_AUDIO_STREAM_1                 (0x00000244)
+
+#define MMSS_DP_EXTENSION_0                    (0x00000250)
+#define MMSS_DP_EXTENSION_1                    (0x00000254)
+#define MMSS_DP_EXTENSION_2                    (0x00000258)
+#define MMSS_DP_EXTENSION_3                    (0x0000025C)
+#define MMSS_DP_EXTENSION_4                    (0x00000260)
+#define MMSS_DP_EXTENSION_5                    (0x00000264)
+#define MMSS_DP_EXTENSION_6                    (0x00000268)
+#define MMSS_DP_EXTENSION_7                    (0x0000026C)
+#define MMSS_DP_EXTENSION_8                    (0x00000270)
+#define MMSS_DP_EXTENSION_9                    (0x00000274)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_0         (0x00000278)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_1         (0x0000027C)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_2         (0x00000280)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_3         (0x00000284)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_4         (0x00000288)
+#define MMSS_DP_AUDIO_COPYMANAGEMENT_5         (0x0000028C)
+#define MMSS_DP_AUDIO_ISRC_0                   (0x00000290)
+#define MMSS_DP_AUDIO_ISRC_1                   (0x00000294)
+#define MMSS_DP_AUDIO_ISRC_2                   (0x00000298)
+#define MMSS_DP_AUDIO_ISRC_3                   (0x0000029C)
+#define MMSS_DP_AUDIO_ISRC_4                   (0x000002A0)
+#define MMSS_DP_AUDIO_ISRC_5                   (0x000002A4)
+#define MMSS_DP_AUDIO_INFOFRAME_0              (0x000002A8)
+#define MMSS_DP_AUDIO_INFOFRAME_1              (0x000002AC)
+#define MMSS_DP_AUDIO_INFOFRAME_2              (0x000002B0)
+
+#define MMSS_DP_GENERIC0_0                     (0x00000300)
+#define MMSS_DP_GENERIC0_1                     (0x00000304)
+#define MMSS_DP_GENERIC0_2                     (0x00000308)
+#define MMSS_DP_GENERIC0_3                     (0x0000030C)
+#define MMSS_DP_GENERIC0_4                     (0x00000310)
+#define MMSS_DP_GENERIC0_5                     (0x00000314)
+#define MMSS_DP_GENERIC0_6                     (0x00000318)
+#define MMSS_DP_GENERIC0_7                     (0x0000031C)
+#define MMSS_DP_GENERIC0_8                     (0x00000320)
+#define MMSS_DP_GENERIC0_9                     (0x00000324)
+#define MMSS_DP_GENERIC1_0                     (0x00000328)
+#define MMSS_DP_GENERIC1_1                     (0x0000032C)
+#define MMSS_DP_GENERIC1_2                     (0x00000330)
+#define MMSS_DP_GENERIC1_3                     (0x00000334)
+#define MMSS_DP_GENERIC1_4                     (0x00000338)
+#define MMSS_DP_GENERIC1_5                     (0x0000033C)
+#define MMSS_DP_GENERIC1_6                     (0x00000340)
+#define MMSS_DP_GENERIC1_7                     (0x00000344)
+#define MMSS_DP_GENERIC1_8                     (0x00000348)
+#define MMSS_DP_GENERIC1_9                     (0x0000034C)
+
+#define MMSS_DP_VSCEXT_0                       (0x000002D0)
+#define MMSS_DP_VSCEXT_1                       (0x000002D4)
+#define MMSS_DP_VSCEXT_2                       (0x000002D8)
+#define MMSS_DP_VSCEXT_3                       (0x000002DC)
+#define MMSS_DP_VSCEXT_4                       (0x000002E0)
+#define MMSS_DP_VSCEXT_5                       (0x000002E4)
+#define MMSS_DP_VSCEXT_6                       (0x000002E8)
+#define MMSS_DP_VSCEXT_7                       (0x000002EC)
+#define MMSS_DP_VSCEXT_8                       (0x000002F0)
+#define MMSS_DP_VSCEXT_9                       (0x000002F4)
+
+#define MMSS_DP_BIST_ENABLE                    (0x00000000)
+#define DP_BIST_ENABLE_DPBIST_EN               (0x00000001)
+
+#define MMSS_DP_TIMING_ENGINE_EN               (0x00000010)
+#define DP_TIMING_ENGINE_EN_EN                 (0x00000001)
+
+#define MMSS_DP_INTF_CONFIG                    (0x00000014)
+#define MMSS_DP_INTF_HSYNC_CTL                 (0x00000018)
+#define MMSS_DP_INTF_VSYNC_PERIOD_F0           (0x0000001C)
+#define MMSS_DP_INTF_VSYNC_PERIOD_F1           (0x00000020)
+#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F0      (0x00000024)
+#define MMSS_DP_INTF_VSYNC_PULSE_WIDTH_F1      (0x00000028)
+#define MMSS_INTF_DISPLAY_V_START_F0           (0x0000002C)
+#define MMSS_INTF_DISPLAY_V_START_F1           (0x00000030)
+#define MMSS_DP_INTF_DISPLAY_V_END_F0          (0x00000034)
+#define MMSS_DP_INTF_DISPLAY_V_END_F1          (0x00000038)
+#define MMSS_DP_INTF_ACTIVE_V_START_F0         (0x0000003C)
+#define MMSS_DP_INTF_ACTIVE_V_START_F1         (0x00000040)
+#define MMSS_DP_INTF_ACTIVE_V_END_F0           (0x00000044)
+#define MMSS_DP_INTF_ACTIVE_V_END_F1           (0x00000048)
+#define MMSS_DP_INTF_DISPLAY_HCTL              (0x0000004C)
+#define MMSS_DP_INTF_ACTIVE_HCTL               (0x00000050)
+#define MMSS_DP_INTF_POLARITY_CTL              (0x00000058)
+
+#define MMSS_DP_TPG_MAIN_CONTROL               (0x00000060)
+#define MMSS_DP_DSC_DTO                                (0x0000007C)
+#define DP_TPG_CHECKERED_RECT_PATTERN          (0x00000100)
+
+#define MMSS_DP_TPG_VIDEO_CONFIG               (0x00000064)
+#define DP_TPG_VIDEO_CONFIG_BPP_8BIT           (0x00000001)
+#define DP_TPG_VIDEO_CONFIG_RGB                        (0x00000004)
+
+#define MMSS_DP_ASYNC_FIFO_CONFIG              (0x00000088)
+
+#define REG_DP_PHY_AUX_INTERRUPT_CLEAR          (0x0000004C)
+#define REG_DP_PHY_AUX_BIST_CFG                        (0x00000050)
+#define REG_DP_PHY_AUX_INTERRUPT_STATUS         (0x000000BC)
+
+/* DP HDCP 1.3 registers */
+#define DP_HDCP_CTRL                                   (0x0A0)
+#define DP_HDCP_STATUS                                 (0x0A4)
+#define DP_HDCP_SW_UPPER_AKSV                          (0x098)
+#define DP_HDCP_SW_LOWER_AKSV                          (0x09C)
+#define DP_HDCP_ENTROPY_CTRL0                          (0x350)
+#define DP_HDCP_ENTROPY_CTRL1                          (0x35C)
+#define DP_HDCP_SHA_STATUS                             (0x0C8)
+#define DP_HDCP_RCVPORT_DATA2_0                        (0x0B0)
+#define DP_HDCP_RCVPORT_DATA3                          (0x0A4)
+#define DP_HDCP_RCVPORT_DATA4                          (0x0A8)
+#define DP_HDCP_RCVPORT_DATA5                          (0x0C0)
+#define DP_HDCP_RCVPORT_DATA6                          (0x0C4)
+
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_CTRL           (0x024)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_SHA_DATA           (0x028)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA0      (0x004)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA1      (0x008)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA7      (0x00C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA8      (0x010)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA9      (0x014)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA10     (0x018)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA11     (0x01C)
+#define HDCP_SEC_DP_TZ_HV_HLOS_HDCP_RCVPORT_DATA12     (0x020)
+
+#endif /* _DP_REG_H_ */
index 4de771d..78ef5d4 100644 (file)
@@ -30,6 +30,8 @@ enum msm_dsi_phy_type {
        MSM_DSI_PHY_28NM_8960,
        MSM_DSI_PHY_14NM,
        MSM_DSI_PHY_10NM,
+       MSM_DSI_PHY_7NM,
+       MSM_DSI_PHY_7NM_V4_1,
        MSM_DSI_PHY_MAX
 };
 
index 8e536e0..50eb4d1 100644 (file)
@@ -1886,5 +1886,428 @@ static inline uint32_t REG_DSI_10nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x0000
 
 #define REG_DSI_10nm_PHY_PLL_COMMON_STATUS_ONE                 0x000001a0
 
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID0                       0x00000000
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID1                       0x00000004
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID2                       0x00000008
+
+#define REG_DSI_7nm_PHY_CMN_REVISION_ID3                       0x0000000c
+
+#define REG_DSI_7nm_PHY_CMN_CLK_CFG0                           0x00000010
+
+#define REG_DSI_7nm_PHY_CMN_CLK_CFG1                           0x00000014
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_CTRL                          0x00000018
+
+#define REG_DSI_7nm_PHY_CMN_RBUF_CTRL                          0x0000001c
+
+#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_0                                0x00000020
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_0                             0x00000024
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_1                             0x00000028
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_2                             0x0000002c
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_3                             0x00000030
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CFG0                          0x00000034
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CFG1                          0x00000038
+
+#define REG_DSI_7nm_PHY_CMN_PLL_CNTRL                          0x0000003c
+
+#define REG_DSI_7nm_PHY_CMN_DPHY_SOT                           0x00000040
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL0                         0x000000a0
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL1                         0x000000a4
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL2                         0x000000a8
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL3                         0x000000ac
+
+#define REG_DSI_7nm_PHY_CMN_LANE_CTRL4                         0x000000b0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0                      0x000000b4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1                      0x000000b8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2                      0x000000bc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3                      0x000000c0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4                      0x000000c4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5                      0x000000c8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6                      0x000000cc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7                      0x000000d0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8                      0x000000d4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9                      0x000000d8
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10                     0x000000dc
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11                     0x000000e0
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12                     0x000000e4
+
+#define REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13                     0x000000e8
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0               0x000000ec
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_1               0x000000f0
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL       0x000000f4
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL       0x000000f8
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_MID_CTRL       0x000000fc
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL                 0x00000100
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0                  0x00000104
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_1                  0x00000108
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL          0x0000010c
+
+#define REG_DSI_7nm_PHY_CMN_VREG_CTRL_1                                0x00000110
+
+#define REG_DSI_7nm_PHY_CMN_CTRL_4                             0x00000114
+
+#define REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4                 0x00000128
+
+#define REG_DSI_7nm_PHY_CMN_PHY_STATUS                         0x00000140
+
+#define REG_DSI_7nm_PHY_CMN_LANE_STATUS0                       0x00000148
+
+#define REG_DSI_7nm_PHY_CMN_LANE_STATUS1                       0x0000014c
+
+static inline uint32_t REG_DSI_7nm_PHY_LN(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG0(uint32_t i0) { return 0x00000000 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG1(uint32_t i0) { return 0x00000004 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_CFG2(uint32_t i0) { return 0x00000008 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000000c + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_PIN_SWAP(uint32_t i0) { return 0x00000010 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_LPRX_CTRL(uint32_t i0) { return 0x00000014 + 0x80*i0; }
+
+static inline uint32_t REG_DSI_7nm_PHY_LN_TX_DCTRL(uint32_t i0) { return 0x00000018 + 0x80*i0; }
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_ONE                        0x00000000
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO                        0x00000004
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS                  0x00000008
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_SETTINGS_TWO              0x0000000c
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE              0x00000010
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FOUR               0x00000014
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE               0x00000018
+
+#define REG_DSI_7nm_PHY_PLL_INT_LOOP_CONTROLS                  0x0000001c
+
+#define REG_DSI_7nm_PHY_PLL_DSM_DIVIDER                                0x00000020
+
+#define REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER                   0x00000024
+
+#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES                       0x00000028
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_UPDATE_CONTROL_OVERRIDES      0x0000002c
+
+#define REG_DSI_7nm_PHY_PLL_CMODE                              0x00000030
+
+#define REG_DSI_7nm_PHY_PLL_PSM_CTRL                           0x00000034
+
+#define REG_DSI_7nm_PHY_PLL_RSM_CTRL                           0x00000038
+
+#define REG_DSI_7nm_PHY_PLL_VCO_TUNE_MAP                       0x0000003c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_CNTRL                          0x00000040
+
+#define REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS               0x00000044
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_LOW             0x00000048
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_TIMER_HIGH            0x0000004c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS              0x00000050
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MIN                       0x00000054
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_MAX                       0x00000058
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_PFILT                     0x0000005c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_IFILT                     0x00000060
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_TWO          0x00000064
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE                0x00000068
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_FOUR         0x0000006c
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_HIGH                        0x00000070
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_ICODE_LOW                 0x00000074
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE           0x00000078
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DETECT_THRESH                 0x0000007c
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_HIGH               0x00000080
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_REFCLK_LOW                        0x00000084
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_HIGH               0x00000088
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_DET_PLLCLK_LOW                        0x0000008c
+
+#define REG_DSI_7nm_PHY_PLL_PFILT                              0x00000090
+
+#define REG_DSI_7nm_PHY_PLL_IFILT                              0x00000094
+
+#define REG_DSI_7nm_PHY_PLL_PLL_GAIN                           0x00000098
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_LOW                          0x0000009c
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_HIGH                         0x000000a0
+
+#define REG_DSI_7nm_PHY_PLL_LOCKDET                            0x000000a4
+
+#define REG_DSI_7nm_PHY_PLL_OUTDIV                             0x000000a8
+
+#define REG_DSI_7nm_PHY_PLL_FASTLOCK_CONTROL                   0x000000ac
+
+#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_ONE              0x000000b0
+
+#define REG_DSI_7nm_PHY_PLL_PASS_OUT_OVERRIDE_TWO              0x000000b4
+
+#define REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE                      0x000000b8
+
+#define REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE                        0x000000bc
+
+#define REG_DSI_7nm_PHY_PLL_RATE_CHANGE                                0x000000c0
+
+#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS                 0x000000c4
+
+#define REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO             0x000000c8
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START                  0x000000cc
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW                 0x000000d0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID                 0x000000d4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH                        0x000000d8
+
+#define REG_DSI_7nm_PHY_PLL_DEC_FRAC_MUXES                     0x000000dc
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1                        0x000000e0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1               0x000000e4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1               0x000000e8
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1              0x000000ec
+
+#define REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_2                        0x000000f0
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_2               0x000000f4
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_2               0x000000f8
+
+#define REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_2              0x000000fc
+
+#define REG_DSI_7nm_PHY_PLL_MASH_CONTROL                       0x00000100
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW                   0x00000104
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH                  0x00000108
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW                    0x0000010c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH                   0x00000110
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW                     0x00000114
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH                    0x00000118
+
+#define REG_DSI_7nm_PHY_PLL_SSC_MUX_CONTROL                    0x0000011c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1                 0x00000120
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1                        0x00000124
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1                  0x00000128
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1                 0x0000012c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1                   0x00000130
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1                  0x00000134
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_2                 0x00000138
+
+#define REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_2                        0x0000013c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_2                  0x00000140
+
+#define REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_2                 0x00000144
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_2                   0x00000148
+
+#define REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_2                  0x0000014c
+
+#define REG_DSI_7nm_PHY_PLL_SSC_CONTROL                                0x00000150
+
+#define REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE                    0x00000154
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1                 0x00000158
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_2                 0x0000015c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1               0x00000160
+
+#define REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_2               0x00000164
+
+#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1                        0x00000168
+
+#define REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_2                        0x0000016c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1          0x00000170
+
+#define REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_2          0x00000174
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1       0x00000178
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_2       0x0000017c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_FASTLOCK_EN_BAND               0x00000180
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MID           0x00000184
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_HIGH          0x00000188
+
+#define REG_DSI_7nm_PHY_PLL_FREQ_TUNE_ACCUM_INIT_MUX           0x0000018c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE                  0x00000190
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY                     0x00000194
+
+#define REG_DSI_7nm_PHY_PLL_PLL_LOCK_MIN_DELAY                 0x00000198
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS                    0x0000019c
+
+#define REG_DSI_7nm_PHY_PLL_SPARE_AND_JPC_OVERRIDES            0x000001a0
+
+#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_1                     0x000001a4
+
+#define REG_DSI_7nm_PHY_PLL_BIAS_CONTROL_2                     0x000001a8
+
+#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_CTRL_1               0x000001ac
+
+#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE                  0x000001b0
+
+#define REG_DSI_7nm_PHY_PLL_COMMON_STATUS_TWO                  0x000001b4
+
+#define REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL                       0x000001b8
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_LOW             0x000001bc
+
+#define REG_DSI_7nm_PHY_PLL_ICODE_ACCUM_STATUS_HIGH            0x000001c0
+
+#define REG_DSI_7nm_PHY_PLL_FD_OUT_LOW                         0x000001c4
+
+#define REG_DSI_7nm_PHY_PLL_FD_OUT_HIGH                                0x000001c8
+
+#define REG_DSI_7nm_PHY_PLL_ALOG_OBSV_BUS_STATUS_1             0x000001cc
+
+#define REG_DSI_7nm_PHY_PLL_PLL_MISC_CONFIG                    0x000001d0
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CONFIG                         0x000001d4
+
+#define REG_DSI_7nm_PHY_PLL_FLL_FREQ_ACQ_TIME                  0x000001d8
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CODE0                          0x000001dc
+
+#define REG_DSI_7nm_PHY_PLL_FLL_CODE1                          0x000001e0
+
+#define REG_DSI_7nm_PHY_PLL_FLL_GAIN0                          0x000001e4
+
+#define REG_DSI_7nm_PHY_PLL_FLL_GAIN1                          0x000001e8
+
+#define REG_DSI_7nm_PHY_PLL_SW_RESET                           0x000001ec
+
+#define REG_DSI_7nm_PHY_PLL_FAST_PWRUP                         0x000001f0
+
+#define REG_DSI_7nm_PHY_PLL_LOCKTIME0                          0x000001f4
+
+#define REG_DSI_7nm_PHY_PLL_LOCKTIME1                          0x000001f8
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS_SEL                      0x000001fc
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS0                         0x00000200
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS1                         0x00000204
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS2                         0x00000208
+
+#define REG_DSI_7nm_PHY_PLL_DEBUG_BUS3                         0x0000020c
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_FLL_CONTROL_OVERRIDES       0x00000210
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG                         0x00000214
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE0_STATUS         0x00000218
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CAL_CODE1_MODE1_STATUS         0x0000021c
+
+#define REG_DSI_7nm_PHY_PLL_RESET_SM_STATUS                    0x00000220
+
+#define REG_DSI_7nm_PHY_PLL_TDC_OFFSET                         0x00000224
+
+#define REG_DSI_7nm_PHY_PLL_PS3_PWRDOWN_CONTROLS               0x00000228
+
+#define REG_DSI_7nm_PHY_PLL_PS4_PWRDOWN_CONTROLS               0x0000022c
+
+#define REG_DSI_7nm_PHY_PLL_PLL_RST_CONTROLS                   0x00000230
+
+#define REG_DSI_7nm_PHY_PLL_GEAR_BAND_SELECT_CONTROLS          0x00000234
+
+#define REG_DSI_7nm_PHY_PLL_PSM_CLK_CONTROLS                   0x00000238
+
+#define REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES_2                     0x0000023c
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1                       0x00000240
+
+#define REG_DSI_7nm_PHY_PLL_VCO_CONFIG_2                       0x00000244
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_1                  0x00000248
+
+#define REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS_2                  0x0000024c
+
+#define REG_DSI_7nm_PHY_PLL_CMODE_1                            0x00000250
+
+#define REG_DSI_7nm_PHY_PLL_CMODE_2                            0x00000254
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1             0x00000258
+
+#define REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_2             0x0000025c
+
+#define REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE                      0x00000260
 
 #endif /* DSI_XML */
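A note on the per-lane helpers above: each of the five PHY lanes occupies a 0x80-byte register window, which is exactly what the inline functions encode. A minimal standalone sketch (mirroring, not replacing, the generated helpers):

#include <stdint.h>
#include <stdio.h>

/* mirrors REG_DSI_7nm_PHY_LN_CFG1(): every lane gets a 0x80-byte window */
static inline uint32_t ln_cfg1(uint32_t lane)
{
	return 0x00000004 + 0x80 * lane;
}

int main(void)
{
	/* lane 3's CFG1 sits at 0x0004 + 0x80 * 3 = 0x0184 from the lane base */
	printf("0x%04x\n", ln_cfg1(3));
	return 0;
}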
index f892f2c..b2ff68a 100644 (file)
@@ -265,9 +265,12 @@ static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
                &msm8998_dsi_cfg, &msm_dsi_6g_v2_host_ops},
        {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
                &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+       {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_3_0,
+               &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
+       {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_0,
+               &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
        {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_4_1,
                &sc7180_dsi_cfg, &msm_dsi_6g_v2_host_ops},
-
 };
 
 const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
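The two new entries reuse the sdm845 config for the V2.3.0 and V2.4.0 controllers. For readers unfamiliar with the pattern, here is a minimal sketch of the exact-match scan such a handler table supports (the real lookup is msm_dsi_cfg_get() in this file; the names below are illustrative):

#include <stddef.h>
#include <stdint.h>

struct cfg_handler {
	uint32_t major, minor;
	const void *cfg, *ops;
};

/* scan the table for an exact (major, minor) revision match */
static const struct cfg_handler *cfg_lookup(const struct cfg_handler *tbl,
					    size_t n, uint32_t major,
					    uint32_t minor)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].major == major && tbl[i].minor == minor)
			return &tbl[i];
	return NULL; /* unknown controller revision */
}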
index efd469d..ade9b60 100644 (file)
@@ -21,6 +21,8 @@
 #define MSM_DSI_6G_VER_MINOR_V2_1_0    0x20010000
 #define MSM_DSI_6G_VER_MINOR_V2_2_0    0x20000000
 #define MSM_DSI_6G_VER_MINOR_V2_2_1    0x20020001
+#define MSM_DSI_6G_VER_MINOR_V2_3_0    0x20030000
+#define MSM_DSI_6G_VER_MINOR_V2_4_0    0x20040000
 #define MSM_DSI_6G_VER_MINOR_V2_4_1    0x20040001
 
 #define MSM_DSI_V2_VER_MINOR_8064      0x0
index 009f5b8..e8c1a72 100644 (file)
@@ -364,6 +364,102 @@ int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
        return 0;
 }
 
+int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+       struct msm_dsi_phy_clk_request *clk_req)
+{
+       const unsigned long bit_rate = clk_req->bitclk_rate;
+       const unsigned long esc_rate = clk_req->escclk_rate;
+       s32 ui, ui_x8;
+       s32 tmax, tmin;
+       s32 pcnt_clk_prep = 50;
+       s32 pcnt_clk_zero = 2;
+       s32 pcnt_clk_trail = 30;
+       s32 pcnt_hs_prep = 50;
+       s32 pcnt_hs_zero = 10;
+       s32 pcnt_hs_trail = 30;
+       s32 pcnt_hs_exit = 10;
+       s32 coeff = 1000; /* Precision, should avoid overflow */
+       s32 hb_en;
+       s32 temp;
+
+       if (!bit_rate || !esc_rate)
+               return -EINVAL;
+
+       hb_en = 0;
+
+       ui = mult_frac(NSEC_PER_MSEC, coeff, bit_rate / 1000);
+       ui_x8 = ui << 3;
+
+       /* TODO: verify these calculations against the latest downstream driver.
+        * Everything except clk_post/clk_pre uses the v3 calculations, since
+        * the downstream driver shares the same calculations between v3 and v4.
+        */
+
+       temp = S_DIV_ROUND_UP(38 * coeff, ui_x8);
+       tmin = max_t(s32, temp, 0);
+       temp = (95 * coeff) / ui_x8;
+       tmax = max_t(s32, temp, 0);
+       timing->clk_prepare = linear_inter(tmax, tmin, pcnt_clk_prep, 0, false);
+
+       temp = 300 * coeff - (timing->clk_prepare << 3) * ui;
+       tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+       tmax = (tmin > 255) ? 511 : 255;
+       timing->clk_zero = linear_inter(tmax, tmin, pcnt_clk_zero, 0, false);
+
+       tmin = DIV_ROUND_UP(60 * coeff + 3 * ui, ui_x8);
+       temp = 105 * coeff + 12 * ui - 20 * coeff;
+       tmax = (temp + 3 * ui) / ui_x8;
+       timing->clk_trail = linear_inter(tmax, tmin, pcnt_clk_trail, 0, false);
+
+       temp = S_DIV_ROUND_UP(40 * coeff + 4 * ui, ui_x8);
+       tmin = max_t(s32, temp, 0);
+       temp = (85 * coeff + 6 * ui) / ui_x8;
+       tmax = max_t(s32, temp, 0);
+       timing->hs_prepare = linear_inter(tmax, tmin, pcnt_hs_prep, 0, false);
+
+       temp = 145 * coeff + 10 * ui - (timing->hs_prepare << 3) * ui;
+       tmin = S_DIV_ROUND_UP(temp, ui_x8) - 1;
+       tmax = 255;
+       timing->hs_zero = linear_inter(tmax, tmin, pcnt_hs_zero, 0, false);
+
+       tmin = DIV_ROUND_UP(60 * coeff + 4 * ui, ui_x8) - 1;
+       temp = 105 * coeff + 12 * ui - 20 * coeff;
+       tmax = (temp / ui_x8) - 1;
+       timing->hs_trail = linear_inter(tmax, tmin, pcnt_hs_trail, 0, false);
+
+       temp = 50 * coeff + ((hb_en << 2) - 8) * ui;
+       timing->hs_rqst = S_DIV_ROUND_UP(temp, ui_x8);
+
+       tmin = DIV_ROUND_UP(100 * coeff, ui_x8) - 1;
+       tmax = 255;
+       timing->hs_exit = linear_inter(tmax, tmin, pcnt_hs_exit, 0, false);
+
+       /* recommended min
+        * = roundup((mipi_min_ns + t_hs_trail_ns)/(16*bit_clk_ns), 0) - 1
+        */
+       temp = 60 * coeff + 52 * ui + (timing->hs_trail + 1) * ui_x8;
+       tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
+       tmax = 255;
+       timing->shared_timings.clk_post = linear_inter(tmax, tmin, 5, 0, false);
+
+       /* recommended min
+        * val1 = (tlpx_ns + clk_prepare_ns + clk_zero_ns + hs_rqst_ns)
+        * val2 = (16 * bit_clk_ns)
+        * final = roundup(val1/val2, 0) - 1
+        */
+       temp = 52 * coeff + (timing->clk_prepare + timing->clk_zero + 1) * ui_x8 + 54 * coeff;
+       tmin = DIV_ROUND_UP(temp, 16 * ui) - 1;
+       tmax = 255;
+       timing->shared_timings.clk_pre = DIV_ROUND_UP((tmax - tmin) * 125, 10000) + tmin;
+
+       DBG("%d, %d, %d, %d, %d, %d, %d, %d, %d, %d",
+               timing->shared_timings.clk_pre, timing->shared_timings.clk_post,
+               timing->clk_zero, timing->clk_trail, timing->clk_prepare, timing->hs_exit,
+               timing->hs_zero, timing->hs_prepare, timing->hs_trail, timing->hs_rqst);
+
+       return 0;
+}
+
 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
                                u32 bit_mask)
 {
@@ -507,6 +603,12 @@ static const struct of_device_id dsi_phy_dt_match[] = {
          .data = &dsi_phy_10nm_cfgs },
        { .compatible = "qcom,dsi-phy-10nm-8998",
          .data = &dsi_phy_10nm_8998_cfgs },
+#endif
+#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
+       { .compatible = "qcom,dsi-phy-7nm",
+         .data = &dsi_phy_7nm_cfgs },
+       { .compatible = "qcom,dsi-phy-7nm-8150",
+         .data = &dsi_phy_7nm_8150_cfgs },
 #endif
        {}
 };
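Most of the v4 timing parameters above are chosen a fixed percentage of the way between a spec-derived minimum and maximum, via the driver's linear_inter() helper. A simplified standalone sketch of that interpolation (the real helper also clamps to a minimum result and can force even values):

#include <stdint.h>
#include <stdio.h>

/* pick a value `percent` of the way from tmin toward tmax, rounding up */
static int32_t pick_between(int32_t tmax, int32_t tmin, int32_t percent)
{
	return tmin + ((tmax - tmin) * percent + 99) / 100;
}

int main(void)
{
	/* e.g. clk_prepare with pcnt_clk_prep = 50 lands midway
	 * between the computed tmin and tmax */
	printf("%d\n", pick_between(20, 10, 50)); /* 15 */
	return 0;
}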
index ef8672d..d2bd74b 100644 (file)
@@ -48,10 +48,10 @@ extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
 extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
 extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
 extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs;
 
 struct msm_dsi_dphy_timing {
-       u32 clk_pre;
-       u32 clk_post;
        u32 clk_zero;
        u32 clk_trail;
        u32 clk_prepare;
@@ -102,6 +102,8 @@ int msm_dsi_dphy_timing_calc_v2(struct msm_dsi_dphy_timing *timing,
                                struct msm_dsi_phy_clk_request *clk_req);
 int msm_dsi_dphy_timing_calc_v3(struct msm_dsi_dphy_timing *timing,
                                struct msm_dsi_phy_clk_request *clk_req);
+int msm_dsi_dphy_timing_calc_v4(struct msm_dsi_dphy_timing *timing,
+                               struct msm_dsi_phy_clk_request *clk_req);
 void msm_dsi_phy_set_src_pll(struct msm_dsi_phy *phy, int pll_id, u32 reg,
                                u32 bit_mask);
 int msm_dsi_phy_init_common(struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
new file mode 100644 (file)
index 0000000..255b5f5
--- /dev/null
@@ -0,0 +1,255 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/iopoll.h>
+
+#include "dsi_phy.h"
+#include "dsi.xml.h"
+
+static int dsi_phy_hw_v4_0_is_pll_on(struct msm_dsi_phy *phy)
+{
+       void __iomem *base = phy->base;
+       u32 data = 0;
+
+       data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL);
+       mb(); /* make sure read happened */
+
+       return (data & BIT(0));
+}
+
+static void dsi_phy_hw_v4_0_config_lpcdrx(struct msm_dsi_phy *phy, bool enable)
+{
+       void __iomem *lane_base = phy->lane_base;
+       int phy_lane_0 = 0;     /* TODO: Support all lane swap configs */
+
+       /*
+        * LPRX and CDRX need to be enabled only for the physical data lane
+        * corresponding to logical data lane 0
+        */
+       if (enable)
+               dsi_phy_write(lane_base +
+                             REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0x3);
+       else
+               dsi_phy_write(lane_base +
+                             REG_DSI_7nm_PHY_LN_LPRX_CTRL(phy_lane_0), 0);
+}
+
+static void dsi_phy_hw_v4_0_lane_settings(struct msm_dsi_phy *phy)
+{
+       int i;
+       const u8 tx_dctrl_0[] = { 0x00, 0x00, 0x00, 0x04, 0x01 };
+       const u8 tx_dctrl_1[] = { 0x40, 0x40, 0x40, 0x46, 0x41 };
+       const u8 *tx_dctrl = tx_dctrl_0;
+       void __iomem *lane_base = phy->lane_base;
+
+       if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1)
+               tx_dctrl = tx_dctrl_1;
+
+       /* Strength ctrl settings */
+       for (i = 0; i < 5; i++) {
+               /*
+                * Disable LPRX and CDRX for all lanes; they are later
+                * re-enabled only for the physical data lane corresponding
+                * to logical data lane 0
+                */
+               dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_LPRX_CTRL(i), 0);
+               dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_PIN_SWAP(i), 0x0);
+       }
+
+       dsi_phy_hw_v4_0_config_lpcdrx(phy, true);
+
+       /* other settings */
+       for (i = 0; i < 5; i++) {
+               dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG0(i), 0x0);
+               dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG1(i), 0x0);
+               dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_CFG2(i), i == 4 ? 0x8a : 0xa);
+               dsi_phy_write(lane_base + REG_DSI_7nm_PHY_LN_TX_DCTRL(i), tx_dctrl[i]);
+       }
+}
+
+static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
+                             struct msm_dsi_phy_clk_request *clk_req)
+{
+       int ret;
+       u32 status;
+       u32 const delay_us = 5;
+       u32 const timeout_us = 1000;
+       struct msm_dsi_dphy_timing *timing = &phy->timing;
+       void __iomem *base = phy->base;
+       bool less_than_1500_mhz;
+       u32 vreg_ctrl_0, glbl_str_swi_cal_sel_ctrl, glbl_hstx_str_ctrl_0;
+       u32 glbl_rescode_top_ctrl, glbl_rescode_bot_ctrl;
+       u32 data;
+
+       DBG("");
+
+       if (msm_dsi_dphy_timing_calc_v4(timing, clk_req)) {
+               DRM_DEV_ERROR(&phy->pdev->dev,
+                       "%s: D-PHY timing calculation failed\n", __func__);
+               return -EINVAL;
+       }
+
+       if (dsi_phy_hw_v4_0_is_pll_on(phy))
+               pr_warn("PLL turned on before configuring PHY\n");
+
+       /* wait for REFGEN READY */
+       ret = readl_poll_timeout_atomic(base + REG_DSI_7nm_PHY_CMN_PHY_STATUS,
+                                       status, (status & BIT(0)),
+                                       delay_us, timeout_us);
+       if (ret) {
+               pr_err("Ref gen not ready. Aborting\n");
+               return -EINVAL;
+       }
+
+       /* TODO: CPHY enable path (this is for DPHY only) */
+
+       /* Alter PHY configurations if the data rate is less than 1.5 GHz */
+       less_than_1500_mhz = (clk_req->bitclk_rate <= 1500000000);
+
+       if (phy->cfg->type == MSM_DSI_PHY_7NM_V4_1) {
+               vreg_ctrl_0 = less_than_1500_mhz ? 0x53 : 0x52;
+               glbl_rescode_top_ctrl = less_than_1500_mhz ? 0x3d :  0x00;
+               glbl_rescode_bot_ctrl = less_than_1500_mhz ? 0x39 :  0x3c;
+               glbl_str_swi_cal_sel_ctrl = 0x00;
+               glbl_hstx_str_ctrl_0 = 0x88;
+       } else {
+               vreg_ctrl_0 = less_than_1500_mhz ? 0x5B : 0x59;
+               glbl_str_swi_cal_sel_ctrl = less_than_1500_mhz ? 0x03 : 0x00;
+               glbl_hstx_str_ctrl_0 = less_than_1500_mhz ? 0x66 : 0x88;
+               glbl_rescode_top_ctrl = 0x03;
+               glbl_rescode_bot_ctrl = 0x3c;
+       }
+
+       /* de-assert digital and pll power down */
+       data = BIT(6) | BIT(5);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, data);
+
+       /* Assert PLL core reset */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x00);
+
+       /* turn off resync FIFO */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0x00);
+
+       /* program CMN_CTRL_4 for minor_ver 2 chipsets */
+       data = dsi_phy_read(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0);
+       data = data & (0xf0);
+       if (data == 0x20)
+               dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_4, 0x04);
+
+       /* Configure PHY lane swap (TODO: we need to calculate this) */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG0, 0x21);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CFG1, 0x84);
+
+       /* Enable LDO */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_0, vreg_ctrl_0);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_VREG_CTRL_1, 0x5c);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x00);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_STR_SWI_CAL_SEL_CTRL,
+                     glbl_str_swi_cal_sel_ctrl);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_HSTX_STR_CTRL_0,
+                     glbl_hstx_str_ctrl_0);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_PEMPH_CTRL_0, 0x00);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_TOP_CTRL,
+                     glbl_rescode_top_ctrl);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_RESCODE_OFFSET_BOT_CTRL,
+                     glbl_rescode_bot_ctrl);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_GLBL_LPTX_STR_CTRL, 0x55);
+
+       /* Remove power down from all blocks */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_0, 0x7f);
+
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_LANE_CTRL0, 0x1f);
+
+       /* Select full-rate mode */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_CTRL_2, 0x40);
+
+       ret = msm_dsi_pll_set_usecase(phy->pll, phy->usecase);
+       if (ret) {
+               DRM_DEV_ERROR(&phy->pdev->dev, "%s: set pll usecase failed, %d\n",
+                       __func__, ret);
+               return ret;
+       }
+
+       /* DSI PHY timings */
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_0, 0x00);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_1, timing->clk_zero);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_2, timing->clk_prepare);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_3, timing->clk_trail);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_4, timing->hs_exit);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_5, timing->hs_zero);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_6, timing->hs_prepare);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_7, timing->hs_trail);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_8, timing->hs_rqst);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_9, 0x02);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_10, 0x04);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_11, 0x00);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_12,
+                     timing->shared_timings.clk_pre);
+       dsi_phy_write(base + REG_DSI_7nm_PHY_CMN_TIMING_CTRL_13,
+                     timing->shared_timings.clk_post);
+
+       /* DSI lane settings */
+       dsi_phy_hw_v4_0_lane_settings(phy);
+
+       DBG("DSI%d PHY enabled", phy->id);
+
+       return 0;
+}
+
+static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
+{
+       /* TODO */
+}
+
+static int dsi_7nm_phy_init(struct msm_dsi_phy *phy)
+{
+       struct platform_device *pdev = phy->pdev;
+
+       phy->lane_base = msm_ioremap(pdev, "dsi_phy_lane",
+                                    "DSI_PHY_LANE");
+       if (IS_ERR(phy->lane_base)) {
+               DRM_DEV_ERROR(&pdev->dev, "%s: failed to map phy lane base\n",
+                       __func__);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs = {
+       .type = MSM_DSI_PHY_7NM_V4_1,
+       .src_pll_truthtable = { {false, false}, {true, false} },
+       .reg_cfg = {
+               .num = 1,
+               .regs = {
+                       {"vdds", 36000, 32},
+               },
+       },
+       .ops = {
+               .enable = dsi_7nm_phy_enable,
+               .disable = dsi_7nm_phy_disable,
+               .init = dsi_7nm_phy_init,
+       },
+       .io_start = { 0xae94400, 0xae96400 },
+       .num_dsi_phy = 2,
+};
+
+const struct msm_dsi_phy_cfg dsi_phy_7nm_8150_cfgs = {
+       .type = MSM_DSI_PHY_7NM,
+       .src_pll_truthtable = { {false, false}, {true, false} },
+       .reg_cfg = {
+               .num = 1,
+               .regs = {
+                       {"vdds", 36000, 32},
+               },
+       },
+       .ops = {
+               .enable = dsi_7nm_phy_enable,
+               .disable = dsi_7nm_phy_disable,
+               .init = dsi_7nm_phy_init,
+       },
+       .io_start = { 0xae94400, 0xae96400 },
+       .num_dsi_phy = 2,
+};
index 4a4aa3c..a45fe95 100644 (file)
@@ -161,6 +161,10 @@ struct msm_dsi_pll *msm_dsi_pll_init(struct platform_device *pdev,
        case MSM_DSI_PHY_10NM:
                pll = msm_dsi_pll_10nm_init(pdev, id);
                break;
+       case MSM_DSI_PHY_7NM:
+       case MSM_DSI_PHY_7NM_V4_1:
+               pll = msm_dsi_pll_7nm_init(pdev, id);
+               break;
        default:
                pll = ERR_PTR(-ENXIO);
                break;
index c6a3623..3405982 100644 (file)
@@ -116,5 +116,15 @@ msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
        return ERR_PTR(-ENODEV);
 }
 #endif
+#ifdef CONFIG_DRM_MSM_DSI_7NM_PHY
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id);
+#else
+static inline struct msm_dsi_pll *
+msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+{
+       return ERR_PTR(-ENODEV);
+}
+#endif
+
 #endif /* __DSI_PLL_H__ */
 
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_7nm.c
new file mode 100644 (file)
index 0000000..de0dfb8
--- /dev/null
@@ -0,0 +1,904 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/iopoll.h>
+
+#include "dsi_pll.h"
+#include "dsi.xml.h"
+
+/*
+ * DSI PLL 7nm - clock diagram (e.g. DSI0); TODO: update diagram for CPHY
+ *
+ *           dsi0_pll_out_div_clk  dsi0_pll_bit_clk
+ *                              |                |
+ *                              |                |
+ *                 +---------+  |  +----------+  |  +----+
+ *  dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
+ *                 +---------+  |  +----------+  |  +----+
+ *                              |                |
+ *                              |                |         dsi0_pll_by_2_bit_clk
+ *                              |                |          |
+ *                              |                |  +----+  |  |\  dsi0_pclk_mux
+ *                              |                |--| /2 |--o--| \   |
+ *                              |                |  +----+     |  \  |  +---------+
+ *                              |                --------------|  |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
+ *                              |------------------------------|  /     +---------+
+ *                              |          +-----+             | /
+ *                              -----------| /4? |--o----------|/
+ *                                         +-----+  |           |
+ *                                                  |           |dsiclk_sel
+ *                                                  |
+ *                                                  dsi0_pll_post_out_div_clk
+ */
+
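To make the diagram concrete, a worked sketch of the byte-clock path with assumed divider settings:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* illustrative numbers only: a 3 GHz VCO with out_div = 2 and
	 * divl_3_0 = 1 yields a 1.5 GHz bit clock, and the fixed /8
	 * stage then gives a 187.5 MHz dsi0_phy_pll_out_byteclk */
	uint64_t vco = 3000000000ULL, out_div = 2, bit_div = 1;
	uint64_t bitclk = vco / out_div / bit_div;

	printf("bit %llu Hz, byte %llu Hz\n",
	       (unsigned long long)bitclk,
	       (unsigned long long)(bitclk / 8));
	return 0;
}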
+#define DSI_BYTE_PLL_CLK               0
+#define DSI_PIXEL_PLL_CLK              1
+#define NUM_PROVIDED_CLKS              2
+
+#define VCO_REF_CLK_RATE               19200000
+
+struct dsi_pll_regs {
+       u32 pll_prop_gain_rate;
+       u32 pll_lockdet_rate;
+       u32 decimal_div_start;
+       u32 frac_div_start_low;
+       u32 frac_div_start_mid;
+       u32 frac_div_start_high;
+       u32 pll_clock_inverters;
+       u32 ssc_stepsize_low;
+       u32 ssc_stepsize_high;
+       u32 ssc_div_per_low;
+       u32 ssc_div_per_high;
+       u32 ssc_adjper_low;
+       u32 ssc_adjper_high;
+       u32 ssc_control;
+};
+
+struct dsi_pll_config {
+       u32 ref_freq;
+       bool div_override;
+       u32 output_div;
+       bool ignore_frac;
+       bool disable_prescaler;
+       bool enable_ssc;
+       bool ssc_center;
+       u32 dec_bits;
+       u32 frac_bits;
+       u32 lock_timer;
+       u32 ssc_freq;
+       u32 ssc_offset;
+       u32 ssc_adj_per;
+       u32 thresh_cycles;
+       u32 refclk_cycles;
+};
+
+struct pll_7nm_cached_state {
+       unsigned long vco_rate;
+       u8 bit_clk_div;
+       u8 pix_clk_div;
+       u8 pll_out_div;
+       u8 pll_mux;
+};
+
+struct dsi_pll_7nm {
+       struct msm_dsi_pll base;
+
+       int id;
+       struct platform_device *pdev;
+
+       void __iomem *phy_cmn_mmio;
+       void __iomem *mmio;
+
+       u64 vco_ref_clk_rate;
+       u64 vco_current_rate;
+
+       /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
+       spinlock_t postdiv_lock;
+
+       int vco_delay;
+       struct dsi_pll_config pll_configuration;
+       struct dsi_pll_regs reg_setup;
+
+       /* private clocks: */
+       struct clk_hw *out_div_clk_hw;
+       struct clk_hw *bit_clk_hw;
+       struct clk_hw *byte_clk_hw;
+       struct clk_hw *by_2_bit_clk_hw;
+       struct clk_hw *post_out_div_clk_hw;
+       struct clk_hw *pclk_mux_hw;
+       struct clk_hw *out_dsiclk_hw;
+
+       /* clock-provider: */
+       struct clk_hw_onecell_data *hw_data;
+
+       struct pll_7nm_cached_state cached_state;
+
+       enum msm_dsi_phy_usecase uc;
+       struct dsi_pll_7nm *slave;
+};
+
+#define to_pll_7nm(x)  container_of(x, struct dsi_pll_7nm, base)
+
+/*
+ * Global list of private DSI PLL struct pointers. We need this for Dual DSI
+ * mode, where the master PLL's clk_ops needs to access the slave's private data
+ */
+static struct dsi_pll_7nm *pll_7nm_list[DSI_MAX];
+
+static void dsi_pll_setup_config(struct dsi_pll_7nm *pll)
+{
+       struct dsi_pll_config *config = &pll->pll_configuration;
+
+       config->ref_freq = pll->vco_ref_clk_rate;
+       config->output_div = 1;
+       config->dec_bits = 8;
+       config->frac_bits = 18;
+       config->lock_timer = 64;
+       config->ssc_freq = 31500;
+       config->ssc_offset = 4800;
+       config->ssc_adj_per = 2;
+       config->thresh_cycles = 32;
+       config->refclk_cycles = 256;
+
+       config->div_override = false;
+       config->ignore_frac = false;
+       config->disable_prescaler = false;
+
+       /* TODO: ssc enable */
+       config->enable_ssc = false;
+       config->ssc_center = 0;
+}
+
+static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll)
+{
+       struct dsi_pll_config *config = &pll->pll_configuration;
+       struct dsi_pll_regs *regs = &pll->reg_setup;
+       u64 fref = pll->vco_ref_clk_rate;
+       u64 pll_freq;
+       u64 divider;
+       u64 dec, dec_multiple;
+       u32 frac;
+       u64 multiplier;
+
+       pll_freq = pll->vco_current_rate;
+
+       if (config->disable_prescaler)
+               divider = fref;
+       else
+               divider = fref * 2;
+
+       multiplier = 1 << config->frac_bits;
+       dec_multiple = div_u64(pll_freq * multiplier, divider);
+       div_u64_rem(dec_multiple, multiplier, &frac);
+
+       dec = div_u64(dec_multiple, multiplier);
+
+       if (pll->base.type != MSM_DSI_PHY_7NM_V4_1)
+               regs->pll_clock_inverters = 0x28;
+       else if (pll_freq <= 1000000000ULL)
+               regs->pll_clock_inverters = 0xa0;
+       else if (pll_freq <= 2500000000ULL)
+               regs->pll_clock_inverters = 0x20;
+       else if (pll_freq <= 3020000000ULL)
+               regs->pll_clock_inverters = 0x00;
+       else
+               regs->pll_clock_inverters = 0x40;
+
+       regs->pll_lockdet_rate = config->lock_timer;
+       regs->decimal_div_start = dec;
+       regs->frac_div_start_low = (frac & 0xff);
+       regs->frac_div_start_mid = (frac & 0xff00) >> 8;
+       regs->frac_div_start_high = (frac & 0x30000) >> 16;
+}
+
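A worked example of the dec/frac split computed above, under assumed inputs (19.2 MHz reference, prescaler enabled so the divider is 2 * fref, 18 fractional bits, 1.5 GHz target VCO):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t fref = 19200000, pll_freq = 1500000000ULL;
	uint64_t divider = fref * 2;           /* prescaler enabled */
	uint64_t mult = 1ULL << 18;            /* frac_bits = 18 */
	uint64_t dec_multiple = pll_freq * mult / divider; /* 10240000 */

	printf("dec=%llu frac=%llu\n",
	       (unsigned long long)(dec_multiple / mult),  /* 39 */
	       (unsigned long long)(dec_multiple % mult)); /* 16384 */
	return 0;
}

The three frac bytes then land in FRAC_DIV_START_LOW/MID/HIGH exactly as the masks above split them.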
+#define SSC_CENTER             BIT(0)
+#define SSC_EN                 BIT(1)
+
+static void dsi_pll_calc_ssc(struct dsi_pll_7nm *pll)
+{
+       struct dsi_pll_config *config = &pll->pll_configuration;
+       struct dsi_pll_regs *regs = &pll->reg_setup;
+       u32 ssc_per;
+       u32 ssc_mod;
+       u64 ssc_step_size;
+       u64 frac;
+
+       if (!config->enable_ssc) {
+               DBG("SSC not enabled\n");
+               return;
+       }
+
+       ssc_per = DIV_ROUND_CLOSEST(config->ref_freq, config->ssc_freq) / 2 - 1;
+       ssc_mod = (ssc_per + 1) % (config->ssc_adj_per + 1);
+       ssc_per -= ssc_mod;
+
+       frac = regs->frac_div_start_low |
+                       (regs->frac_div_start_mid << 8) |
+                       (regs->frac_div_start_high << 16);
+       ssc_step_size = regs->decimal_div_start;
+       ssc_step_size *= (1 << config->frac_bits);
+       ssc_step_size += frac;
+       ssc_step_size *= config->ssc_offset;
+       ssc_step_size *= (config->ssc_adj_per + 1);
+       ssc_step_size = div_u64(ssc_step_size, (ssc_per + 1));
+       ssc_step_size = DIV_ROUND_CLOSEST_ULL(ssc_step_size, 1000000);
+
+       regs->ssc_div_per_low = ssc_per & 0xFF;
+       regs->ssc_div_per_high = (ssc_per & 0xFF00) >> 8;
+       regs->ssc_stepsize_low = (u32)(ssc_step_size & 0xFF);
+       regs->ssc_stepsize_high = (u32)((ssc_step_size & 0xFF00) >> 8);
+       regs->ssc_adjper_low = config->ssc_adj_per & 0xFF;
+       regs->ssc_adjper_high = (config->ssc_adj_per & 0xFF00) >> 8;
+
+       regs->ssc_control = config->ssc_center ? SSC_CENTER : 0;
+
+       pr_debug("SSC: Dec:%d, frac:%llu, frac_bits:%d\n",
+                regs->decimal_div_start, frac, config->frac_bits);
+       pr_debug("SSC: div_per:0x%X, stepsize:0x%X, adjper:0x%X\n",
+                ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
+}
+
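The ssc_per arithmetic above, with the driver's default configuration values plugged in (19.2 MHz reference, 31.5 kHz SSC frequency, ssc_adj_per = 2), as a standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t ref = 19200000, ssc_freq = 31500, adj_per = 2;
	/* DIV_ROUND_CLOSEST(ref, ssc_freq) / 2 - 1 */
	uint32_t ssc_per = (ref + ssc_freq / 2) / ssc_freq / 2 - 1; /* 304 */
	uint32_t ssc_mod = (ssc_per + 1) % (adj_per + 1);           /* 2 */

	printf("ssc_per=%u\n", ssc_per - ssc_mod);                  /* 302 */
	return 0;
}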
+static void dsi_pll_ssc_commit(struct dsi_pll_7nm *pll)
+{
+       void __iomem *base = pll->mmio;
+       struct dsi_pll_regs *regs = &pll->reg_setup;
+
+       if (pll->pll_configuration.enable_ssc) {
+               pr_debug("SSC is enabled\n");
+
+               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_LOW_1,
+                         regs->ssc_stepsize_low);
+               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_STEPSIZE_HIGH_1,
+                         regs->ssc_stepsize_high);
+               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_LOW_1,
+                         regs->ssc_div_per_low);
+               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_DIV_PER_HIGH_1,
+                         regs->ssc_div_per_high);
+               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_LOW_1,
+                         regs->ssc_adjper_low);
+               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_ADJPER_HIGH_1,
+                         regs->ssc_adjper_high);
+               pll_write(base + REG_DSI_7nm_PHY_PLL_SSC_CONTROL,
+                         SSC_EN | regs->ssc_control);
+       }
+}
+
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
+{
+       void __iomem *base = pll->mmio;
+       u8 analog_controls_five_1 = 0x01, vco_config_1 = 0x00;
+
+       if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
+               if (pll->vco_current_rate >= 3100000000ULL)
+                       analog_controls_five_1 = 0x03;
+
+               if (pll->vco_current_rate < 1520000000ULL)
+                       vco_config_1 = 0x08;
+               else if (pll->vco_current_rate < 2990000000ULL)
+                       vco_config_1 = 0x01;
+       }
+
+       pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE_1,
+                 analog_controls_five_1);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_VCO_CONFIG_1, vco_config_1);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_FIVE, 0x01);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_TWO, 0x03);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_ANALOG_CONTROLS_THREE, 0x00);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_DSM_DIVIDER, 0x00);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_FEEDBACK_DIVIDER, 0x4e);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_CALIBRATION_SETTINGS, 0x40);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_BAND_SEL_CAL_SETTINGS_THREE, 0xba);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_FREQ_DETECT_SETTINGS_ONE, 0x0c);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_OUTDIV, 0x00);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_OVERRIDE, 0x00);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_DIGITAL_TIMERS_TWO, 0x08);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_PROP_GAIN_RATE_1, 0x0a);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_BAND_SEL_RATE_1, 0xc0);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x84);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_INT_GAIN_IFILT_BAND_1, 0x82);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_FL_INT_GAIN_PFILT_BAND_1, 0x4c);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_OVERRIDE, 0x80);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x29);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PFILT, 0x2f);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT, 0x2a);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_IFILT,
+                 pll->base.type == MSM_DSI_PHY_7NM_V4_1 ? 0x3f : 0x22);
+
+       if (pll->base.type == MSM_DSI_PHY_7NM_V4_1) {
+               pll_write(base + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+               if (pll->slave)
+                       pll_write(pll->slave->mmio + REG_DSI_7nm_PHY_PLL_PERF_OPTIMIZE, 0x22);
+       }
+}
+
+static void dsi_pll_commit(struct dsi_pll_7nm *pll)
+{
+       void __iomem *base = pll->mmio;
+       struct dsi_pll_regs *reg = &pll->reg_setup;
+
+       pll_write(base + REG_DSI_7nm_PHY_PLL_CORE_INPUT_OVERRIDE, 0x12);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1, reg->decimal_div_start);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1, reg->frac_div_start_low);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1, reg->frac_div_start_mid);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1, reg->frac_div_start_high);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCKDET_RATE_1, 0x40);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_PLL_LOCK_DELAY, 0x06);
+       pll_write(base + REG_DSI_7nm_PHY_PLL_CMODE_1, 0x10); /* TODO: 0x00 for CPHY */
+       pll_write(base + REG_DSI_7nm_PHY_PLL_CLOCK_INVERTERS, reg->pll_clock_inverters);
+}
+
+static int dsi_pll_7nm_vco_set_rate(struct clk_hw *hw, unsigned long rate,
+                                    unsigned long parent_rate)
+{
+       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+
+       DBG("DSI PLL%d rate=%lu, parent's=%lu", pll_7nm->id, rate,
+           parent_rate);
+
+       pll_7nm->vco_current_rate = rate;
+       pll_7nm->vco_ref_clk_rate = VCO_REF_CLK_RATE;
+
+       dsi_pll_setup_config(pll_7nm);
+
+       dsi_pll_calc_dec_frac(pll_7nm);
+
+       dsi_pll_calc_ssc(pll_7nm);
+
+       dsi_pll_commit(pll_7nm);
+
+       dsi_pll_config_hzindep_reg(pll_7nm);
+
+       dsi_pll_ssc_commit(pll_7nm);
+
+       /* flush, ensure all register writes are done */
+       wmb();
+
+       return 0;
+}
+
+static int dsi_pll_7nm_lock_status(struct dsi_pll_7nm *pll)
+{
+       int rc;
+       u32 status = 0;
+       u32 const delay_us = 100;
+       u32 const timeout_us = 5000;
+
+       rc = readl_poll_timeout_atomic(pll->mmio +
+                                      REG_DSI_7nm_PHY_PLL_COMMON_STATUS_ONE,
+                                      status,
+                                      ((status & BIT(0)) > 0),
+                                      delay_us,
+                                      timeout_us);
+       if (rc)
+               pr_err("DSI PLL(%d) lock failed, status=0x%08x\n",
+                      pll->id, status);
+
+       return rc;
+}
+
+static void dsi_pll_disable_pll_bias(struct dsi_pll_7nm *pll)
+{
+       u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+       pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0);
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data & ~BIT(5));
+       ndelay(250);
+}
+
+static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
+{
+       u32 data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0);
+
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_0, data | BIT(5));
+       pll_write(pll->mmio + REG_DSI_7nm_PHY_PLL_SYSTEM_MUXES, 0xc0);
+       ndelay(250);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+{
+       u32 data;
+
+       data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1, data & ~BIT(5));
+}
+
+static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
+{
+       u32 data;
+
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CTRL_3, 0x04);
+
+       data = pll_read(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+                 data | BIT(5) | BIT(4));
+}
+
+static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
+{
+       /*
+        * Reset the PHY digital domain. This would be needed when
+        * coming out of a CX or analog rail power collapse while
+        * ensuring that the pads maintain LP00 or LP11 state
+        */
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, BIT(0));
+       wmb(); /* Ensure that the reset is asserted */
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE4, 0x0);
+       wmb(); /* Ensure that the reset is deasserted */
+}
+
+static int dsi_pll_7nm_vco_prepare(struct clk_hw *hw)
+{
+       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       int rc;
+
+       dsi_pll_enable_pll_bias(pll_7nm);
+       if (pll_7nm->slave)
+               dsi_pll_enable_pll_bias(pll_7nm->slave);
+
+       /* Start PLL */
+       pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0x01);
+
+       /*
+        * ensure all PLL configurations are written prior to checking
+        * for PLL lock.
+        */
+       wmb();
+
+       /* Check for PLL lock */
+       rc = dsi_pll_7nm_lock_status(pll_7nm);
+       if (rc) {
+               pr_err("PLL(%d) lock failed\n", pll_7nm->id);
+               goto error;
+       }
+
+       pll->pll_on = true;
+
+       /*
+        * assert power on reset for PHY digital in case the PLL is
+        * enabled after CX of analog domain power collapse. This needs
+        * to be done before enabling the global clk.
+        */
+       dsi_pll_phy_dig_reset(pll_7nm);
+       if (pll_7nm->slave)
+               dsi_pll_phy_dig_reset(pll_7nm->slave);
+
+       dsi_pll_enable_global_clk(pll_7nm);
+       if (pll_7nm->slave)
+               dsi_pll_enable_global_clk(pll_7nm->slave);
+
+error:
+       return rc;
+}
+
+static void dsi_pll_disable_sub(struct dsi_pll_7nm *pll)
+{
+       pll_write(pll->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_RBUF_CTRL, 0);
+       dsi_pll_disable_pll_bias(pll);
+}
+
+static void dsi_pll_7nm_vco_unprepare(struct clk_hw *hw)
+{
+       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+
+       /*
+        * To avoid any stray glitches while abruptly powering down the PLL,
+        * make sure to gate the clock using the clock enable bit before
+        * powering down the PLL
+        */
+       dsi_pll_disable_global_clk(pll_7nm);
+       pll_write(pll_7nm->phy_cmn_mmio + REG_DSI_7nm_PHY_CMN_PLL_CNTRL, 0);
+       dsi_pll_disable_sub(pll_7nm);
+       if (pll_7nm->slave) {
+               dsi_pll_disable_global_clk(pll_7nm->slave);
+               dsi_pll_disable_sub(pll_7nm->slave);
+       }
+       /* flush, ensure all register writes are done */
+       wmb();
+       pll->pll_on = false;
+}
+
+static unsigned long dsi_pll_7nm_vco_recalc_rate(struct clk_hw *hw,
+                                                 unsigned long parent_rate)
+{
+       struct msm_dsi_pll *pll = hw_clk_to_pll(hw);
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       void __iomem *base = pll_7nm->mmio;
+       u64 ref_clk = pll_7nm->vco_ref_clk_rate;
+       u64 vco_rate = 0x0;
+       u64 multiplier;
+       u32 frac;
+       u32 dec;
+       u64 pll_freq, tmp64;
+
+       dec = pll_read(base + REG_DSI_7nm_PHY_PLL_DECIMAL_DIV_START_1);
+       dec &= 0xff;
+
+       frac = pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_LOW_1);
+       frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_MID_1) &
+                 0xff) << 8);
+       frac |= ((pll_read(base + REG_DSI_7nm_PHY_PLL_FRAC_DIV_START_HIGH_1) &
+                 0x3) << 16);
+
+       /*
+        * TODO:
+        *      1. Assumes prescaler is disabled
+        *      2. Multiplier is 2^18; it should be 2^(num_of_frac_bits)
+        */
+       multiplier = 1 << 18;
+       pll_freq = dec * (ref_clk * 2);
+       tmp64 = (ref_clk * 2 * frac);
+       pll_freq += div_u64(tmp64, multiplier);
+
+       vco_rate = pll_freq;
+
+       DBG("DSI PLL%d returning vco rate = %lu, dec = %x, frac = %x",
+           pll_7nm->id, (unsigned long)vco_rate, dec, frac);
+
+       return (unsigned long)vco_rate;
+}
+
+static const struct clk_ops clk_ops_dsi_pll_7nm_vco = {
+       .round_rate = msm_dsi_pll_helper_clk_round_rate,
+       .set_rate = dsi_pll_7nm_vco_set_rate,
+       .recalc_rate = dsi_pll_7nm_vco_recalc_rate,
+       .prepare = dsi_pll_7nm_vco_prepare,
+       .unprepare = dsi_pll_7nm_vco_unprepare,
+};
+
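Once the VCO's clk_hw is registered (see pll_7nm_register() below), the common clock framework dispatches standard calls into this ops table. A hypothetical consumer-side fragment, with the lookup name assumed:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* hypothetical sketch: drive the registered VCO through the CCF */
static int example_drive_vco(struct device *dev)
{
	struct clk *vco = devm_clk_get(dev, "dsi0vco_clk"); /* assumed name */

	if (IS_ERR(vco))
		return PTR_ERR(vco);

	clk_set_rate(vco, 1500000000UL); /* -> dsi_pll_7nm_vco_set_rate() */
	return clk_prepare_enable(vco);  /* -> dsi_pll_7nm_vco_prepare() */
}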
+/*
+ * PLL Callbacks
+ */
+
+static void dsi_pll_7nm_save_state(struct msm_dsi_pll *pll)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+       void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
+       u32 cmn_clk_cfg0, cmn_clk_cfg1;
+
+       cached->pll_out_div = pll_read(pll_7nm->mmio +
+                                      REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+       cached->pll_out_div &= 0x3;
+
+       cmn_clk_cfg0 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
+       cached->bit_clk_div = cmn_clk_cfg0 & 0xf;
+       cached->pix_clk_div = (cmn_clk_cfg0 & 0xf0) >> 4;
+
+       cmn_clk_cfg1 = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+       cached->pll_mux = cmn_clk_cfg1 & 0x3;
+
+       DBG("DSI PLL%d outdiv %x bit_clk_div %x pix_clk_div %x pll_mux %x",
+           pll_7nm->id, cached->pll_out_div, cached->bit_clk_div,
+           cached->pix_clk_div, cached->pll_mux);
+}
+
+static int dsi_pll_7nm_restore_state(struct msm_dsi_pll *pll)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
+       void __iomem *phy_base = pll_7nm->phy_cmn_mmio;
+       u32 val;
+
+       val = pll_read(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
+       val &= ~0x3;
+       val |= cached->pll_out_div;
+       pll_write(pll_7nm->mmio + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE, val);
+
+       pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+                 cached->bit_clk_div | (cached->pix_clk_div << 4));
+
+       val = pll_read(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+       val &= ~0x3;
+       val |= cached->pll_mux;
+       pll_write(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, val);
+
+       DBG("DSI PLL%d", pll_7nm->id);
+
+       return 0;
+}
+
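The save/restore pair relies on a simple bitfield packing in CLK_CFG0: bits [3:0] hold the bit-clock divider and bits [7:4] the pixel divider. A small sketch of that packing:

#include <stdint.h>
#include <stdio.h>

/* pack the two 4-bit dividers the way restore_state writes CLK_CFG0 */
static uint32_t pack_clk_cfg0(uint8_t bit_clk_div, uint8_t pix_clk_div)
{
	return (bit_clk_div & 0xf) | ((pix_clk_div & 0xf) << 4);
}

int main(void)
{
	printf("0x%02x\n", pack_clk_cfg0(0x2, 0x4)); /* 0x42 */
	return 0;
}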
+static int dsi_pll_7nm_set_usecase(struct msm_dsi_pll *pll,
+                                   enum msm_dsi_phy_usecase uc)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       void __iomem *base = pll_7nm->phy_cmn_mmio;
+       u32 data = 0x0; /* internal PLL */
+
+       DBG("DSI PLL%d", pll_7nm->id);
+
+       switch (uc) {
+       case MSM_DSI_PHY_STANDALONE:
+               break;
+       case MSM_DSI_PHY_MASTER:
+               pll_7nm->slave = pll_7nm_list[(pll_7nm->id + 1) % DSI_MAX];
+               break;
+       case MSM_DSI_PHY_SLAVE:
+               data = 0x1; /* external PLL */
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* set PLL src */
+       pll_write(base + REG_DSI_7nm_PHY_CMN_CLK_CFG1, (data << 2));
+
+       pll_7nm->uc = uc;
+
+       return 0;
+}
+
+static int dsi_pll_7nm_get_provider(struct msm_dsi_pll *pll,
+                                    struct clk **byte_clk_provider,
+                                    struct clk **pixel_clk_provider)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       struct clk_hw_onecell_data *hw_data = pll_7nm->hw_data;
+
+       DBG("DSI PLL%d", pll_7nm->id);
+
+       if (byte_clk_provider)
+               *byte_clk_provider = hw_data->hws[DSI_BYTE_PLL_CLK]->clk;
+       if (pixel_clk_provider)
+               *pixel_clk_provider = hw_data->hws[DSI_PIXEL_PLL_CLK]->clk;
+
+       return 0;
+}
+
+static void dsi_pll_7nm_destroy(struct msm_dsi_pll *pll)
+{
+       struct dsi_pll_7nm *pll_7nm = to_pll_7nm(pll);
+       struct device *dev = &pll_7nm->pdev->dev;
+
+       DBG("DSI PLL%d", pll_7nm->id);
+       of_clk_del_provider(dev->of_node);
+
+       clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);
+       clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);
+       clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);
+       clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);
+       clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);
+       clk_hw_unregister_divider(pll_7nm->bit_clk_hw);
+       clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);
+       clk_hw_unregister(&pll_7nm->base.clk_hw);
+}
+
+/*
+ * The post dividers and mux clocks are created using the standard divider and
+ * mux API. Unlike the 14nm PHY, the slave PLL doesn't need its dividers/mux
+ * state to follow the master PLL's divider/mux state. Therefore, we don't
+ * require special clock ops that also configure the slave PLL registers.
+ */
+static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm)
+{
+       char clk_name[32], parent[32], vco_name[32];
+       char parent2[32], parent3[32], parent4[32];
+       struct clk_init_data vco_init = {
+               .parent_names = (const char *[]){ "bi_tcxo" },
+               .num_parents = 1,
+               .name = vco_name,
+               .flags = CLK_IGNORE_UNUSED,
+               .ops = &clk_ops_dsi_pll_7nm_vco,
+       };
+       struct device *dev = &pll_7nm->pdev->dev;
+       struct clk_hw_onecell_data *hw_data;
+       struct clk_hw *hw;
+       int ret;
+
+       DBG("DSI%d", pll_7nm->id);
+
+       hw_data = devm_kzalloc(dev, sizeof(*hw_data) +
+                              NUM_PROVIDED_CLKS * sizeof(struct clk_hw *),
+                              GFP_KERNEL);
+       if (!hw_data)
+               return -ENOMEM;
+
+       snprintf(vco_name, 32, "dsi%dvco_clk", pll_7nm->id);
+       pll_7nm->base.clk_hw.init = &vco_init;
+
+       ret = clk_hw_register(dev, &pll_7nm->base.clk_hw);
+       if (ret)
+               return ret;
+
+       snprintf(clk_name, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+       snprintf(parent, 32, "dsi%dvco_clk", pll_7nm->id);
+
+       hw = clk_hw_register_divider(dev, clk_name,
+                                    parent, CLK_SET_RATE_PARENT,
+                                    pll_7nm->mmio +
+                                    REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE,
+                                    0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto err_base_clk_hw;
+       }
+
+       pll_7nm->out_div_clk_hw = hw;
+
+       snprintf(clk_name, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+       snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+
+       /* BIT CLK: DIV_CTRL_3_0 */
+       hw = clk_hw_register_divider(dev, clk_name, parent,
+                                    CLK_SET_RATE_PARENT,
+                                    pll_7nm->phy_cmn_mmio +
+                                    REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+                                    0, 4, CLK_DIVIDER_ONE_BASED,
+                                    &pll_7nm->postdiv_lock);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto err_out_div_clk_hw;
+       }
+
+       pll_7nm->bit_clk_hw = hw;
+
+       snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_7nm->id);
+       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+
+       /* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
+       hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+                                         CLK_SET_RATE_PARENT, 1, 8);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto err_bit_clk_hw;
+       }
+
+       pll_7nm->byte_clk_hw = hw;
+       hw_data->hws[DSI_BYTE_PLL_CLK] = hw;
+
+       snprintf(clk_name, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);
+       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+
+       hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+                                         0, 1, 2);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto err_byte_clk_hw;
+       }
+
+       pll_7nm->by_2_bit_clk_hw = hw;
+
+       snprintf(clk_name, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);
+       snprintf(parent, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+
+       hw = clk_hw_register_fixed_factor(dev, clk_name, parent,
+                                         0, 1, 4);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto err_by_2_bit_clk_hw;
+       }
+
+       pll_7nm->post_out_div_clk_hw = hw;
+
+       snprintf(clk_name, 32, "dsi%d_pclk_mux", pll_7nm->id);
+       snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_7nm->id);
+       snprintf(parent2, 32, "dsi%d_pll_by_2_bit_clk", pll_7nm->id);
+       snprintf(parent3, 32, "dsi%d_pll_out_div_clk", pll_7nm->id);
+       snprintf(parent4, 32, "dsi%d_pll_post_out_div_clk", pll_7nm->id);
+
+       hw = clk_hw_register_mux(dev, clk_name,
+                                ((const char *[]){
+                                parent, parent2, parent3, parent4
+                                }), 4, 0, pll_7nm->phy_cmn_mmio +
+                                REG_DSI_7nm_PHY_CMN_CLK_CFG1,
+                                0, 2, 0, NULL);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto err_post_out_div_clk_hw;
+       }
+
+       pll_7nm->pclk_mux_hw = hw;
+
+       snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_7nm->id);
+       snprintf(parent, 32, "dsi%d_pclk_mux", pll_7nm->id);
+
+       /* PIX CLK DIV: DIV_CTRL_7_4 */
+       hw = clk_hw_register_divider(dev, clk_name, parent,
+                                    0, pll_7nm->phy_cmn_mmio +
+                                       REG_DSI_7nm_PHY_CMN_CLK_CFG0,
+                                    4, 4, CLK_DIVIDER_ONE_BASED,
+                                    &pll_7nm->postdiv_lock);
+       if (IS_ERR(hw)) {
+               ret = PTR_ERR(hw);
+               goto err_pclk_mux_hw;
+       }
+
+       pll_7nm->out_dsiclk_hw = hw;
+       hw_data->hws[DSI_PIXEL_PLL_CLK] = hw;
+
+       hw_data->num = NUM_PROVIDED_CLKS;
+       pll_7nm->hw_data = hw_data;
+
+       ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
+                                    pll_7nm->hw_data);
+       if (ret) {
+               DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+               goto err_dsiclk_hw;
+       }
+
+       return 0;
+
+err_dsiclk_hw:
+       clk_hw_unregister_divider(pll_7nm->out_dsiclk_hw);
+err_pclk_mux_hw:
+       clk_hw_unregister_mux(pll_7nm->pclk_mux_hw);
+err_post_out_div_clk_hw:
+       clk_hw_unregister_fixed_factor(pll_7nm->post_out_div_clk_hw);
+err_by_2_bit_clk_hw:
+       clk_hw_unregister_fixed_factor(pll_7nm->by_2_bit_clk_hw);
+err_byte_clk_hw:
+       clk_hw_unregister_fixed_factor(pll_7nm->byte_clk_hw);
+err_bit_clk_hw:
+       clk_hw_unregister_divider(pll_7nm->bit_clk_hw);
+err_out_div_clk_hw:
+       clk_hw_unregister_divider(pll_7nm->out_div_clk_hw);
+err_base_clk_hw:
+       clk_hw_unregister(&pll_7nm->base.clk_hw);
+
+       return ret;
+}
+
+struct msm_dsi_pll *msm_dsi_pll_7nm_init(struct platform_device *pdev, int id)
+{
+       struct dsi_pll_7nm *pll_7nm;
+       struct msm_dsi_pll *pll;
+       int ret;
+
+       pll_7nm = devm_kzalloc(&pdev->dev, sizeof(*pll_7nm), GFP_KERNEL);
+       if (!pll_7nm)
+               return ERR_PTR(-ENOMEM);
+
+       DBG("DSI PLL%d", id);
+
+       pll_7nm->pdev = pdev;
+       pll_7nm->id = id;
+       pll_7nm_list[id] = pll_7nm;
+
+       pll_7nm->phy_cmn_mmio = msm_ioremap(pdev, "dsi_phy", "DSI_PHY");
+       if (IS_ERR_OR_NULL(pll_7nm->phy_cmn_mmio)) {
+               DRM_DEV_ERROR(&pdev->dev, "failed to map CMN PHY base\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       pll_7nm->mmio = msm_ioremap(pdev, "dsi_pll", "DSI_PLL");
+       if (IS_ERR_OR_NULL(pll_7nm->mmio)) {
+               DRM_DEV_ERROR(&pdev->dev, "failed to map PLL base\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       spin_lock_init(&pll_7nm->postdiv_lock);
+
+       pll = &pll_7nm->base;
+       pll->min_rate = 1000000000UL;
+       pll->max_rate = 3500000000UL;
+       if (pll->type == MSM_DSI_PHY_7NM_V4_1) {
+               pll->min_rate = 600000000UL;
+               pll->max_rate = (unsigned long)5000000000ULL;
+               /* workaround for max rate overflowing on 32-bit builds: */
+               pll->max_rate = max(pll->max_rate, 0xffffffffUL);
+       }
+       pll->get_provider = dsi_pll_7nm_get_provider;
+       pll->destroy = dsi_pll_7nm_destroy;
+       pll->save_state = dsi_pll_7nm_save_state;
+       pll->restore_state = dsi_pll_7nm_restore_state;
+       pll->set_usecase = dsi_pll_7nm_set_usecase;
+
+       pll_7nm->vco_delay = 1;
+
+       ret = pll_7nm_register(pll_7nm);
+       if (ret) {
+               DRM_DEV_ERROR(&pdev->dev, "failed to register PLL: %d\n", ret);
+               return ERR_PTR(ret);
+       }
+
+       /* TODO: Remove this when we have proper display handover support */
+       msm_dsi_pll_save_state(pll);
+
+       return pll;
+}
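For context, a hedged sketch of how the DSI host side picks up the two provided clocks through the existing msm_dsi_pll_get_clk_provider() wrapper around the get_provider op; the rate below is illustrative:

    struct clk *byte_clk, *pixel_clk;
    int ret;

    ret = msm_dsi_pll_get_clk_provider(pll, &byte_clk, &pixel_clk);
    if (ret)
            return ret;

    /* CLK_SET_RATE_PARENT on the divider chain propagates the request
     * all the way up to the VCO. */
    ret = clk_set_rate(byte_clk, 187500000);   /* illustrative rate */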
index 7933384..4968557 100644 (file)
@@ -453,15 +453,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
        if (ret)
                goto err_msm_uninit;
 
-       if (!dev->dma_parms) {
-               dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
-                                             GFP_KERNEL);
-               if (!dev->dma_parms) {
-                       ret = -ENOMEM;
-                       goto err_msm_uninit;
-               }
-       }
-       dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+       dma_set_max_seg_size(dev, UINT_MAX);
 
        msm_gem_shrinker_init(ddev);
 
@@ -594,9 +586,10 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
        if (!ctx)
                return -ENOMEM;
 
+       kref_init(&ctx->ref);
        msm_submitqueue_init(dev, ctx);
 
-       ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
+       ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
        file->driver_priv = ctx;
 
        return 0;
@@ -615,7 +608,7 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
 static void context_close(struct msm_file_private *ctx)
 {
        msm_submitqueue_close(ctx);
-       kfree(ctx);
+       msm_file_private_put(ctx);
 }
 
 static void msm_postclose(struct drm_device *dev, struct drm_file *file)
@@ -779,18 +772,19 @@ static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
 }
 
 static int msm_ioctl_gem_info_iova(struct drm_device *dev,
-               struct drm_gem_object *obj, uint64_t *iova)
+               struct drm_file *file, struct drm_gem_object *obj,
+               uint64_t *iova)
 {
-       struct msm_drm_private *priv = dev->dev_private;
+       struct msm_file_private *ctx = file->driver_priv;
 
-       if (!priv->gpu)
+       if (!ctx->aspace)
                return -EINVAL;
 
        /*
         * Don't pin the memory here - just get an address so that userspace can
         * be productive
         */
-       return msm_gem_get_iova(obj, priv->gpu->aspace, iova);
+       return msm_gem_get_iova(obj, ctx->aspace, iova);
 }
 
 static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
@@ -829,7 +823,7 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
                args->value = msm_gem_mmap_offset(obj);
                break;
        case MSM_INFO_GET_IOVA:
-               ret = msm_ioctl_gem_info_iova(dev, obj, &args->value);
+               ret = msm_ioctl_gem_info_iova(dev, file, obj, &args->value);
                break;
        case MSM_INFO_SET_NAME:
                /* length check should leave room for terminating null: */
@@ -1358,6 +1352,7 @@ static int __init msm_drm_register(void)
        msm_dsi_register();
        msm_edp_register();
        msm_hdmi_register();
+       msm_dp_register();
        adreno_register();
        return platform_driver_register(&msm_platform_driver);
 }
@@ -1366,6 +1361,7 @@ static void __exit msm_drm_unregister(void)
 {
        DBG("fini");
        platform_driver_unregister(&msm_platform_driver);
+       msm_dp_unregister();
        msm_hdmi_unregister();
        adreno_unregister();
        msm_edp_unregister();
index af259b0..b9dd8f8 100644 (file)
@@ -57,6 +57,7 @@ struct msm_file_private {
        struct list_head submitqueues;
        int queueid;
        struct msm_gem_address_space *aspace;
+       struct kref ref;
 };
 
 enum msm_mdp_plane_property {
@@ -159,6 +160,8 @@ struct msm_drm_private {
        /* DSI is shared by mdp4 and mdp5 */
        struct msm_dsi *dsi[2];
 
+       struct msm_dp *dp;
+
        /* when we have more than one 'msm_gpu' these need to be an array: */
        struct msm_gpu *gpu;
        struct msm_file_private *lastctx;
@@ -248,6 +251,10 @@ int msm_gem_map_vma(struct msm_gem_address_space *aspace,
 void msm_gem_close_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma);
 
+
+struct msm_gem_address_space *
+msm_gem_address_space_get(struct msm_gem_address_space *aspace);
+
 void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
 
 struct msm_gem_address_space *
@@ -302,9 +309,8 @@ void msm_gem_put_vaddr(struct drm_gem_object *obj);
 int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
 int msm_gem_sync_object(struct drm_gem_object *obj,
                struct msm_fence_context *fctx, bool exclusive);
-void msm_gem_move_to_active(struct drm_gem_object *obj,
-               struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence);
-void msm_gem_move_to_inactive(struct drm_gem_object *obj);
+void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu);
+void msm_gem_active_put(struct drm_gem_object *obj);
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout);
 int msm_gem_cpu_fini(struct drm_gem_object *obj);
 void msm_gem_free_object(struct drm_gem_object *obj);
@@ -378,6 +384,63 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
 }
 #endif
 
+#ifdef CONFIG_DRM_MSM_DP
+int __init msm_dp_register(void);
+void __exit msm_dp_unregister(void);
+int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev,
+                        struct drm_encoder *encoder);
+int msm_dp_display_enable(struct msm_dp *dp, struct drm_encoder *encoder);
+int msm_dp_display_disable(struct msm_dp *dp, struct drm_encoder *encoder);
+int msm_dp_display_pre_disable(struct msm_dp *dp, struct drm_encoder *encoder);
+void msm_dp_display_mode_set(struct msm_dp *dp, struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode);
+void msm_dp_irq_postinstall(struct msm_dp *dp_display);
+
+void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor);
+
+#else
+static inline int __init msm_dp_register(void)
+{
+       return -EINVAL;
+}
+static inline void __exit msm_dp_unregister(void)
+{
+}
+static inline int msm_dp_modeset_init(struct msm_dp *dp_display,
+                                      struct drm_device *dev,
+                                      struct drm_encoder *encoder)
+{
+       return -EINVAL;
+}
+static inline int msm_dp_display_enable(struct msm_dp *dp,
+                                       struct drm_encoder *encoder)
+{
+       return -EINVAL;
+}
+static inline int msm_dp_display_disable(struct msm_dp *dp,
+                                       struct drm_encoder *encoder)
+{
+       return -EINVAL;
+}
+static inline void msm_dp_display_mode_set(struct msm_dp *dp,
+                               struct drm_encoder *encoder,
+                               struct drm_display_mode *mode,
+                               struct drm_display_mode *adjusted_mode)
+{
+}
+
+static inline void msm_dp_irq_postinstall(struct msm_dp *dp_display)
+{
+}
+
+static inline void msm_dp_debugfs_init(struct msm_dp *dp_display,
+               struct drm_minor *minor)
+{
+}
+
+#endif
+
 void __init msm_mdp_register(void);
 void __exit msm_mdp_unregister(void);
 void __init msm_dpu_register(void);
@@ -398,8 +461,9 @@ void msm_perf_debugfs_cleanup(struct msm_drm_private *priv);
 #else
 static inline int msm_debugfs_late_init(struct drm_device *dev) { return 0; }
 __printf(3, 4)
-static inline void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
-               const char *fmt, ...) {}
+static inline void msm_rd_dump_submit(struct msm_rd_state *rd,
+                       struct msm_gem_submit *submit,
+                       const char *fmt, ...) {}
 static inline void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) {}
 static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
 #endif
@@ -419,7 +483,8 @@ struct msm_gpu_submitqueue;
 int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx);
 struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
                u32 id);
-int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
+int msm_submitqueue_create(struct drm_device *drm,
+               struct msm_file_private *ctx,
                u32 prio, u32 flags, u32 *id);
 int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
                struct drm_msm_submitqueue_query *args);
@@ -428,6 +493,26 @@ void msm_submitqueue_close(struct msm_file_private *ctx);
 
 void msm_submitqueue_destroy(struct kref *kref);
 
+static inline void __msm_file_private_destroy(struct kref *kref)
+{
+       struct msm_file_private *ctx = container_of(kref,
+               struct msm_file_private, ref);
+
+       msm_gem_address_space_put(ctx->aspace);
+       kfree(ctx);
+}
+
+static inline void msm_file_private_put(struct msm_file_private *ctx)
+{
+       kref_put(&ctx->ref, __msm_file_private_destroy);
+}
+
+static inline struct msm_file_private *msm_file_private_get(
+       struct msm_file_private *ctx)
+{
+       kref_get(&ctx->ref);
+       return ctx;
+}
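These helpers make the file private reference-counted instead of freed-at-close; a sketch of the flow this series sets up (see the context_init()/context_close() and submitqueue hunks):

    /* open:      kref_init(&ctx->ref)                     context_init()
     * queue new: queue->ctx = msm_file_private_get(ctx)   per submitqueue
     * close:     msm_file_private_put(ctx)                context_close()
     * last put:  __msm_file_private_destroy() drops the per-process
     *            address space and frees ctx
     */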
 
 #define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
 #define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
index e47958c..ec60211 100644 (file)
@@ -52,23 +52,14 @@ static void sync_for_device(struct msm_gem_object *msm_obj)
 {
        struct device *dev = msm_obj->base.dev->dev;
 
-       if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-               dma_sync_sgtable_for_device(dev, msm_obj->sgt,
-                                           DMA_BIDIRECTIONAL);
-       } else {
-               dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
-       }
+       dma_map_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
 }
 
 static void sync_for_cpu(struct msm_gem_object *msm_obj)
 {
        struct device *dev = msm_obj->base.dev->dev;
 
-       if (get_dma_ops(dev) && IS_ENABLED(CONFIG_ARM64)) {
-               dma_sync_sgtable_for_cpu(dev, msm_obj->sgt, DMA_BIDIRECTIONAL);
-       } else {
-               dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
-       }
+       dma_unmap_sgtable(dev, msm_obj->sgt, DMA_BIDIRECTIONAL, 0);
 }
 
 /* allocate pages from VRAM carveout, used when no IOMMU: */
@@ -750,31 +741,31 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
        return 0;
 }
 
-void msm_gem_move_to_active(struct drm_gem_object *obj,
-               struct msm_gpu *gpu, bool exclusive, struct dma_fence *fence)
+void msm_gem_active_get(struct drm_gem_object *obj, struct msm_gpu *gpu)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
        WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
-       msm_obj->gpu = gpu;
-       if (exclusive)
-               dma_resv_add_excl_fence(obj->resv, fence);
-       else
-               dma_resv_add_shared_fence(obj->resv, fence);
-       list_del_init(&msm_obj->mm_list);
-       list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+
+       if (!atomic_fetch_inc(&msm_obj->active_count)) {
+               msm_obj->gpu = gpu;
+               list_del_init(&msm_obj->mm_list);
+               list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+       }
 }
 
-void msm_gem_move_to_inactive(struct drm_gem_object *obj)
+void msm_gem_active_put(struct drm_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
-       struct msm_drm_private *priv = dev->dev_private;
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       struct msm_drm_private *priv = obj->dev->dev_private;
 
-       WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 
-       msm_obj->gpu = NULL;
-       list_del_init(&msm_obj->mm_list);
-       list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+       if (!atomic_dec_return(&msm_obj->active_count)) {
+               msm_obj->gpu = NULL;
+               list_del_init(&msm_obj->mm_list);
+               list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+       }
 }
 
 int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
@@ -849,11 +840,28 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 
                seq_puts(m, "      vmas:");
 
-               list_for_each_entry(vma, &msm_obj->vmas, list)
-                       seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
-                               vma->aspace != NULL ? vma->aspace->name : NULL,
-                               vma->iova, vma->mapped ? "mapped" : "unmapped",
+               list_for_each_entry(vma, &msm_obj->vmas, list) {
+                       const char *name, *comm;
+                       if (vma->aspace) {
+                               struct msm_gem_address_space *aspace = vma->aspace;
+                               struct task_struct *task =
+                                       get_pid_task(aspace->pid, PIDTYPE_PID);
+                               if (task) {
+                                       comm = kstrdup(task->comm, GFP_KERNEL);
+                               } else {
+                                       comm = NULL;
+                               }
+                               name = aspace->name;
+                       } else {
+                               name = comm = NULL;
+                       }
+                       seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
+                               name, comm ? ":" : "", comm ? comm : "",
+                               vma->aspace, vma->iova,
+                               vma->mapped ? "mapped" : "unmapped",
                                vma->inuse);
+                       kfree(comm);
+               }
 
                seq_puts(m, "\n");
        }
index 972490b..a1bf741 100644 (file)
@@ -24,6 +24,11 @@ struct msm_gem_address_space {
        spinlock_t lock; /* Protects drm_mm node allocation/removal */
        struct msm_mmu *mmu;
        struct kref kref;
+
+       /* For address spaces associated with a specific process, this
+        * will be non-NULL:
+        */
+       struct pid *pid;
 };
 
 struct msm_gem_vma {
@@ -83,12 +88,14 @@ struct msm_gem_object {
        struct mutex lock; /* Protects resources associated with bo */
 
        char name[32]; /* Identifier to print for the debugfs files */
+
+       atomic_t active_count;
 };
 #define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
 
 static inline bool is_active(struct msm_gem_object *msm_obj)
 {
-       return msm_obj->gpu != NULL;
+       return atomic_read(&msm_obj->active_count);
 }
 
 static inline bool is_purgeable(struct msm_gem_object *msm_obj)
@@ -142,6 +149,7 @@ struct msm_gem_submit {
        bool valid;         /* true if no cmdstream patching needed */
        bool in_rb;         /* "sudo" mode, copy cmds into RB */
        struct msm_ringbuffer *ring;
+       struct msm_file_private *ctx;
        unsigned int nr_cmds;
        unsigned int nr_bos;
        u32 ident;         /* A "identifier" for the submit for logging */
index 722d616..482576d 100644 (file)
@@ -6,6 +6,7 @@
 
 #include "msm_drv.h"
 #include "msm_gem.h"
+#include "msm_gpu_trace.h"
 
 static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
 {
@@ -87,7 +88,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
                mutex_unlock(&dev->struct_mutex);
 
        if (freed > 0)
-               pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
+               trace_msm_gem_purge(freed << PAGE_SHIFT);
 
        return freed;
 }
@@ -123,7 +124,7 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
        *(unsigned long *)ptr += unmapped;
 
        if (unmapped > 0)
-               pr_info_ratelimited("Purging %u vmaps\n", unmapped);
+               trace_msm_gem_purge_vmaps(unmapped);
 
        return NOTIFY_DONE;
 }
index 8cb9aa1..aa5c60a 100644 (file)
@@ -27,7 +27,7 @@
 #define BO_PINNED   0x2000
 
 static struct msm_gem_submit *submit_create(struct drm_device *dev,
-               struct msm_gpu *gpu, struct msm_gem_address_space *aspace,
+               struct msm_gpu *gpu,
                struct msm_gpu_submitqueue *queue, uint32_t nr_bos,
                uint32_t nr_cmds)
 {
@@ -43,7 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
                return NULL;
 
        submit->dev = dev;
-       submit->aspace = aspace;
+       submit->aspace = queue->ctx->aspace;
        submit->gpu = gpu;
        submit->fence = NULL;
        submit->cmd = (void *)&submit->bos[nr_bos];
@@ -677,7 +677,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                }
        }
 
-       submit = submit_create(dev, gpu, ctx->aspace, queue, args->nr_bos,
+       submit = submit_create(dev, gpu, queue, args->nr_bos,
                args->nr_cmds);
        if (!submit) {
                ret = -ENOMEM;
@@ -785,7 +785,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
                }
        }
 
-       msm_gpu_submit(gpu, submit, ctx);
+       msm_gpu_submit(gpu, submit);
 
        args->fence = submit->fence->seqno;
 
index 5f6a112..f914ddb 100644 (file)
@@ -17,6 +17,7 @@ msm_gem_address_space_destroy(struct kref *kref)
        drm_mm_takedown(&aspace->mm);
        if (aspace->mmu)
                aspace->mmu->funcs->destroy(aspace->mmu);
+       put_pid(aspace->pid);
        kfree(aspace);
 }
 
@@ -27,6 +28,15 @@ void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
                kref_put(&aspace->kref, msm_gem_address_space_destroy);
 }
 
+struct msm_gem_address_space *
+msm_gem_address_space_get(struct msm_gem_address_space *aspace)
+{
+       if (!IS_ERR_OR_NULL(aspace))
+               kref_get(&aspace->kref);
+
+       return aspace;
+}
+
 /* Actually unmap memory for the vma */
 void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
                struct msm_gem_vma *vma)
@@ -78,8 +88,10 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
                ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
                                size, prot);
 
-       if (ret)
+       if (ret) {
                vma->mapped = false;
+               vma->inuse--;
+       }
 
        return ret;
 }
index 57ddc94..55d1648 100644 (file)
@@ -24,7 +24,7 @@
 static int msm_devfreq_target(struct device *dev, unsigned long *freq,
                u32 flags)
 {
-       struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+       struct msm_gpu *gpu = dev_to_gpu(dev);
        struct dev_pm_opp *opp;
 
        opp = devfreq_recommended_opp(dev, freq, flags);
@@ -32,6 +32,8 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
        if (IS_ERR(opp))
                return PTR_ERR(opp);
 
+       trace_msm_gpu_freq_change(dev_pm_opp_get_freq(opp));
+
        if (gpu->funcs->gpu_set_freq)
                gpu->funcs->gpu_set_freq(gpu, opp);
        else
@@ -45,7 +47,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
 static int msm_devfreq_get_dev_status(struct device *dev,
                struct devfreq_dev_status *status)
 {
-       struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+       struct msm_gpu *gpu = dev_to_gpu(dev);
        ktime_t time;
 
        if (gpu->funcs->gpu_get_freq)
@@ -64,7 +66,7 @@ static int msm_devfreq_get_dev_status(struct device *dev,
 
 static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
 {
-       struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+       struct msm_gpu *gpu = dev_to_gpu(dev);
 
        if (gpu->funcs->gpu_get_freq)
                *freq = gpu->funcs->gpu_get_freq(gpu);
@@ -200,6 +202,7 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
        int ret;
 
        DBG("%s", gpu->name);
+       trace_msm_gpu_resume(0);
 
        ret = enable_pwrrail(gpu);
        if (ret)
@@ -225,6 +228,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
        int ret;
 
        DBG("%s", gpu->name);
+       trace_msm_gpu_suspend(0);
 
        devfreq_suspend_device(gpu->devfreq.devfreq);
 
@@ -520,7 +524,7 @@ static void recover_worker(struct work_struct *work)
                        struct msm_ringbuffer *ring = gpu->rb[i];
 
                        list_for_each_entry(submit, &ring->submits, node)
-                               gpu->funcs->submit(gpu, submit, NULL);
+                               gpu->funcs->submit(gpu, submit);
                }
        }
 
@@ -694,8 +698,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
 
        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
-               /* move to inactive: */
-               msm_gem_move_to_inactive(&msm_obj->base);
+
+               msm_gem_active_put(&msm_obj->base);
                msm_gem_unpin_iova(&msm_obj->base, submit->aspace);
                drm_gem_object_put_locked(&msm_obj->base);
        }
@@ -747,8 +751,7 @@ void msm_gpu_retire(struct msm_gpu *gpu)
 }
 
 /* add bo's to gpu's ring, and kick gpu: */
-void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-               struct msm_file_private *ctx)
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 {
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
@@ -771,6 +774,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
+               struct drm_gem_object *drm_obj = &msm_obj->base;
                uint64_t iova;
 
                /* can't happen yet.. but when we add 2d support we'll have
@@ -783,13 +787,15 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                msm_gem_get_and_pin_iova(&msm_obj->base, submit->aspace, &iova);
 
                if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
-                       msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
+                       dma_resv_add_excl_fence(drm_obj->resv, submit->fence);
                else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
-                       msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
+                       dma_resv_add_shared_fence(drm_obj->resv, submit->fence);
+
+               msm_gem_active_get(drm_obj, gpu);
        }
 
-       gpu->funcs->submit(gpu, submit, ctx);
-       priv->lastctx = ctx;
+       gpu->funcs->submit(gpu, submit);
+       priv->lastctx = submit->queue->ctx;
 
        hangcheck_timer_reset(gpu);
 }
@@ -824,6 +830,30 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
        return 0;
 }
 
+/* Return a new address space for a msm_drm_private instance */
+struct msm_gem_address_space *
+msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
+{
+       struct msm_gem_address_space *aspace = NULL;
+       if (!gpu)
+               return NULL;
+
+       /*
+        * If the target doesn't support private address spaces then return
+        * the global one
+        */
+       if (gpu->funcs->create_private_address_space) {
+               aspace = gpu->funcs->create_private_address_space(gpu);
+               if (!IS_ERR(aspace))
+                       aspace->pid = get_pid(task_pid(task));
+       }
+
+       if (IS_ERR_OR_NULL(aspace))
+               aspace = msm_gem_address_space_get(gpu->aspace);
+
+       return aspace;
+}
+
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                const char *name, struct msm_gpu_config *config)
@@ -892,7 +922,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                gpu->gpu_cx = NULL;
 
        gpu->pdev = pdev;
-       platform_set_drvdata(pdev, gpu);
+       platform_set_drvdata(pdev, &gpu->adreno_smmu);
 
        msm_devfreq_init(gpu);
 
index 37cffac..6c9e1fd 100644 (file)
@@ -7,6 +7,7 @@
 #ifndef __MSM_GPU_H__
 #define __MSM_GPU_H__
 
+#include <linux/adreno-smmu-priv.h>
 #include <linux/clk.h>
 #include <linux/interconnect.h>
 #include <linux/pm_opp.h>
@@ -45,8 +46,7 @@ struct msm_gpu_funcs {
        int (*hw_init)(struct msm_gpu *gpu);
        int (*pm_suspend)(struct msm_gpu *gpu);
        int (*pm_resume)(struct msm_gpu *gpu);
-       void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-                       struct msm_file_private *ctx);
+       void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
        void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
        irqreturn_t (*irq)(struct msm_gpu *irq);
        struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
@@ -66,6 +66,9 @@ struct msm_gpu_funcs {
        void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp);
        struct msm_gem_address_space *(*create_address_space)
                (struct msm_gpu *gpu, struct platform_device *pdev);
+       struct msm_gem_address_space *(*create_private_address_space)
+               (struct msm_gpu *gpu);
+       uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
 };
 
 struct msm_gpu {
@@ -74,6 +77,8 @@ struct msm_gpu {
        struct platform_device *pdev;
        const struct msm_gpu_funcs *funcs;
 
+       struct adreno_smmu_priv adreno_smmu;
+
        /* performance counters (hw & sw): */
        spinlock_t perf_lock;
        bool perfcntr_active;
@@ -144,6 +149,12 @@ struct msm_gpu {
        bool hw_apriv;
 };
 
+static inline struct msm_gpu *dev_to_gpu(struct device *dev)
+{
+       struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
+       return container_of(adreno_smmu, struct msm_gpu, adreno_smmu);
+}
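With platform_set_drvdata() now storing &gpu->adreno_smmu (see the msm_gpu_init() hunk above), one drvdata pointer serves two consumers; a sketch of the two views:

    /* arm-smmu side: sees the embedded adreno_smmu_priv directly */
    struct adreno_smmu_priv *priv = dev_get_drvdata(dev);

    /* driver side: container_of() recovers the enclosing msm_gpu */
    struct msm_gpu *gpu = dev_to_gpu(dev);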
+
 /* It turns out that all targets use the same ringbuffer size */
 #define MSM_GPU_RINGBUFFER_SZ SZ_32K
 #define MSM_GPU_RINGBUFFER_BLKSIZE 32
@@ -184,6 +195,7 @@ struct msm_gpu_submitqueue {
        u32 flags;
        u32 prio;
        int faults;
+       struct msm_file_private *ctx;
        struct list_head node;
        struct kref ref;
 };
@@ -283,13 +295,15 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
                uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
 
 void msm_gpu_retire(struct msm_gpu *gpu);
-void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
-               struct msm_file_private *ctx);
+void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
 
 int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                const char *name, struct msm_gpu_config *config);
 
+struct msm_gem_address_space *
+msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);
+
 void msm_gpu_cleanup(struct msm_gpu *gpu);
 
 struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
index 122b847..03e0c25 100644 (file)
@@ -83,6 +83,89 @@ TRACE_EVENT(msm_gpu_submit_retired,
                    __entry->start_ticks, __entry->end_ticks)
 );
 
+
+TRACE_EVENT(msm_gpu_freq_change,
+               TP_PROTO(u32 freq),
+               TP_ARGS(freq),
+               TP_STRUCT__entry(
+                       __field(u32, freq)
+                       ),
+               TP_fast_assign(
+                       /* trace freq in MHz to match intel_gpu_freq_change, to make life easier
+                        * for userspace
+                        */
+                       __entry->freq = DIV_ROUND_UP(freq, 1000000);
+                       ),
+               TP_printk("new_freq=%u", __entry->freq)
+);
+
+
+TRACE_EVENT(msm_gmu_freq_change,
+               TP_PROTO(u32 freq, u32 perf_index),
+               TP_ARGS(freq, perf_index),
+               TP_STRUCT__entry(
+                       __field(u32, freq)
+                       __field(u32, perf_index)
+                       ),
+               TP_fast_assign(
+                       __entry->freq = freq;
+                       __entry->perf_index = perf_index;
+                       ),
+               TP_printk("freq=%u, perf_index=%u", __entry->freq, __entry->perf_index)
+);
+
+
+TRACE_EVENT(msm_gem_purge,
+               TP_PROTO(u32 bytes),
+               TP_ARGS(bytes),
+               TP_STRUCT__entry(
+                       __field(u32, bytes)
+                       ),
+               TP_fast_assign(
+                       __entry->bytes = bytes;
+                       ),
+               TP_printk("Purging %u bytes", __entry->bytes)
+);
+
+
+TRACE_EVENT(msm_gem_purge_vmaps,
+               TP_PROTO(u32 unmapped),
+               TP_ARGS(unmapped),
+               TP_STRUCT__entry(
+                       __field(u32, unmapped)
+                       ),
+               TP_fast_assign(
+                       __entry->unmapped = unmapped;
+                       ),
+               TP_printk("Purging %u vmaps", __entry->unmapped)
+);
+
+
+TRACE_EVENT(msm_gpu_suspend,
+               TP_PROTO(int dummy),
+               TP_ARGS(dummy),
+               TP_STRUCT__entry(
+                       __field(u32, dummy)
+                       ),
+               TP_fast_assign(
+                       __entry->dummy = dummy;
+                       ),
+               TP_printk("%u", __entry->dummy)
+);
+
+
+TRACE_EVENT(msm_gpu_resume,
+               TP_PROTO(int dummy),
+               TP_ARGS(dummy),
+               TP_STRUCT__entry(
+                       __field(u32, dummy)
+                       ),
+               TP_fast_assign(
+                       __entry->dummy = dummy;
+                       ),
+               TP_printk("%u", __entry->dummy)
+);
+
 #endif
 
 #undef TRACE_INCLUDE_PATH
index 53a7348..3794961 100644 (file)
@@ -101,7 +101,7 @@ struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu)
        }
 
        gpummu->gpu = gpu;
-       msm_mmu_init(&gpummu->base, dev, &funcs);
+       msm_mmu_init(&gpummu->base, dev, &funcs, MSM_MMU_GPUMMU);
 
        return &gpummu->base;
 }
index 6c31e65..3a83ffd 100644 (file)
  * Author: Rob Clark <robdclark@gmail.com>
  */
 
+#include <linux/adreno-smmu-priv.h>
+#include <linux/io-pgtable.h>
 #include "msm_drv.h"
 #include "msm_mmu.h"
 
 struct msm_iommu {
        struct msm_mmu base;
        struct iommu_domain *domain;
+       atomic_t pagetables;
 };
+
 #define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
 
+struct msm_iommu_pagetable {
+       struct msm_mmu base;
+       struct msm_mmu *parent;
+       struct io_pgtable_ops *pgtbl_ops;
+       phys_addr_t ttbr;
+       u32 asid;
+};
+static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
+{
+       return container_of(mmu, struct msm_iommu_pagetable, base);
+}
+
+static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
+               size_t size)
+{
+       struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+       struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+       size_t unmapped = 0, expected = size;
+
+       /* Unmap the block one page at a time */
+       while (size) {
+               unmapped += ops->unmap(ops, iova, 4096, NULL);
+               iova += 4096;
+               size -= 4096;
+       }
+
+       iommu_flush_tlb_all(to_msm_iommu(pagetable->parent)->domain);
+
+       /* the loop consumed 'size', so compare against the saved total */
+       return (unmapped == expected) ? 0 : -EINVAL;
+}
+
+static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
+               struct sg_table *sgt, size_t len, int prot)
+{
+       struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+       struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
+       struct scatterlist *sg;
+       size_t mapped = 0;
+       u64 addr = iova;
+       unsigned int i;
+
+       for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+               size_t size = sg->length;
+               phys_addr_t phys = sg_phys(sg);
+
+               /* Map the block one page at a time */
+               while (size) {
+                       if (ops->map(ops, addr, phys, 4096, prot, GFP_KERNEL)) {
+                               msm_iommu_pagetable_unmap(mmu, iova, mapped);
+                               return -EINVAL;
+                       }
+
+                       phys += 4096;
+                       addr += 4096;
+                       size -= 4096;
+                       mapped += 4096;
+               }
+       }
+
+       return 0;
+}
+
+static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
+{
+       struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
+       struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
+       struct adreno_smmu_priv *adreno_smmu =
+               dev_get_drvdata(pagetable->parent->dev);
+
+       /*
+        * If this is the last attached pagetable for the parent,
+        * disable TTBR0 in the arm-smmu driver
+        */
+       if (atomic_dec_return(&iommu->pagetables) == 0)
+               adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
+
+       free_io_pgtable_ops(pagetable->pgtbl_ops);
+       kfree(pagetable);
+}
+
+int msm_iommu_pagetable_params(struct msm_mmu *mmu,
+               phys_addr_t *ttbr, int *asid)
+{
+       struct msm_iommu_pagetable *pagetable;
+
+       if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
+               return -EINVAL;
+
+       pagetable = to_pagetable(mmu);
+
+       if (ttbr)
+               *ttbr = pagetable->ttbr;
+
+       if (asid)
+               *asid = pagetable->asid;
+
+       return 0;
+}
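A hedged sketch of the intended consumer: a target-specific submit path reads back the TTBR0 value and ASID so the GPU can switch pagetables from the ring. The emit helper is hypothetical; the packet format is target-specific and not part of this patch:

    phys_addr_t ttbr;
    int asid;

    if (!msm_iommu_pagetable_params(submit->aspace->mmu, &ttbr, &asid))
            /* hypothetical: emit an SMMU table-update packet */
            example_emit_pgtable_switch(ring, ttbr, asid);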
+
+static const struct msm_mmu_funcs pagetable_funcs = {
+               .map = msm_iommu_pagetable_map,
+               .unmap = msm_iommu_pagetable_unmap,
+               .destroy = msm_iommu_pagetable_destroy,
+};
+
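+/*
+ * No-op io-pgtable TLB callbacks: msm_iommu_pagetable_unmap() flushes the
+ * whole TLB itself via iommu_flush_tlb_all() on the parent domain, so the
+ * pgtable code does not need to issue per-page invalidations.
+ */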
+static void msm_iommu_tlb_flush_all(void *cookie)
+{
+}
+
+static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+               size_t granule, void *cookie)
+{
+}
+
+static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
+               unsigned long iova, size_t granule, void *cookie)
+{
+}
+
+static const struct iommu_flush_ops null_tlb_ops = {
+       .tlb_flush_all = msm_iommu_tlb_flush_all,
+       .tlb_flush_walk = msm_iommu_tlb_flush_walk,
+       .tlb_flush_leaf = msm_iommu_tlb_flush_walk,
+       .tlb_add_page = msm_iommu_tlb_add_page,
+};
+
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
+{
+       struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
+       struct msm_iommu *iommu = to_msm_iommu(parent);
+       struct msm_iommu_pagetable *pagetable;
+       const struct io_pgtable_cfg *ttbr1_cfg = NULL;
+       struct io_pgtable_cfg ttbr0_cfg;
+       int ret;
+
+       /* Get the pagetable configuration from the domain */
+       if (adreno_smmu->cookie)
+               ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
+       if (!ttbr1_cfg)
+               return ERR_PTR(-ENODEV);
+
+       pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
+       if (!pagetable)
+               return ERR_PTR(-ENOMEM);
+
+       msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
+               MSM_MMU_IOMMU_PAGETABLE);
+
+       /* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
+       ttbr0_cfg = *ttbr1_cfg;
+
+       /* The incoming cfg will have the TTBR1 quirk enabled */
+       ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
+       ttbr0_cfg.tlb = &null_tlb_ops;
+
+       pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
+               &ttbr0_cfg, iommu->domain);
+
+       if (!pagetable->pgtbl_ops) {
+               kfree(pagetable);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /*
+        * If this is the first pagetable that we've allocated, send it back to
+        * the arm-smmu driver as a trigger to set up TTBR0
+        */
+       if (atomic_inc_return(&iommu->pagetables) == 1) {
+               ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
+               if (ret) {
+                       free_io_pgtable_ops(pagetable->pgtbl_ops);
+                       kfree(pagetable);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       /* Needed later for TLB flush */
+       pagetable->parent = parent;
+       pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
+
+       /*
+        * TODO we would like each set of page tables to have a unique ASID
+        * to optimize TLB invalidation.  But iommu_flush_tlb_all() will
+        * end up flushing the ASID used for TTBR1 pagetables, which is not
+        * what we want.  So for now just use the same ASID as TTBR1.
+        */
+       pagetable->asid = 0;
+
+       return &pagetable->base;
+}
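For context, a sketch (modelled on how an Adreno target could wire this up; the iova window and name are illustrative assumptions, not part of this patch) of implementing the new create_private_address_space op on top of this helper:

    static struct msm_gem_address_space *
    example_create_private_address_space(struct msm_gpu *gpu)
    {
            struct msm_mmu *mmu;

            /* Clone a TTBR0 pagetable from the GPU's global MMU */
            mmu = msm_iommu_pagetable_create(gpu->aspace->mmu);
            if (IS_ERR(mmu))
                    return ERR_CAST(mmu);

            /* Private pagetables, same iova window as the global aspace */
            return msm_gem_address_space_create(mmu, "gpu",
                    0x100000000ULL, 0x1ffffffffULL);
    }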
+
 static int msm_fault_handler(struct iommu_domain *domain, struct device *dev,
                unsigned long iova, int flags, void *arg)
 {
@@ -36,6 +231,10 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
        struct msm_iommu *iommu = to_msm_iommu(mmu);
        size_t ret;
 
+       /* The arm-smmu driver expects the addresses to be sign extended */
+       if (iova & BIT_ULL(48))
+               iova |= GENMASK_ULL(63, 49);
+
        ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
        WARN_ON(!ret);
 
@@ -46,6 +245,9 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
 {
        struct msm_iommu *iommu = to_msm_iommu(mmu);
 
+       if (iova & BIT_ULL(48))
+               iova |= GENMASK_ULL(63, 49);
+
        iommu_unmap(iommu->domain, iova, len);
 
        return 0;
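The sign-extension rule in these two hunks is easiest to see with a concrete (illustrative) value:

    u64 iova = 0x0001c00000000000ULL;        /* bit 48 set */

    if (iova & BIT_ULL(48))
            iova |= GENMASK_ULL(63, 49);     /* -> 0xffffc00000000000 */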
@@ -78,9 +280,11 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
                return ERR_PTR(-ENOMEM);
 
        iommu->domain = domain;
-       msm_mmu_init(&iommu->base, dev, &funcs);
+       msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);
        iommu_set_fault_handler(domain, msm_fault_handler, iommu);
 
+       atomic_set(&iommu->pagetables, 0);
+
        ret = iommu_attach_device(iommu->domain, dev);
        if (ret) {
                kfree(iommu);
index 3a534ee..61ade89 100644 (file)
@@ -17,18 +17,26 @@ struct msm_mmu_funcs {
        void (*destroy)(struct msm_mmu *mmu);
 };
 
+enum msm_mmu_type {
+       MSM_MMU_GPUMMU,
+       MSM_MMU_IOMMU,
+       MSM_MMU_IOMMU_PAGETABLE,
+};
+
 struct msm_mmu {
        const struct msm_mmu_funcs *funcs;
        struct device *dev;
        int (*handler)(void *arg, unsigned long iova, int flags);
        void *arg;
+       enum msm_mmu_type type;
 };
 
 static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
-               const struct msm_mmu_funcs *funcs)
+               const struct msm_mmu_funcs *funcs, enum msm_mmu_type type)
 {
        mmu->dev = dev;
        mmu->funcs = funcs;
+       mmu->type = type;
 }
 
 struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
@@ -41,7 +49,13 @@ static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg,
        mmu->handler = handler;
 }
 
+struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent);
+
 void msm_gpummu_params(struct msm_mmu *mmu, dma_addr_t *pt_base,
                dma_addr_t *tran_error);
 
+
+int msm_iommu_pagetable_params(struct msm_mmu *mmu, phys_addr_t *ttbr,
+               int *asid);
+
 #endif /* __MSM_MMU_H__ */
index 7764373..0987d6b 100644 (file)
@@ -31,6 +31,7 @@ struct msm_rbmemptrs {
        volatile uint32_t fence;
 
        volatile struct msm_gpu_submit_stats stats[MSM_GPU_SUBMIT_STATS_COUNT];
+       volatile u64 ttbr0;
 };
 
 struct msm_ringbuffer {
index a1d94be..c3d2061 100644 (file)
@@ -12,6 +12,8 @@ void msm_submitqueue_destroy(struct kref *kref)
        struct msm_gpu_submitqueue *queue = container_of(kref,
                struct msm_gpu_submitqueue, ref);
 
+       msm_file_private_put(queue->ctx);
+
        kfree(queue);
 }
 
@@ -49,8 +51,10 @@ void msm_submitqueue_close(struct msm_file_private *ctx)
         * No lock needed in close and there won't
         * be any more user ioctls coming our way
         */
-       list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
+       list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
+               list_del(&entry->node);
                msm_submitqueue_put(entry);
+       }
 }
 
 int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
@@ -81,6 +85,7 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
 
        write_lock(&ctx->queuelock);
 
+       queue->ctx = msm_file_private_get(ctx);
        queue->id = ctx->queueid++;
 
        if (id)
index e1b2a33..2aae636 100644 (file)
@@ -325,13 +325,13 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
        /* Just turn on everything for now */
        gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
        ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
-               val, val == pfdev->features.l2_present, 100, 1000);
+               val, val == pfdev->features.l2_present, 100, 20000);
        if (ret)
                dev_err(pfdev->dev, "error powering up gpu L2");
 
        gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
        ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
-               val, val == pfdev->features.shader_present, 100, 1000);
+               val, val == pfdev->features.shader_present, 100, 20000);
        if (ret)
                dev_err(pfdev->dev, "error powering up gpu shader");
 
index ebad27c..27b14ef 100644 (file)
@@ -188,7 +188,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
            vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
                return NULL;
 
-       /* we definately need to flush */
+       /* we definitely need to flush */
        vm_id->pd_gpu_addr = ~0ll;
 
        /* skip over VMID 0, since it is the system VM */
index 8007211..58557c2 100644 (file)
@@ -117,7 +117,7 @@ int uvd_v1_0_resume(struct radeon_device *rdev)
        if (r)
                return r;
 
-       /* programm the VCPU memory controller bits 0-27 */
+       /* program the VCPU memory controller bits 0-27 */
        addr = (rdev->uvd.gpu_addr >> 3) + 16;
        size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size) >> 3;
        WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
@@ -360,7 +360,7 @@ int uvd_v1_0_start(struct radeon_device *rdev)
        /* Set the write pointer delay */
        WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
 
-       /* programm the 4GB memory segment for rptr and ring buffer */
+       /* program the 4GB memory segment for rptr and ring buffer */
        WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
                                   (0x7 << 16) | (0x1 << 31));
 
index 23b18ed..6266167 100644 (file)
@@ -109,7 +109,7 @@ int uvd_v2_2_resume(struct radeon_device *rdev)
        if (r)
                return r;
 
-       /* programm the VCPU memory controller bits 0-27 */
+       /* program the VCPU memory controller bits 0-27 */
        addr = rdev->uvd.gpu_addr >> 3;
        size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
        WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
index dc54fa4..f9e97fa 100644 (file)
@@ -40,7 +40,7 @@ int uvd_v4_2_resume(struct radeon_device *rdev)
        uint64_t addr;
        uint32_t size;
 
-       /* programm the VCPU memory controller bits 0-27 */
+       /* program the VCPU memory controller bits 0-27 */
 
        /* skip over the header of the new firmware format */
        if (rdev->uvd.fw_header_present)
index a393f93..482219f 100644 (file)
@@ -852,11 +852,19 @@ void vc4_crtc_destroy_state(struct drm_crtc *crtc,
 
 void vc4_crtc_reset(struct drm_crtc *crtc)
 {
+       struct vc4_crtc_state *vc4_crtc_state;
+
        if (crtc->state)
                vc4_crtc_destroy_state(crtc, crtc->state);
-       crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
-       if (crtc->state)
-               __drm_atomic_helper_crtc_reset(crtc, crtc->state);
+
+       vc4_crtc_state = kzalloc(sizeof(*vc4_crtc_state), GFP_KERNEL);
+       if (!vc4_crtc_state) {
+               crtc->state = NULL;
+               return;
+       }
+
+       vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
+       __drm_atomic_helper_crtc_reset(crtc, &vc4_crtc_state->base);
 }
 
 static const struct drm_crtc_funcs vc4_crtc_funcs = {
index 8c8d96b..90b911f 100644 (file)
@@ -532,6 +532,8 @@ struct vc4_crtc_state {
        } margins;
 };
 
+#define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1)
+
 static inline struct vc4_crtc_state *
 to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
 {
index af3ee3d..149825f 100644 (file)
@@ -616,7 +616,7 @@ static int
 vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
 {
        unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0);
-       struct drm_crtc_state *crtc_state;
+       struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        struct drm_crtc *crtc;
        int i, ret;
 
@@ -629,6 +629,8 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
         * modified.
         */
        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+               struct drm_crtc_state *crtc_state;
+
                if (!crtc->state->enable)
                        continue;
 
@@ -637,15 +639,23 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
                        return PTR_ERR(crtc_state);
        }
 
-       for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
-               struct vc4_crtc_state *vc4_crtc_state =
-                       to_vc4_crtc_state(crtc_state);
+       for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+               struct vc4_crtc_state *new_vc4_crtc_state =
+                       to_vc4_crtc_state(new_crtc_state);
                struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
                unsigned int matching_channels;
 
-               if (!crtc_state->active)
+               if (old_crtc_state->enable && !new_crtc_state->enable)
+                       new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED;
+
+               if (!new_crtc_state->enable)
                        continue;
 
+               if (new_vc4_crtc_state->assigned_channel != VC4_HVS_CHANNEL_DISABLED) {
+                       unassigned_channels &= ~BIT(new_vc4_crtc_state->assigned_channel);
+                       continue;
+               }
+
                /*
                 * The problem we have to solve here is that we have
                 * up to 7 encoders, connected to up to 6 CRTCs.
@@ -674,7 +684,7 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
                if (matching_channels) {
                        unsigned int channel = ffs(matching_channels) - 1;
 
-                       vc4_crtc_state->assigned_channel = channel;
+                       new_vc4_crtc_state->assigned_channel = channel;
                        unassigned_channels &= ~BIT(channel);
                } else {
                        return -EINVAL;
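The channel assignment above is a standard lowest-set-bit allocation; a standalone sketch of the idiom with illustrative values:

    unsigned long unassigned_channels = GENMASK(5, 0);           /* 6 channels free */
    unsigned int matching_channels = 0x2c & unassigned_channels; /* encoder options */

    if (matching_channels) {
            unsigned int channel = ffs(matching_channels) - 1;   /* lowest bit: 2 */

            unassigned_channels &= ~BIT(channel);                /* mark in use */
    }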
index 63fe7da..c158e67 100644 (file)
@@ -26,6 +26,8 @@ static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
        return container_of(man, struct vmw_thp_manager, manager);
 }
 
+static const struct ttm_resource_manager_func vmw_thp_func;
+
 static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
                                  unsigned long align_pages,
                                  const struct ttm_place *place,
@@ -132,6 +134,7 @@ int vmw_thp_init(struct vmw_private *dev_priv)
        ttm_resource_manager_init(&rman->manager,
                                  dev_priv->vram_size >> PAGE_SHIFT);
 
+       rman->manager.func = &vmw_thp_func;
        drm_mm_init(&rman->mm, 0, rman->manager.size);
        spin_lock_init(&rman->lock);
 
@@ -171,7 +174,7 @@ static void vmw_thp_debug(struct ttm_resource_manager *man,
        spin_unlock(&rman->lock);
 }
 
-const struct ttm_resource_manager_func vmw_thp_func = {
+static const struct ttm_resource_manager_func vmw_thp_func = {
        .alloc = vmw_thp_get_node,
        .free = vmw_thp_put_node,
        .debug = vmw_thp_debug
index dee757c..50c5e93 100644 (file)
@@ -35,3 +35,10 @@ config PHY_MTK_XSPHY
          Enable this to support the SuperSpeedPlus XS-PHY transceiver for
          USB3.1 GEN2 controllers on MediaTek chips. The driver supports
          multiple USB2.0, USB3.1 GEN2 ports.
+
+config PHY_MTK_HDMI
+       tristate "MediaTek HDMI-PHY Driver"
+       depends on ARCH_MEDIATEK && OF
+       select GENERIC_PHY
+       help
+         Enable this to support the HDMI PHY on MediaTek SoCs.
index 08a8e6a..6325e38 100644 (file)
@@ -6,3 +6,8 @@
 obj-$(CONFIG_PHY_MTK_TPHY)             += phy-mtk-tphy.o
 obj-$(CONFIG_PHY_MTK_UFS)              += phy-mtk-ufs.o
 obj-$(CONFIG_PHY_MTK_XSPHY)            += phy-mtk-xsphy.o
+
+phy-mtk-hdmi-drv-y                     := phy-mtk-hdmi.o
+phy-mtk-hdmi-drv-y                     += phy-mtk-hdmi-mt2701.o
+phy-mtk-hdmi-drv-y                     += phy-mtk-hdmi-mt8173.o
+obj-$(CONFIG_PHY_MTK_HDMI)             += phy-mtk-hdmi-drv.o
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt2701.c
new file mode 100644 (file)
index 0000000..b74c65a
--- /dev/null
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Chunhui Dai <chunhui.dai@mediatek.com>
+ */
+
+#include "phy-mtk-hdmi.h"
+
+#define HDMI_CON0      0x00
+#define RG_HDMITX_DRV_IBIAS            0
+#define RG_HDMITX_DRV_IBIAS_MASK       (0x3f << 0)
+#define RG_HDMITX_EN_SER               12
+#define RG_HDMITX_EN_SER_MASK          (0x0f << 12)
+#define RG_HDMITX_EN_SLDO              16
+#define RG_HDMITX_EN_SLDO_MASK         (0x0f << 16)
+#define RG_HDMITX_EN_PRED              20
+#define RG_HDMITX_EN_PRED_MASK         (0x0f << 20)
+#define RG_HDMITX_EN_IMP               24
+#define RG_HDMITX_EN_IMP_MASK          (0x0f << 24)
+#define RG_HDMITX_EN_DRV               28
+#define RG_HDMITX_EN_DRV_MASK          (0x0f << 28)
+
+#define HDMI_CON1      0x04
+#define RG_HDMITX_PRED_IBIAS           18
+#define RG_HDMITX_PRED_IBIAS_MASK      (0x0f << 18)
+#define RG_HDMITX_PRED_IMP             (0x01 << 22)
+#define RG_HDMITX_DRV_IMP              26
+#define RG_HDMITX_DRV_IMP_MASK         (0x3f << 26)
+
+#define HDMI_CON2      0x08
+#define RG_HDMITX_EN_TX_CKLDO          (0x01 << 0)
+#define RG_HDMITX_EN_TX_POSDIV         (0x01 << 1)
+#define RG_HDMITX_TX_POSDIV            3
+#define RG_HDMITX_TX_POSDIV_MASK       (0x03 << 3)
+#define RG_HDMITX_EN_MBIAS             (0x01 << 6)
+#define RG_HDMITX_MBIAS_LPF_EN         (0x01 << 7)
+
+#define HDMI_CON4      0x10
+#define RG_HDMITX_RESERVE_MASK         (0xffffffff << 0)
+
+#define HDMI_CON6      0x18
+#define RG_HTPLL_BR                    0
+#define RG_HTPLL_BR_MASK               (0x03 << 0)
+#define RG_HTPLL_BC                    2
+#define RG_HTPLL_BC_MASK               (0x03 << 2)
+#define RG_HTPLL_BP                    4
+#define RG_HTPLL_BP_MASK               (0x0f << 4)
+#define RG_HTPLL_IR                    8
+#define RG_HTPLL_IR_MASK               (0x0f << 8)
+#define RG_HTPLL_IC                    12
+#define RG_HTPLL_IC_MASK               (0x0f << 12)
+#define RG_HTPLL_POSDIV                        16
+#define RG_HTPLL_POSDIV_MASK           (0x03 << 16)
+#define RG_HTPLL_PREDIV                        18
+#define RG_HTPLL_PREDIV_MASK           (0x03 << 18)
+#define RG_HTPLL_FBKSEL                        20
+#define RG_HTPLL_FBKSEL_MASK           (0x03 << 20)
+#define RG_HTPLL_RLH_EN                        (0x01 << 22)
+#define RG_HTPLL_FBKDIV                        24
+#define RG_HTPLL_FBKDIV_MASK           (0x7f << 24)
+#define RG_HTPLL_EN                    (0x01 << 31)
+
+#define HDMI_CON7      0x1c
+#define RG_HTPLL_AUTOK_EN              (0x01 << 23)
+#define RG_HTPLL_DIVEN                 28
+#define RG_HTPLL_DIVEN_MASK            (0x07 << 28)
+
+static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+       usleep_range(80, 100);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+       usleep_range(80, 100);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+       usleep_range(80, 100);
+       return 0;
+}
+
+static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+       usleep_range(80, 100);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+       usleep_range(80, 100);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+       usleep_range(80, 100);
+}
+
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long *parent_rate)
+{
+       return rate;
+}
+
+static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+       u32 pos_div;
+
+       if (rate <= 64000000)
+               pos_div = 3;
+       else if (rate <= 128000000)
+               pos_div = 2;
+       else
+               pos_div = 1;
+
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_PREDIV_MASK);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_POSDIV);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IC),
+                         RG_HTPLL_IC_MASK);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_IR),
+                         RG_HTPLL_IR_MASK);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON2, (pos_div << RG_HDMITX_TX_POSDIV),
+                         RG_HDMITX_TX_POSDIV_MASK);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (1 << RG_HTPLL_FBKSEL),
+                         RG_HTPLL_FBKSEL_MASK);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (19 << RG_HTPLL_FBKDIV),
+                         RG_HTPLL_FBKDIV_MASK);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON7, (0x2 << RG_HTPLL_DIVEN),
+                         RG_HTPLL_DIVEN_MASK);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0xc << RG_HTPLL_BP),
+                         RG_HTPLL_BP_MASK);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x2 << RG_HTPLL_BC),
+                         RG_HTPLL_BC_MASK);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, (0x1 << RG_HTPLL_BR),
+                         RG_HTPLL_BR_MASK);
+
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PRED_IMP);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x3 << RG_HDMITX_PRED_IBIAS),
+                         RG_HDMITX_PRED_IBIAS_MASK);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_IMP_MASK);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1, (0x28 << RG_HDMITX_DRV_IMP),
+                         RG_HDMITX_DRV_IMP_MASK);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, 0x28, RG_HDMITX_RESERVE_MASK);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0, (0xa << RG_HDMITX_DRV_IBIAS),
+                         RG_HDMITX_DRV_IBIAS_MASK);
+       return 0;
+}
+
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+       unsigned long out_rate, val;
+
+       val = (readl(hdmi_phy->regs + HDMI_CON6)
+              & RG_HTPLL_PREDIV_MASK) >> RG_HTPLL_PREDIV;
+       switch (val) {
+       case 0x00:
+               out_rate = parent_rate;
+               break;
+       case 0x01:
+               out_rate = parent_rate / 2;
+               break;
+       default:
+               out_rate = parent_rate / 4;
+               break;
+       }
+
+       val = (readl(hdmi_phy->regs + HDMI_CON6)
+              & RG_HTPLL_FBKDIV_MASK) >> RG_HTPLL_FBKDIV;
+       out_rate *= (val + 1) * 2;
+       val = (readl(hdmi_phy->regs + HDMI_CON2)
+              & RG_HDMITX_TX_POSDIV_MASK);
+       out_rate >>= (val >> RG_HDMITX_TX_POSDIV);
+
+       if (readl(hdmi_phy->regs + HDMI_CON2) & RG_HDMITX_EN_TX_POSDIV)
+               out_rate /= 5;
+
+       return out_rate;
+}
+
+static const struct clk_ops mtk_hdmi_phy_pll_ops = {
+       .prepare = mtk_hdmi_pll_prepare,
+       .unprepare = mtk_hdmi_pll_unprepare,
+       .set_rate = mtk_hdmi_pll_set_rate,
+       .round_rate = mtk_hdmi_pll_round_rate,
+       .recalc_rate = mtk_hdmi_pll_recalc_rate,
+};
+
+static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+       usleep_range(80, 100);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+       usleep_range(80, 100);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+       usleep_range(80, 100);
+}
+
+static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_DRV_MASK);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_PRED_MASK);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SER_MASK);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_MBIAS_LPF_EN);
+       usleep_range(80, 100);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_EN_SLDO_MASK);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_TX_CKLDO);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_EN);
+       usleep_range(80, 100);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON2, RG_HDMITX_EN_MBIAS);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_POSDIV_MASK);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON6, RG_HTPLL_RLH_EN);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON7, RG_HTPLL_AUTOK_EN);
+       usleep_range(80, 100);
+}
+
+struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf = {
+       .flags = CLK_SET_RATE_GATE,
+       .pll_default_off = true,
+       .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
+       .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
+       .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
+};
+
+MODULE_AUTHOR("Chunhui Dai <chunhui.dai@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek HDMI PHY Driver");
+MODULE_LICENSE("GPL v2");
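
For reference, the rate readback in mtk_hdmi_pll_recalc_rate() above reduces to a few divider terms. A userspace sketch with the register fields passed in as plain parameters instead of readl() results (illustrative only):

static unsigned long mt2701_pll_rate(unsigned long parent_rate,
				     unsigned int prediv,	/* 0, 1, or 2+ */
				     unsigned int fbkdiv,	/* 0..127 */
				     unsigned int tx_posdiv,	/* 0..3 */
				     int tx_posdiv_en)
{
	unsigned long rate;

	/* prediv: 0 -> /1, 1 -> /2, anything else -> /4 */
	rate = parent_rate >> (prediv < 2 ? prediv : 2);
	rate *= (fbkdiv + 1) * 2;	/* feedback divider */
	rate >>= tx_posdiv;		/* power-of-two TX post-divider */
	if (tx_posdiv_en)
		rate /= 5;		/* fixed /5 when the TX posdiv is enabled */
	return rate;
}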
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c b/drivers/phy/mediatek/phy-mtk-hdmi-mt8173.c
new file mode 100644 (file)
index 0000000..6cdfdf5
--- /dev/null
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ */
+
+#include "phy-mtk-hdmi.h"
+
+#define HDMI_CON0              0x00
+#define RG_HDMITX_PLL_EN               BIT(31)
+#define RG_HDMITX_PLL_FBKDIV           (0x7f << 24)
+#define PLL_FBKDIV_SHIFT               24
+#define RG_HDMITX_PLL_FBKSEL           (0x3 << 22)
+#define PLL_FBKSEL_SHIFT               22
+#define RG_HDMITX_PLL_PREDIV           (0x3 << 20)
+#define PREDIV_SHIFT                   20
+#define RG_HDMITX_PLL_POSDIV           (0x3 << 18)
+#define POSDIV_SHIFT                   18
+#define RG_HDMITX_PLL_RST_DLY          (0x3 << 16)
+#define RG_HDMITX_PLL_IR               (0xf << 12)
+#define PLL_IR_SHIFT                   12
+#define RG_HDMITX_PLL_IC               (0xf << 8)
+#define PLL_IC_SHIFT                   8
+#define RG_HDMITX_PLL_BP               (0xf << 4)
+#define PLL_BP_SHIFT                   4
+#define RG_HDMITX_PLL_BR               (0x3 << 2)
+#define PLL_BR_SHIFT                   2
+#define RG_HDMITX_PLL_BC               (0x3 << 0)
+#define PLL_BC_SHIFT                   0
+#define HDMI_CON1              0x04
+#define RG_HDMITX_PLL_DIVEN            (0x7 << 29)
+#define PLL_DIVEN_SHIFT                        29
+#define RG_HDMITX_PLL_AUTOK_EN         BIT(28)
+#define RG_HDMITX_PLL_AUTOK_KF         (0x3 << 26)
+#define RG_HDMITX_PLL_AUTOK_KS         (0x3 << 24)
+#define RG_HDMITX_PLL_AUTOK_LOAD       BIT(23)
+#define RG_HDMITX_PLL_BAND             (0x3f << 16)
+#define RG_HDMITX_PLL_REF_SEL          BIT(15)
+#define RG_HDMITX_PLL_BIAS_EN          BIT(14)
+#define RG_HDMITX_PLL_BIAS_LPF_EN      BIT(13)
+#define RG_HDMITX_PLL_TXDIV_EN         BIT(12)
+#define RG_HDMITX_PLL_TXDIV            (0x3 << 10)
+#define PLL_TXDIV_SHIFT                        10
+#define RG_HDMITX_PLL_LVROD_EN         BIT(9)
+#define RG_HDMITX_PLL_MONVC_EN         BIT(8)
+#define RG_HDMITX_PLL_MONCK_EN         BIT(7)
+#define RG_HDMITX_PLL_MONREF_EN                BIT(6)
+#define RG_HDMITX_PLL_TST_EN           BIT(5)
+#define RG_HDMITX_PLL_TST_CK_EN                BIT(4)
+#define RG_HDMITX_PLL_TST_SEL          (0xf << 0)
+#define HDMI_CON2              0x08
+#define RGS_HDMITX_PLL_AUTOK_BAND      (0x7f << 8)
+#define RGS_HDMITX_PLL_AUTOK_FAIL      BIT(1)
+#define RG_HDMITX_EN_TX_CKLDO          BIT(0)
+#define HDMI_CON3              0x0c
+#define RG_HDMITX_SER_EN               (0xf << 28)
+#define RG_HDMITX_PRD_EN               (0xf << 24)
+#define RG_HDMITX_PRD_IMP_EN           (0xf << 20)
+#define RG_HDMITX_DRV_EN               (0xf << 16)
+#define RG_HDMITX_DRV_IMP_EN           (0xf << 12)
+#define DRV_IMP_EN_SHIFT               12
+#define RG_HDMITX_MHLCK_FORCE          BIT(10)
+#define RG_HDMITX_MHLCK_PPIX_EN                BIT(9)
+#define RG_HDMITX_MHLCK_EN             BIT(8)
+#define RG_HDMITX_SER_DIN_SEL          (0xf << 4)
+#define RG_HDMITX_SER_5T1_BIST_EN      BIT(3)
+#define RG_HDMITX_SER_BIST_TOG         BIT(2)
+#define RG_HDMITX_SER_DIN_TOG          BIT(1)
+#define RG_HDMITX_SER_CLKDIG_INV       BIT(0)
+#define HDMI_CON4              0x10
+#define RG_HDMITX_PRD_IBIAS_CLK                (0xf << 24)
+#define RG_HDMITX_PRD_IBIAS_D2         (0xf << 16)
+#define RG_HDMITX_PRD_IBIAS_D1         (0xf << 8)
+#define RG_HDMITX_PRD_IBIAS_D0         (0xf << 0)
+#define PRD_IBIAS_CLK_SHIFT            24
+#define PRD_IBIAS_D2_SHIFT             16
+#define PRD_IBIAS_D1_SHIFT             8
+#define PRD_IBIAS_D0_SHIFT             0
+#define HDMI_CON5              0x14
+#define RG_HDMITX_DRV_IBIAS_CLK                (0x3f << 24)
+#define RG_HDMITX_DRV_IBIAS_D2         (0x3f << 16)
+#define RG_HDMITX_DRV_IBIAS_D1         (0x3f << 8)
+#define RG_HDMITX_DRV_IBIAS_D0         (0x3f << 0)
+#define DRV_IBIAS_CLK_SHIFT            24
+#define DRV_IBIAS_D2_SHIFT             16
+#define DRV_IBIAS_D1_SHIFT             8
+#define DRV_IBIAS_D0_SHIFT             0
+#define HDMI_CON6              0x18
+#define RG_HDMITX_DRV_IMP_CLK          (0x3f << 24)
+#define RG_HDMITX_DRV_IMP_D2           (0x3f << 16)
+#define RG_HDMITX_DRV_IMP_D1           (0x3f << 8)
+#define RG_HDMITX_DRV_IMP_D0           (0x3f << 0)
+#define DRV_IMP_CLK_SHIFT              24
+#define DRV_IMP_D2_SHIFT               16
+#define DRV_IMP_D1_SHIFT               8
+#define DRV_IMP_D0_SHIFT               0
+#define HDMI_CON7              0x1c
+#define RG_HDMITX_MHLCK_DRV_IBIAS      (0x1f << 27)
+#define RG_HDMITX_SER_DIN              (0x3ff << 16)
+#define RG_HDMITX_CHLDC_TST            (0xf << 12)
+#define RG_HDMITX_CHLCK_TST            (0xf << 8)
+#define RG_HDMITX_RESERVE              (0xff << 0)
+#define HDMI_CON8              0x20
+#define RGS_HDMITX_2T1_LEV             (0xf << 16)
+#define RGS_HDMITX_2T1_EDG             (0xf << 12)
+#define RGS_HDMITX_5T1_LEV             (0xf << 8)
+#define RGS_HDMITX_5T1_EDG             (0xf << 4)
+#define RGS_HDMITX_PLUG_TST            BIT(0)
+
+static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
+       usleep_range(100, 150);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
+       usleep_range(100, 150);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
+
+       return 0;
+}
+
+static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
+       usleep_range(100, 150);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
+       usleep_range(100, 150);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
+       usleep_range(100, 150);
+}
+
+static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long *parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       hdmi_phy->pll_rate = rate;
+       if (rate <= 74250000)
+               *parent_rate = rate;
+       else
+               *parent_rate = rate / 2;
+
+       return rate;
+}
+
+static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+       unsigned int pre_div;
+       unsigned int div;
+       unsigned int pre_ibias;
+       unsigned int hdmi_ibias;
+       unsigned int imp_en;
+
+       dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__,
+               rate, parent_rate);
+
+       if (rate <= 27000000) {
+               pre_div = 0;
+               div = 3;
+       } else if (rate <= 74250000) {
+               pre_div = 1;
+               div = 2;
+       } else {
+               pre_div = 1;
+               div = 1;
+       }
+
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+                         (pre_div << PREDIV_SHIFT), RG_HDMITX_PLL_PREDIV);
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+                         (0x1 << PLL_IC_SHIFT) | (0x1 << PLL_IR_SHIFT),
+                         RG_HDMITX_PLL_IC | RG_HDMITX_PLL_IR);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
+                         (div << PLL_TXDIV_SHIFT), RG_HDMITX_PLL_TXDIV);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+                         (0x1 << PLL_FBKSEL_SHIFT) | (19 << PLL_FBKDIV_SHIFT),
+                         RG_HDMITX_PLL_FBKSEL | RG_HDMITX_PLL_FBKDIV);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
+                         (0x2 << PLL_DIVEN_SHIFT), RG_HDMITX_PLL_DIVEN);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
+                         (0xc << PLL_BP_SHIFT) | (0x2 << PLL_BC_SHIFT) |
+                         (0x1 << PLL_BR_SHIFT),
+                         RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
+                         RG_HDMITX_PLL_BR);
+       if (rate < 165000000) {
+               mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
+                                       RG_HDMITX_PRD_IMP_EN);
+               pre_ibias = 0x3;
+               imp_en = 0x0;
+               hdmi_ibias = hdmi_phy->ibias;
+       } else {
+               mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
+                                     RG_HDMITX_PRD_IMP_EN);
+               pre_ibias = 0x6;
+               imp_en = 0xf;
+               hdmi_ibias = hdmi_phy->ibias_up;
+       }
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
+                         (pre_ibias << PRD_IBIAS_CLK_SHIFT) |
+                         (pre_ibias << PRD_IBIAS_D2_SHIFT) |
+                         (pre_ibias << PRD_IBIAS_D1_SHIFT) |
+                         (pre_ibias << PRD_IBIAS_D0_SHIFT),
+                         RG_HDMITX_PRD_IBIAS_CLK |
+                         RG_HDMITX_PRD_IBIAS_D2 |
+                         RG_HDMITX_PRD_IBIAS_D1 |
+                         RG_HDMITX_PRD_IBIAS_D0);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
+                         (imp_en << DRV_IMP_EN_SHIFT),
+                         RG_HDMITX_DRV_IMP_EN);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
+                         (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
+                         (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
+                         (hdmi_phy->drv_imp_d1 << DRV_IMP_D1_SHIFT) |
+                         (hdmi_phy->drv_imp_d0 << DRV_IMP_D0_SHIFT),
+                         RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
+                         RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
+       mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
+                         (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) |
+                         (hdmi_ibias << DRV_IBIAS_D2_SHIFT) |
+                         (hdmi_ibias << DRV_IBIAS_D1_SHIFT) |
+                         (hdmi_ibias << DRV_IBIAS_D0_SHIFT),
+                         RG_HDMITX_DRV_IBIAS_CLK |
+                         RG_HDMITX_DRV_IBIAS_D2 |
+                         RG_HDMITX_DRV_IBIAS_D1 |
+                         RG_HDMITX_DRV_IBIAS_D0);
+       return 0;
+}
+
+static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
+
+       return hdmi_phy->pll_rate;
+}
+
+static const struct clk_ops mtk_hdmi_phy_pll_ops = {
+       .prepare = mtk_hdmi_pll_prepare,
+       .unprepare = mtk_hdmi_pll_unprepare,
+       .set_rate = mtk_hdmi_pll_set_rate,
+       .round_rate = mtk_hdmi_pll_round_rate,
+       .recalc_rate = mtk_hdmi_pll_recalc_rate,
+};
+
+static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+       mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
+                             RG_HDMITX_SER_EN | RG_HDMITX_PRD_EN |
+                             RG_HDMITX_DRV_EN);
+       usleep_range(100, 150);
+}
+
+static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
+{
+       mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
+                               RG_HDMITX_DRV_EN | RG_HDMITX_PRD_EN |
+                               RG_HDMITX_SER_EN);
+}
+
+struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf = {
+       .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
+       .hdmi_phy_clk_ops = &mtk_hdmi_phy_pll_ops,
+       .hdmi_phy_enable_tmds = mtk_hdmi_phy_enable_tmds,
+       .hdmi_phy_disable_tmds = mtk_hdmi_phy_disable_tmds,
+};
+
+MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek MT8173 HDMI PHY Driver");
+MODULE_LICENSE("GPL v2");
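
Unlike the MT2701 conf, the MT8173 conf above sets CLK_SET_RATE_PARENT, so mtk_hdmi_pll_round_rate() can push the requested rate (or rate / 2 above 74.25 MHz) down to the "pll_ref" parent before set_rate runs. A hypothetical caller, to show the intended flow (the helper name is illustrative, not driver code):

#include <linux/clk.h>

/* Illustrative helper: a plain clk_set_rate() on the PHY PLL is enough;
 * the clk framework consults round_rate() above and re-rates the parent. */
static int example_set_tmds_rate(struct clk *phy_pll, unsigned long tmds_hz)
{
	return clk_set_rate(phy_pll, tmds_hz);	/* e.g. 148500000 for 1080p60 */
}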
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.c b/drivers/phy/mediatek/phy-mtk-hdmi.c
new file mode 100644 (file)
index 0000000..47c029d
--- /dev/null
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Jie Qiu <jie.qiu@mediatek.com>
+ */
+
+#include "phy-mtk-hdmi.h"
+
+static int mtk_hdmi_phy_power_on(struct phy *phy);
+static int mtk_hdmi_phy_power_off(struct phy *phy);
+
+static const struct phy_ops mtk_hdmi_phy_dev_ops = {
+       .power_on = mtk_hdmi_phy_power_on,
+       .power_off = mtk_hdmi_phy_power_off,
+       .owner = THIS_MODULE,
+};
+
+void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+                            u32 bits)
+{
+       void __iomem *reg = hdmi_phy->regs + offset;
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp &= ~bits;
+       writel(tmp, reg);
+}
+
+void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+                          u32 bits)
+{
+       void __iomem *reg = hdmi_phy->regs + offset;
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp |= bits;
+       writel(tmp, reg);
+}
+
+void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+                      u32 val, u32 mask)
+{
+       void __iomem *reg = hdmi_phy->regs + offset;
+       u32 tmp;
+
+       tmp = readl(reg);
+       tmp = (tmp & ~mask) | (val & mask);
+       writel(tmp, reg);
+}
+
+inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw)
+{
+       return container_of(hw, struct mtk_hdmi_phy, pll_hw);
+}
+
+static int mtk_hdmi_phy_power_on(struct phy *phy)
+{
+       struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
+       int ret;
+
+       ret = clk_prepare_enable(hdmi_phy->pll);
+       if (ret < 0)
+               return ret;
+
+       hdmi_phy->conf->hdmi_phy_enable_tmds(hdmi_phy);
+       return 0;
+}
+
+static int mtk_hdmi_phy_power_off(struct phy *phy)
+{
+       struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
+
+       hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy);
+       clk_disable_unprepare(hdmi_phy->pll);
+
+       return 0;
+}
+
+static const struct phy_ops *
+mtk_hdmi_phy_dev_get_ops(const struct mtk_hdmi_phy *hdmi_phy)
+{
+       if (hdmi_phy && hdmi_phy->conf &&
+           hdmi_phy->conf->hdmi_phy_enable_tmds &&
+           hdmi_phy->conf->hdmi_phy_disable_tmds)
+               return &mtk_hdmi_phy_dev_ops;
+
+       dev_err(hdmi_phy->dev, "Failed to get dev ops of phy\n");
+       return NULL;
+}
+
+static void mtk_hdmi_phy_clk_get_data(struct mtk_hdmi_phy *hdmi_phy,
+                                     struct clk_init_data *clk_init)
+{
+       clk_init->flags = hdmi_phy->conf->flags;
+       clk_init->ops = hdmi_phy->conf->hdmi_phy_clk_ops;
+}
+
+static int mtk_hdmi_phy_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct mtk_hdmi_phy *hdmi_phy;
+       struct resource *mem;
+       struct clk *ref_clk;
+       const char *ref_clk_name;
+       struct clk_init_data clk_init = {
+               .num_parents = 1,
+               .parent_names = (const char * const *)&ref_clk_name,
+       };
+
+       struct phy *phy;
+       struct phy_provider *phy_provider;
+       int ret;
+
+       hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL);
+       if (!hdmi_phy)
+               return -ENOMEM;
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       hdmi_phy->regs = devm_ioremap_resource(dev, mem);
+       if (IS_ERR(hdmi_phy->regs)) {
+               ret = PTR_ERR(hdmi_phy->regs);
+               dev_err(dev, "Failed to get memory resource: %d\n", ret);
+               return ret;
+       }
+
+       ref_clk = devm_clk_get(dev, "pll_ref");
+       if (IS_ERR(ref_clk)) {
+               ret = PTR_ERR(ref_clk);
+               dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
+                       ret);
+               return ret;
+       }
+       ref_clk_name = __clk_get_name(ref_clk);
+
+       ret = of_property_read_string(dev->of_node, "clock-output-names",
+                                     &clk_init.name);
+       if (ret < 0) {
+               dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
+               return ret;
+       }
+
+       hdmi_phy->dev = dev;
+       hdmi_phy->conf =
+               (struct mtk_hdmi_phy_conf *)of_device_get_match_data(dev);
+       mtk_hdmi_phy_clk_get_data(hdmi_phy, &clk_init);
+       hdmi_phy->pll_hw.init = &clk_init;
+       hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
+       if (IS_ERR(hdmi_phy->pll)) {
+               ret = PTR_ERR(hdmi_phy->pll);
+               dev_err(dev, "Failed to register PLL: %d\n", ret);
+               return ret;
+       }
+
+       ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
+                                  &hdmi_phy->ibias);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
+               return ret;
+       }
+
+       ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
+                                  &hdmi_phy->ibias_up);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
+               return ret;
+       }
+
+       dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
+       hdmi_phy->drv_imp_clk = 0x30;
+       hdmi_phy->drv_imp_d2 = 0x30;
+       hdmi_phy->drv_imp_d1 = 0x30;
+       hdmi_phy->drv_imp_d0 = 0x30;
+
+       phy = devm_phy_create(dev, NULL, mtk_hdmi_phy_dev_get_ops(hdmi_phy));
+       if (IS_ERR(phy)) {
+               dev_err(dev, "Failed to create HDMI PHY\n");
+               return PTR_ERR(phy);
+       }
+       phy_set_drvdata(phy, hdmi_phy);
+
+       phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+       if (IS_ERR(phy_provider)) {
+               dev_err(dev, "Failed to register HDMI PHY\n");
+               return PTR_ERR(phy_provider);
+       }
+
+       if (hdmi_phy->conf->pll_default_off)
+               hdmi_phy->conf->hdmi_phy_disable_tmds(hdmi_phy);
+
+       return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
+                                  hdmi_phy->pll);
+}
+
+static const struct of_device_id mtk_hdmi_phy_match[] = {
+       { .compatible = "mediatek,mt2701-hdmi-phy",
+         .data = &mtk_hdmi_phy_2701_conf,
+       },
+       { .compatible = "mediatek,mt8173-hdmi-phy",
+         .data = &mtk_hdmi_phy_8173_conf,
+       },
+       {},
+};
+
+struct platform_driver mtk_hdmi_phy_driver = {
+       .probe = mtk_hdmi_phy_probe,
+       .driver = {
+               .name = "mediatek-hdmi-phy",
+               .of_match_table = mtk_hdmi_phy_match,
+       },
+};
+module_platform_driver(mtk_hdmi_phy_driver);
+
+MODULE_DESCRIPTION("MediaTek HDMI PHY Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/mediatek/phy-mtk-hdmi.h b/drivers/phy/mediatek/phy-mtk-hdmi.h
new file mode 100644 (file)
index 0000000..dcf9bb1
--- /dev/null
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Chunhui Dai <chunhui.dai@mediatek.com>
+ */
+
+#ifndef _MTK_HDMI_PHY_H
+#define _MTK_HDMI_PHY_H
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+struct mtk_hdmi_phy;
+
+struct mtk_hdmi_phy_conf {
+       unsigned long flags;
+       bool pll_default_off;
+       const struct clk_ops *hdmi_phy_clk_ops;
+       void (*hdmi_phy_enable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
+       void (*hdmi_phy_disable_tmds)(struct mtk_hdmi_phy *hdmi_phy);
+};
+
+struct mtk_hdmi_phy {
+       void __iomem *regs;
+       struct device *dev;
+       struct mtk_hdmi_phy_conf *conf;
+       struct clk *pll;
+       struct clk_hw pll_hw;
+       unsigned long pll_rate;
+       unsigned char drv_imp_clk;
+       unsigned char drv_imp_d2;
+       unsigned char drv_imp_d1;
+       unsigned char drv_imp_d0;
+       unsigned int ibias;
+       unsigned int ibias_up;
+};
+
+void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+                            u32 bits);
+void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+                          u32 bits);
+void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
+                      u32 val, u32 mask);
+struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw);
+
+extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_8173_conf;
+extern struct mtk_hdmi_phy_conf mtk_hdmi_phy_2701_conf;
+
+#endif /* _MTK_HDMI_PHY_H */
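
The conf/callback split in this header is the extension point: a new SoC only needs clk_ops for its PLL, TMDS on/off callbacks, and a conf wired into the common probe's match table. A hypothetical "mt9999" example (everything below is illustrative, not an existing SoC):

#include "phy-mtk-hdmi.h"

static const struct clk_ops mtk_hdmi_phy_9999_pll_ops = {
	/* .prepare, .unprepare, .set_rate, .round_rate, .recalc_rate */
};

static void mtk_hdmi_phy_9999_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
	/* SoC-specific register pokes */
}

static void mtk_hdmi_phy_9999_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
{
}

struct mtk_hdmi_phy_conf mtk_hdmi_phy_9999_conf = {
	.flags = CLK_SET_RATE_GATE,
	.hdmi_phy_clk_ops = &mtk_hdmi_phy_9999_pll_ops,
	.hdmi_phy_enable_tmds = mtk_hdmi_phy_9999_enable_tmds,
	.hdmi_phy_disable_tmds = mtk_hdmi_phy_9999_disable_tmds,
};

/* ...plus a { .compatible = "mediatek,mt9999-hdmi-phy",
 *             .data = &mtk_hdmi_phy_9999_conf } entry in
 * mtk_hdmi_phy_match in phy-mtk-hdmi.c. */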
index c9f2851..da53aeb 100644 (file)
@@ -1167,6 +1167,7 @@ struct drm_device;
 #define DP_MST_PHYSICAL_PORT_0 0
 #define DP_MST_LOGICAL_PORT_0 8
 
+#define DP_LINK_CONSTANT_N_VALUE 0x8000
 #define DP_LINK_STATUS_SIZE       6
 bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
                          int lane_count);
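
DP_LINK_CONSTANT_N_VALUE (0x8000) is a fixed N that drivers can program into the link M/N registers, solving only for M. A hedged arithmetic sketch, assuming the usual DisplayPort relation M / N = stream_clock / link_clock (the helper name is illustrative, not a drm API):

#include <stdint.h>

#define DP_LINK_CONSTANT_N_VALUE 0x8000

/* Illustrative arithmetic only: with N held constant, M follows
 * directly from M / N = stream_clk / link_clk. */
static uint32_t example_dp_link_m(uint32_t stream_khz, uint32_t link_khz)
{
	return (uint64_t)stream_khz * DP_LINK_CONSTANT_N_VALUE / link_khz;
}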
diff --git a/include/linux/adreno-smmu-priv.h b/include/linux/adreno-smmu-priv.h
new file mode 100644 (file)
index 0000000..a889f28
--- /dev/null
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef __ADRENO_SMMU_PRIV_H
+#define __ADRENO_SMMU_PRIV_H
+
+#include <linux/io-pgtable.h>
+
+/**
+ * struct adreno_smmu_priv - private interface between adreno-smmu and GPU
+ *
+ * @cookie:        An opaque token provided by adreno-smmu and passed
+ *                 back into the callbacks
+ * @get_ttbr1_cfg: Get the TTBR1 config for the GPU's context bank
+ * @set_ttbr0_cfg: Set the TTBR0 config for the GPU's context bank.  A
+ *                 NULL config disables TTBR0 translation, otherwise
+ *                 TTBR0 translation is enabled with the specified cfg
+ *
+ * The GPU driver (drm/msm) and adreno-smmu work together for controlling
+ * the GPU's SMMU instance.  This is necessary because the GPU directly
+ * updates the SMMU for context switches, while on the other hand we do
+ * not want to duplicate all of the initial setup logic from arm-smmu.
+ *
+ * This private interface is used for the two drivers to coordinate.  The
+ * cookie and callback functions are populated when the GPU driver attaches
+ * its domain.
+ */
+struct adreno_smmu_priv {
+       const void *cookie;
+       const struct io_pgtable_cfg *(*get_ttbr1_cfg)(const void *cookie);
+       int (*set_ttbr0_cfg)(const void *cookie, const struct io_pgtable_cfg *cfg);
+};
+
+#endif /* __ADRENO_SMMU_PRIV_H */
\ No newline at end of file
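
To make the cookie/callback indirection concrete, a hypothetical consumer might look like the sketch below (function name and flow are illustrative, not drm/msm code): clone the SMMU's TTBR1 config, derive a per-context TTBR0 config from it, and install that for a context switch.

#include <linux/errno.h>
#include <linux/adreno-smmu-priv.h>

static int example_attach_context(struct adreno_smmu_priv *priv,
				  struct io_pgtable_cfg *ctx_cfg)
{
	const struct io_pgtable_cfg *ttbr1_cfg;

	ttbr1_cfg = priv->get_ttbr1_cfg(priv->cookie);
	if (!ttbr1_cfg)
		return -ENODEV;

	*ctx_cfg = *ttbr1_cfg;	/* start from the SMMU's own configuration */
	/* ...point ctx_cfg at the context's pagetables here... */

	return priv->set_ttbr0_cfg(priv->cookie, ctx_cfg);
}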