Merge tag 'drm-next-2021-08-31-1' of git://anongit.freedesktop.org/drm/drm
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 1 Sep 2021 18:26:46 +0000 (11:26 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 1 Sep 2021 18:26:46 +0000 (11:26 -0700)
Pull drm updates from Dave Airlie:
 "Highlights:

   - i915 has seen a lot of refactoring and uAPI cleanups due to a
     change in the upstream direction going forward

     This has all been audited with known userspace, but there may be
     some pitfalls that were missed.

   - i915 now uses common TTM to enable discrete memory on DG1/2 GPUs

   - i915 enables Jasper and Elkhart Lake by default and has preliminary
     XeHP/DG2 support

   - amdgpu adds support for Cyan Skillfish

   - lots of implicit fencing rules documented and fixed up in drivers

   - msm now uses the core scheduler

   - the irq midlayer has been removed for non-legacy drivers

   - the sysfb code now works on more than x86.

  Otherwise the usual smattering of stuff everywhere, panels, bridges,
  refactorings.

  Detailed summary:

  core:
   - extract i915 eDP backlight into core
   - DP aux bus support
   - drm_device.irq_enabled removed
   - port drivers to native irq interfaces
   - export gem shadow plane handling for vgem
   - print proper driver name in framebuffer registration
   - driver fixes for implicit fencing rules
   - ARM fixed rate compression modifier added
   - updated fb damage handling
   - rmfb ioctl logging/docs
   - drop drm_gem_object_put_locked
   - define DRM_FORMAT_MAX_PLANES
   - add gem fb vmap/vunmap helpers
   - add lockdep_assert(once) helpers
   - mark drm irq midlayer as legacy
   - use offset adjusted bo mapping conversion

  vgaarb:
   - cleanups

  fbdev:
   - extend efifb handling to all arches
   - div by 0 fixes for multiple drivers

  udmabuf:
   - add hugepage mapping support

  dma-buf:
   - non-dynamic exporter fixups
   - document implicit fencing rules

  amdgpu:
   - Initial Cyan Skillfish support
   - switch virtual DCE over to vkms based atomic
   - VCN/JPEG power down fixes
   - NAVI PCIE link handling fixes
   - AMD HDMI freesync fixes
   - Yellow Carp + Beige Goby fixes
   - Clockgating/S0ix/SMU/EEPROM fixes
   - embed hw fence in job
   - rework dma-resv handling
   - ensure eviction to system ram

  amdkfd:
   - uapi: SVM address range query added
   - sysfs leak fix
   - GPUVM TLB optimizations
   - vmfault/migration counters

  i915:
   - Enable JSL and EHL by default
   - preliminary XeHP/DG2 support
   - remove all CNL support (never shipped)
   - move to TTM for discrete memory support
   - allow mixed object mmap handling
   - GEM uAPI spring cleaning
       - add I915_MMAP_OBJECT_FIXED
       - reinstate ADL-P mmap ioctls
       - drop a bunch of features unused by userspace
       - disable and remove GPU relocations
   - revert some i915 misfeatures
   - major refactoring of GuC for Gen11+
   - execbuffer object locking separate step
   - reject caching/set-domain on discrete
   - Enable pipe DMC loading on XE-LPD and ADL-P
   - add PSF GV point support
   - Refactor and fix DDI buffer translations
   - Clean up FBC CFB allocation code
   - Finish INTEL_GEN() and friends macro conversions

  nouveau:
   - add eDP backlight support
   - implicit fence fix

  msm:
   - a680/7c3 support
   - drm/scheduler conversion

  panfrost:
   - rework GPU reset

  virtio:
   - fix fencing for planes

  ast:
   - add detect support

  bochs:
   - move to tiny GPU driver

  vc4:
   - use hotplug irqs
   - HDMI codec support

  vmwgfx:
   - use internal vmware device headers

  ingenic:
   - demidlayering irq

  rcar-du:
   - shutdown fixes
   - convert to bridge connector helpers

  zynqmp-dpsub:
   - misc fixes

  mgag200:
   - convert PLL handling to atomic

  mediatek:
   - MT8183 AAL support
   - gem mmap object support
   - MT8167 support

  etnaviv:
   - NXP Layerscape LS1028A SoC support
   - GEM mmap cleanups

  tegra:
   - new user API

  exynos:
   - missing unlock fix
   - build warning fix
   - use refcount_t"

* tag 'drm-next-2021-08-31-1' of git://anongit.freedesktop.org/drm/drm: (1318 commits)
  drm/amd/display: Move AllowDRAMSelfRefreshOrDRAMClockChangeInVblank to bounding box
  drm/amd/display: Remove duplicate dml init
  drm/amd/display: Update bounding box states (v2)
  drm/amd/display: Update number of DCN3 clock states
  drm/amdgpu: disable GFX CGCG in aldebaran
  drm/amdgpu: Clear RAS interrupt status on aldebaran
  drm/amdgpu: Add support for RAS XGMI err query
  drm/amdkfd: Account for SH/SE count when setting up cu masks.
  drm/amdgpu: rename amdgpu_bo_get_preferred_pin_domain
  drm/amdgpu: drop redundant cancel_delayed_work_sync call
  drm/amdgpu: add missing cleanups for more ASICs on UVD/VCE suspend
  drm/amdgpu: add missing cleanups for Polaris12 UVD/VCE on suspend
  drm/amdkfd: map SVM range with correct access permission
  drm/amdkfd: check access permisson to restore retry fault
  drm/amdgpu: Update RAS XGMI Error Query
  drm/amdgpu: Add driver infrastructure for MCA RAS
  drm/amd/display: Add Logging for HDMI color depth information
  drm/amd/amdgpu: consolidate PSP TA init shared buf functions
  drm/amd/amdgpu: add name field back to ras_common_if
  drm/amdgpu: Fix build with missing pm_suspend_target_state module export
  ...

24 files changed:
Documentation/devicetree/bindings/vendor-prefixes.yaml
MAINTAINERS
arch/x86/Kconfig
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/kmb/kmb_drv.c
drivers/gpu/drm/mediatek/mtk_disp_aal.c
drivers/gpu/drm/mediatek/mtk_dpi.c
drivers/gpu/drm/mediatek/mtk_drm_crtc.c
drivers/gpu/drm/mediatek/mtk_drm_plane.c
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
drivers/gpu/drm/msm/dp/dp_catalog.c
drivers/gpu/drm/msm/dp/dp_display.c
drivers/gpu/drm/nouveau/dispnv50/disp.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/panel/panel-simple.c

@@@ -271,8 -271,6 +271,8 @@@ patternProperties
      description: Shenzen Chuangsiqi Technology Co.,Ltd.
    "^cubietech,.*":
      description: Cubietech, Ltd.
 +  "^cui,.*":
 +    description: CUI Devices
    "^cypress,.*":
      description: Cypress Semiconductor Corporation
    "^cznic,.*":
      description: eGalax_eMPIA Technology Inc
    "^einfochips,.*":
      description: Einfochips
+   "^eink,.*":
+     description: E Ink Corporation
    "^elan,.*":
      description: Elan Microelectronic Corp.
    "^element14,.*":
      description: Chengdu Kaixuan Information Technology Co., Ltd.
    "^qiaodian,.*":
      description: QiaoDian XianShi Corporation
+   "^qishenglong,.*":
+     description: Shenzhen QiShenglong Industrialist Co., Ltd.
    "^qnap,.*":
      description: QNAP Systems, Inc.
    "^radxa,.*":
diff --combined MAINTAINERS
@@@ -459,12 -459,6 +459,12 @@@ S:       Maintaine
  W:    https://parisc.wiki.kernel.org/index.php/AD1889
  F:    sound/pci/ad1889.*
  
 +AD5110 ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
 +M:    Mugilraj Dhavachelvan <dmugil2000@gmail.com>
 +L:    linux-iio@vger.kernel.org
 +S:    Supported
 +F:    drivers/iio/potentiometer/ad5110.c
 +
  AD525X ANALOG DEVICES DIGITAL POTENTIOMETERS DRIVER
  M:    Michael Hennerich <michael.hennerich@analog.com>
  S:    Supported
@@@ -1322,13 -1316,6 +1322,13 @@@ L:    linux-media@vger.kernel.or
  S:    Maintained
  F:    drivers/media/i2c/aptina-pll.*
  
 +AQUACOMPUTER D5 NEXT PUMP SENSOR DRIVER
 +M:    Aleksa Savic <savicaleksa83@gmail.com>
 +L:    linux-hwmon@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/hwmon/aquacomputer_d5next.rst
 +F:    drivers/hwmon/aquacomputer_d5next.c
 +
  AQUANTIA ETHERNET DRIVER (atlantic)
  M:    Igor Russkikh <irusskikh@marvell.com>
  L:    netdev@vger.kernel.org
@@@ -1703,7 -1690,7 +1703,7 @@@ L:      linux-arm-kernel@lists.infradead.or
  S:    Maintained
  W:    https://asahilinux.org
  B:    https://github.com/AsahiLinux/linux/issues
- C:    irc://chat.freenode.net/asahi-dev
+ C:    irc://irc.oftc.net/asahi-dev
  T:    git https://github.com/AsahiLinux/linux.git
  F:    Documentation/devicetree/bindings/arm/apple.yaml
  F:    Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
@@@ -2855,7 -2842,7 +2855,7 @@@ AS3645A LED FLASH CONTROLLER DRIVE
  M:    Sakari Ailus <sakari.ailus@iki.fi>
  L:    linux-leds@vger.kernel.org
  S:    Maintained
 -F:    drivers/leds/leds-as3645a.c
 +F:    drivers/leds/flash/leds-as3645a.c
  
  ASAHI KASEI AK7375 LENS VOICE COIL DRIVER
  M:    Tianshu Qiu <tian.shu.qiu@intel.com>
@@@ -3210,7 -3197,7 +3210,7 @@@ S:      Maintaine
  W:    https://www.open-mesh.org/
  Q:    https://patchwork.open-mesh.org/project/batman/list/
  B:    https://www.open-mesh.org/projects/batman-adv/issues
 -C:    irc://chat.freenode.net/batman
 +C:    ircs://irc.hackint.org/batadv
  T:    git https://git.open-mesh.org/linux-merge.git
  F:    Documentation/networking/batman-adv.rst
  F:    include/uapi/linux/batadv_packet.h
@@@ -3422,6 -3409,7 +3422,6 @@@ F:      drivers/net/ethernet/netronome/nfp/b
  
  BPF JIT for POWERPC (32-BIT AND 64-BIT)
  M:    Naveen N. Rao <naveen.n.rao@linux.ibm.com>
 -M:    Sandipan Das <sandipan@linux.ibm.com>
  L:    netdev@vger.kernel.org
  L:    bpf@vger.kernel.org
  S:    Maintained
@@@ -3878,16 -3866,6 +3878,16 @@@ L:    bcm-kernel-feedback-list@broadcom.co
  S:    Maintained
  F:    drivers/mtd/nand/raw/brcmnand/
  
 +BROADCOM STB PCIE DRIVER
 +M:    Jim Quinlan <jim2101024@gmail.com>
 +M:    Nicolas Saenz Julienne <nsaenz@kernel.org>
 +M:    Florian Fainelli <f.fainelli@gmail.com>
 +M:    bcm-kernel-feedback-list@broadcom.com
 +L:    linux-pci@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
 +F:    drivers/pci/controller/pcie-brcmstb.c
 +
  BROADCOM SYSTEMPORT ETHERNET DRIVER
  M:    Florian Fainelli <f.fainelli@gmail.com>
  L:    bcm-kernel-feedback-list@broadcom.com
@@@ -4520,7 -4498,7 +4520,7 @@@ L:      clang-built-linux@googlegroups.co
  S:    Supported
  W:    https://clangbuiltlinux.github.io/
  B:    https://github.com/ClangBuiltLinux/linux/issues
 -C:    irc://chat.freenode.net/clangbuiltlinux
 +C:    irc://irc.libera.chat/clangbuiltlinux
  F:    Documentation/kbuild/llvm.rst
  F:    include/linux/compiler-clang.h
  F:    scripts/clang-tools/
@@@ -4632,7 -4610,7 +4632,7 @@@ F:      include/linux/clk
  F:    include/linux/of_clk.h
  X:    drivers/clk/clkdev.c
  
 -COMMON INTERNET FILE SYSTEM (CIFS)
 +COMMON INTERNET FILE SYSTEM CLIENT (CIFS)
  M:    Steve French <sfrench@samba.org>
  L:    linux-cifs@vger.kernel.org
  L:    samba-technical@lists.samba.org (moderated for non-subscribers)
@@@ -4641,7 -4619,6 +4641,7 @@@ W:      http://linux-cifs.samba.org
  T:    git git://git.samba.org/sfrench/cifs-2.6.git
  F:    Documentation/admin-guide/cifs/
  F:    fs/cifs/
 +F:    fs/cifs_common/
  
  COMPACTPCI HOTPLUG CORE
  M:    Scott Murray <scott@spiteful.org>
@@@ -5707,7 -5684,6 +5707,7 @@@ DPAA2 ETHERNET SWITCH DRIVE
  M:    Ioana Ciornei <ioana.ciornei@nxp.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
 +F:    Documentation/networking/device_drivers/ethernet/freescale/dpaa2/switch-driver.rst
  F:    drivers/net/ethernet/freescale/dpaa2/dpaa2-switch*
  F:    drivers/net/ethernet/freescale/dpaa2/dpsw*
  
@@@ -5731,11 -5707,6 +5731,11 @@@ F:    Documentation/admin-guide/blockdev
  F:    drivers/block/drbd/
  F:    lib/lru_cache.c
  
 +DRIVER COMPONENT FRAMEWORK
 +L:    dri-devel@lists.freedesktop.org
 +F:    drivers/base/component.c
 +F:    include/linux/component.h
 +
  DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
  M:    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
  R:    "Rafael J. Wysocki" <rafael@kernel.org>
@@@ -5799,7 -5770,7 +5799,7 @@@ M:      Gerd Hoffmann <kraxel@redhat.com
  L:    virtualization@lists.linux-foundation.org
  S:    Maintained
  T:    git git://anongit.freedesktop.org/drm/drm-misc
- F:    drivers/gpu/drm/bochs/
+ F:    drivers/gpu/drm/tiny/bochs.c
  
  DRM DRIVER FOR BOE HIMAX8279D PANELS
  M:    Jerry Han <hanxu5@huaqin.corp-partner.google.com>
@@@ -5984,6 -5955,13 +5984,13 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
  F:    drivers/gpu/drm/panel/panel-raydium-rm67191.c
  
+ DRM DRIVER FOR SAMSUNG DB7430 PANELS
+ M:    Linus Walleij <linus.walleij@linaro.org>
+ S:    Maintained
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
+ F:    Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
+ F:    drivers/gpu/drm/panel/panel-samsung-db7430.c
  DRM DRIVER FOR SITRONIX ST7703 PANELS
  M:    Guido Günther <agx@sigxcpu.org>
  R:    Purism Kernel Team <kernel@puri.sm>
@@@ -6082,21 -6060,27 +6089,27 @@@ F:   drivers/gpu/drm/vboxvideo
  
  DRM DRIVER FOR VMWARE VIRTUAL GPU
  M:    "VMware Graphics" <linux-graphics-maintainer@vmware.com>
- M:    Roland Scheidegger <sroland@vmware.com>
  M:    Zack Rusin <zackr@vmware.com>
  L:    dri-devel@lists.freedesktop.org
  S:    Supported
- T:    git git://people.freedesktop.org/~sroland/linux
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
  F:    drivers/gpu/drm/vmwgfx/
  F:    include/uapi/drm/vmwgfx_drm.h
  
+ DRM DRIVER FOR WIDECHIPS WS2401 PANELS
+ M:    Linus Walleij <linus.walleij@linaro.org>
+ S:    Maintained
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
+ F:    Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
+ F:    drivers/gpu/drm/panel/panel-widechips-ws2401.c
  DRM DRIVERS
  M:    David Airlie <airlied@linux.ie>
  M:    Daniel Vetter <daniel@ffwll.ch>
  L:    dri-devel@lists.freedesktop.org
  S:    Maintained
  B:    https://gitlab.freedesktop.org/drm
- C:    irc://chat.freenode.net/dri-devel
+ C:    irc://irc.oftc.net/dri-devel
  T:    git git://anongit.freedesktop.org/drm/drm
  F:    Documentation/devicetree/bindings/display/
  F:    Documentation/devicetree/bindings/gpu/
@@@ -6933,12 -6917,6 +6946,12 @@@ M:    Mark Einon <mark.einon@gmail.com
  S:    Odd Fixes
  F:    drivers/net/ethernet/agere/
  
 +ETAS ES58X CAN/USB DRIVER
 +M:    Vincent Mailhol <mailhol.vincent@wanadoo.fr>
 +L:    linux-can@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/can/usb/etas_es58x/
 +
  ETHERNET BRIDGE
  M:    Roopa Prabhu <roopa@nvidia.com>
  M:    Nikolay Aleksandrov <nikolay@nvidia.com>
@@@ -6980,7 -6958,7 +6993,7 @@@ F:      include/uapi/linux/mdio.
  F:    include/uapi/linux/mii.h
  
  EXFAT FILE SYSTEM
 -M:    Namjae Jeon <namjae.jeon@samsung.com>
 +M:    Namjae Jeon <linkinjeon@kernel.org>
  M:    Sungjong Seo <sj1557.seo@samsung.com>
  L:    linux-fsdevel@vger.kernel.org
  S:    Maintained
@@@ -7893,9 -7871,9 +7906,9 @@@ S:      Maintaine
  F:    drivers/input/touchscreen/goodix.c
  
  GOOGLE ETHERNET DRIVERS
 -M:    Catherine Sullivan <csully@google.com>
 -R:    Sagi Shahar <sagis@google.com>
 -R:    Jon Olson <jonolson@google.com>
 +M:    Jeroen de Borst <jeroendb@google.com>
 +R:    Catherine Sullivan <csully@google.com>
 +R:    David Awogbemila <awogbemila@google.com>
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    Documentation/networking/device_drivers/ethernet/google/gve.rst
@@@ -8468,12 -8446,10 +8481,12 @@@ S:   Maintaine
  F:    Documentation/devicetree/bindings/spmi/hisilicon,hisi-spmi-controller.yaml
  F:    drivers/spmi/hisi-spmi-controller.c
  
 -HISILICON STAGING DRIVERS FOR HIKEY 960/970
 +HISILICON SPMI PMIC DRIVER FOR HIKEY 6421v600
  M:    Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
 +L:    linux-kernel@vger.kernel.org
  S:    Maintained
 -F:    drivers/staging/hikey9xx/
 +F:    Documentation/devicetree/bindings/mfd/hisilicon,hi6421-spmi-pmic.yaml
 +F:    drivers/mfd/hi6421-spmi-pmic.c
  
  HISILICON TRUE RANDOM NUMBER GENERATOR V2 SUPPORT
  M:    Zaibo Xu <xuzaibo@huawei.com>
@@@ -9288,7 -9264,7 +9301,7 @@@ S:      Supporte
  W:    https://01.org/linuxgraphics/
  Q:    http://patchwork.freedesktop.org/project/intel-gfx/
  B:    https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
- C:    irc://chat.freenode.net/intel-gfx
+ C:    irc://irc.oftc.net/intel-gfx
  T:    git git://anongit.freedesktop.org/drm-intel
  F:    Documentation/gpu/i915.rst
  F:    drivers/gpu/drm/i915/
@@@ -9786,6 -9762,11 +9799,6 @@@ M:     David Sterba <dsterba@suse.com
  S:    Odd Fixes
  F:    drivers/tty/ipwireless/
  
 -IPX NETWORK LAYER
 -L:    netdev@vger.kernel.org
 -S:    Obsolete
 -F:    include/uapi/linux/ipx.h
 -
  IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
  M:    Marc Zyngier <maz@kernel.org>
  S:    Maintained
@@@ -10135,17 -10116,6 +10148,17 @@@ T: git git://git.kernel.org/pub/scm/lin
  F:    Documentation/dev-tools/kselftest*
  F:    tools/testing/selftests/
  
 +KERNEL SMB3 SERVER (KSMBD)
 +M:    Namjae Jeon <linkinjeon@kernel.org>
 +M:    Sergey Senozhatsky <senozhatsky@chromium.org>
 +M:    Steve French <sfrench@samba.org>
 +M:    Hyunchul Lee <hyc.lee@gmail.com>
 +L:    linux-cifs@vger.kernel.org
 +S:    Maintained
 +T:    git git://git.samba.org/ksmbd.git
 +F:    fs/cifs_common/
 +F:    fs/ksmbd/
 +
  KERNEL UNIT TESTING FRAMEWORK (KUnit)
  M:    Brendan Higgins <brendanhiggins@google.com>
  L:    linux-kselftest@vger.kernel.org
@@@ -10431,7 -10401,6 +10444,7 @@@ F:   net/core/skmsg.
  F:    net/core/sock_map.c
  F:    net/ipv4/tcp_bpf.c
  F:    net/ipv4/udp_bpf.c
 +F:    net/unix/unix_bpf.c
  
  LANDLOCK SECURITY MODULE
  M:    Mickaël Salaün <mic@digikod.net>
@@@ -10653,6 -10622,15 +10666,6 @@@ F:  LICENSES
  F:    scripts/spdxcheck-test.sh
  F:    scripts/spdxcheck.py
  
 -LIGHTNVM PLATFORM SUPPORT
 -M:    Matias Bjorling <mb@lightnvm.io>
 -L:    linux-block@vger.kernel.org
 -S:    Maintained
 -W:    http://github/OpenChannelSSD
 -F:    drivers/lightnvm/
 -F:    include/linux/lightnvm.h
 -F:    include/uapi/linux/lightnvm.h
 -
  LINEAR RANGES HELPERS
  M:    Mark Brown <broonie@kernel.org>
  R:    Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>
@@@ -11065,18 -11043,6 +11078,18 @@@ F: drivers/mailbox/arm_mhuv2.
  F:    include/linux/mailbox/arm_mhuv2_message.h
  F:    Documentation/devicetree/bindings/mailbox/arm,mhuv2.yaml
  
 +MANAGEMENT COMPONENT TRANSPORT PROTOCOL (MCTP)
 +M:    Jeremy Kerr <jk@codeconstruct.com.au>
 +M:    Matt Johnston <matt@codeconstruct.com.au>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/networking/mctp.rst
 +F:    drivers/net/mctp/
 +F:    include/net/mctp.h
 +F:    include/net/mctpdevice.h
 +F:    include/net/netns/mctp.h
 +F:    net/mctp/
 +
  MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
  M:    Michael Kerrisk <mtk.manpages@gmail.com>
  L:    linux-man@vger.kernel.org
@@@ -11374,18 -11340,6 +11387,18 @@@ W: https://linuxtv.or
  T:    git git://linuxtv.org/media_tree.git
  F:    drivers/media/radio/radio-maxiradio*
  
 +MAXLINEAR ETHERNET PHY DRIVER
 +M:    Xu Liang <lxu@maxlinear.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    drivers/net/phy/mxl-gpy.c
 +
 +MCBA MICROCHIP CAN BUS ANALYZER TOOL DRIVER
 +R:    Yasushi SHOJI <yashi@spacecubics.com>
 +L:    linux-can@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/can/usb/mcba_usb.c
 +
  MCAN MMIO DEVICE DRIVER
  M:    Chandrasekar Ramakrishnan <rcsekar@samsung.com>
  L:    linux-can@vger.kernel.org
@@@ -13833,15 -13787,6 +13846,15 @@@ T: git git://linuxtv.org/media_tree.gi
  F:    Documentation/devicetree/bindings/media/i2c/ov8856.yaml
  F:    drivers/media/i2c/ov8856.c
  
 +OMNIVISION OV9282 SENSOR DRIVER
 +M:    Paul J. Murphy <paul.j.murphy@intel.com>
 +M:    Daniele Alessandrelli <daniele.alessandrelli@intel.com>
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +T:    git git://linuxtv.org/media_tree.git
 +F:    Documentation/devicetree/bindings/media/i2c/ovti,ov9282.yaml
 +F:    drivers/media/i2c/ov9282.c
 +
  OMNIVISION OV9640 SENSOR DRIVER
  M:    Petr Cvek <petrcvekcz@gmail.com>
  L:    linux-media@vger.kernel.org
@@@ -13932,12 -13877,6 +13945,12 @@@ F: Documentation/devicetree
  F:    arch/*/boot/dts/
  F:    include/dt-bindings/
  
 +OPENCOMPUTE PTP CLOCK DRIVER
 +M:    Jonathan Lemon <jonathan.lemon@gmail.com>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    drivers/ptp/ptp_ocp.c
 +
  OPENCORES I2C BUS DRIVER
  M:    Peter Korsgaard <peter@korsgaard.com>
  M:    Andrew Lunn <andrew@lunn.ch>
@@@ -14498,13 -14437,6 +14511,13 @@@ S: Maintaine
  F:    Documentation/devicetree/bindings/pci/hisilicon-histb-pcie.txt
  F:    drivers/pci/controller/dwc/pcie-histb.c
  
 +PCIE DRIVER FOR INTEL LGM GW SOC
 +M:    Rahul Tanwar <rtanwar@maxlinear.com>
 +L:    linux-pci@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/pci/intel-gw-pcie.yaml
 +F:    drivers/pci/controller/dwc/pcie-intel-gw.c
 +
  PCIE DRIVER FOR MEDIATEK
  M:    Ryder Lee <ryder.lee@mediatek.com>
  M:    Jianjun Wang <jianjun.wang@mediatek.com>
@@@ -15001,6 -14933,13 +15014,6 @@@ S:  Maintaine
  F:    include/linux/printk.h
  F:    kernel/printk/
  
 -PRISM54 WIRELESS DRIVER
 -M:    Luis Chamberlain <mcgrof@kernel.org>
 -L:    linux-wireless@vger.kernel.org
 -S:    Obsolete
 -W:    https://wireless.wiki.kernel.org/en/users/Drivers/p54
 -F:    drivers/net/wireless/intersil/prism54/
 -
  PROC FILESYSTEM
  L:    linux-kernel@vger.kernel.org
  L:    linux-fsdevel@vger.kernel.org
@@@ -15871,7 -15810,7 +15884,7 @@@ F:   Documentation/devicetree/bindings/i2
  F:    drivers/i2c/busses/i2c-emev2.c
  
  RENESAS ETHERNET DRIVERS
 -R:    Sergei Shtylyov <sergei.shtylyov@gmail.com>
 +R:    Sergey Shtylyov <s.shtylyov@omp.ru>
  L:    netdev@vger.kernel.org
  L:    linux-renesas-soc@vger.kernel.org
  F:    Documentation/devicetree/bindings/net/renesas,*.yaml
@@@ -15916,14 -15855,6 +15929,14 @@@ L: linux-renesas-soc@vger.kernel.or
  S:    Maintained
  F:    drivers/phy/renesas/phy-rcar-gen3-usb*.c
  
 +RENESAS RZ/G2L A/D DRIVER
 +M:    Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
 +L:    linux-iio@vger.kernel.org
 +L:    linux-renesas-soc@vger.kernel.org
 +S:    Supported
 +F:    Documentation/devicetree/bindings/iio/adc/renesas,rzg2l-adc.yaml
 +F:    drivers/iio/adc/rzg2l_adc.c
 +
  RESET CONTROLLER FRAMEWORK
  M:    Philipp Zabel <p.zabel@pengutronix.de>
  S:    Maintained
@@@ -16791,12 -16722,6 +16804,12 @@@ F: drivers/iio/chemical/scd30_core.
  F:    drivers/iio/chemical/scd30_i2c.c
  F:    drivers/iio/chemical/scd30_serial.c
  
 +SENSIRION SGP40 GAS SENSOR DRIVER
 +M:    Andreas Klinger <ak@it-klinger.de>
 +S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-bus-iio-chemical-sgp40
 +F:    drivers/iio/chemical/sgp40.c
 +
  SENSIRION SPS30 AIR POLLUTION SENSOR DRIVER
  M:    Tomasz Duszynski <tduszyns@gmail.com>
  S:    Maintained
@@@ -17375,15 -17300,6 +17388,15 @@@ T: git git://linuxtv.org/media_tree.gi
  F:    Documentation/devicetree/bindings/media/i2c/sony,imx334.yaml
  F:    drivers/media/i2c/imx334.c
  
 +SONY IMX335 SENSOR DRIVER
 +M:    Paul J. Murphy <paul.j.murphy@intel.com>
 +M:    Daniele Alessandrelli <daniele.alessandrelli@intel.com>
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +T:    git git://linuxtv.org/media_tree.git
 +F:    Documentation/devicetree/bindings/media/i2c/sony,imx335.yaml
 +F:    drivers/media/i2c/imx335.c
 +
  SONY IMX355 SENSOR DRIVER
  M:    Tianshu Qiu <tian.shu.qiu@intel.com>
  L:    linux-media@vger.kernel.org
@@@ -17391,15 -17307,6 +17404,15 @@@ S: Maintaine
  T:    git git://linuxtv.org/media_tree.git
  F:    drivers/media/i2c/imx355.c
  
 +SONY IMX412 SENSOR DRIVER
 +M:    Paul J. Murphy <paul.j.murphy@intel.com>
 +M:    Daniele Alessandrelli <daniele.alessandrelli@intel.com>
 +L:    linux-media@vger.kernel.org
 +S:    Maintained
 +T:    git git://linuxtv.org/media_tree.git
 +F:    Documentation/devicetree/bindings/media/i2c/sony,imx412.yaml
 +F:    drivers/media/i2c/imx412.c
 +
  SONY MEMORYSTICK SUBSYSTEM
  M:    Maxim Levitsky <maximlevitsky@gmail.com>
  M:    Alex Dubov <oakad@yahoo.com>
@@@ -17719,9 -17626,8 +17732,9 @@@ F:   drivers/staging/olpc_dcon
  
  STAGING - REALTEK RTL8188EU DRIVERS
  M:    Larry Finger <Larry.Finger@lwfinger.net>
 -S:    Odd Fixes
 -F:    drivers/staging/rtl8188eu/
 +M:    Phillip Potter <phil@philpotter.co.uk>
 +S:    Supported
 +F:    drivers/staging/r8188eu/
  
  STAGING - REALTEK RTL8712U DRIVERS
  M:    Larry Finger <Larry.Finger@lwfinger.net>
@@@ -17916,7 -17822,7 +17929,7 @@@ F:   include/linux/sync_file.
  F:    include/uapi/linux/sync_file.h
  
  SYNOPSYS ARC ARCHITECTURE
 -M:    Vineet Gupta <vgupta@synopsys.com>
 +M:    Vineet Gupta <vgupta@kernel.org>
  L:    linux-snps-arc@lists.infradead.org
  S:    Supported
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc.git
@@@ -19817,15 -19723,6 +19830,15 @@@ S: Maintaine
  F:    include/uapi/linux/virtio_snd.h
  F:    sound/virtio/*
  
 +VIRTIO I2C DRIVER
 +M:    Jie Deng <jie.deng@intel.com>
 +M:    Viresh Kumar <viresh.kumar@linaro.org>
 +L:    linux-i2c@vger.kernel.org
 +L:    virtualization@lists.linux-foundation.org
 +S:    Maintained
 +F:    drivers/i2c/busses/i2c-virtio.c
 +F:    include/uapi/linux/virtio_i2c.h
 +
  VIRTUAL BOX GUEST DEVICE DRIVER
  M:    Hans de Goede <hdegoede@redhat.com>
  M:    Arnd Bergmann <arnd@arndb.de>
@@@ -20127,8 -20024,7 +20140,8 @@@ F:   Documentation/devicetree/bindings/ex
  F:    Documentation/devicetree/bindings/mfd/wlf,arizona.yaml
  F:    Documentation/devicetree/bindings/mfd/wm831x.txt
  F:    Documentation/devicetree/bindings/regulator/wlf,arizona.yaml
 -F:    Documentation/devicetree/bindings/sound/wlf,arizona.yaml
 +F:    Documentation/devicetree/bindings/sound/wlf,*.yaml
 +F:    Documentation/devicetree/bindings/sound/wm*
  F:    Documentation/hwmon/wm83??.rst
  F:    arch/arm/mach-s3c/mach-crag6410*
  F:    drivers/clk/clk-wm83*.c
diff --combined arch/x86/Kconfig
@@@ -119,7 -119,6 +119,7 @@@ config X8
        select ARCH_WANT_HUGE_PMD_SHARE
        select ARCH_WANT_LD_ORPHAN_WARN
        select ARCH_WANTS_THP_SWAP              if X86_64
 +      select ARCH_HAS_PARANOID_L1D_FLUSH
        select BUILDTIME_TABLE_SORT
        select CLKEVT_I8253
        select CLOCKSOURCE_VALIDATE_LAST_CYCLE
        select HAVE_FUNCTION_TRACER
        select HAVE_GCC_PLUGINS
        select HAVE_HW_BREAKPOINT
 -      select HAVE_IDE
        select HAVE_IOREMAP_PROT
        select HAVE_IRQ_EXIT_ON_IRQ_STACK       if X86_64
        select HAVE_IRQ_TIME_ACCOUNTING
@@@ -2767,32 -2767,6 +2767,6 @@@ config AMD_N
        def_bool y
        depends on CPU_SUP_AMD && PCI
  
- config X86_SYSFB
-       bool "Mark VGA/VBE/EFI FB as generic system framebuffer"
-       help
-         Firmwares often provide initial graphics framebuffers so the BIOS,
-         bootloader or kernel can show basic video-output during boot for
-         user-guidance and debugging. Historically, x86 used the VESA BIOS
-         Extensions and EFI-framebuffers for this, which are mostly limited
-         to x86.
-         This option, if enabled, marks VGA/VBE/EFI framebuffers as generic
-         framebuffers so the new generic system-framebuffer drivers can be
-         used on x86. If the framebuffer is not compatible with the generic
-         modes, it is advertised as fallback platform framebuffer so legacy
-         drivers like efifb, vesafb and uvesafb can pick it up.
-         If this option is not selected, all system framebuffers are always
-         marked as fallback platform framebuffers as usual.
-         Note: Legacy fbdev drivers, including vesafb, efifb, uvesafb, will
-         not be able to pick up generic system framebuffers if this option
-         is selected. You are highly encouraged to enable simplefb as
-         replacement if you select this option. simplefb can correctly deal
-         with generic system framebuffers. But you should still keep vesafb
-         and others enabled as fallback if a system framebuffer is
-         incompatible with simplefb.
-         If unsure, say Y.
  endmenu
  
  
@@@ -46,7 -46,6 +46,6 @@@
  #include <linux/pci.h>
  
  #include <drm/drm_crtc_helper.h>
- #include <drm/drm_irq.h>
  #include <drm/drm_vblank.h>
  #include <drm/amdgpu_drm.h>
  #include <drm/drm_drv.h>
@@@ -184,7 -183,7 +183,7 @@@ void amdgpu_irq_disable_all(struct amdg
   * Returns:
   * result of handling the IRQ, as defined by &irqreturn_t
   */
- irqreturn_t amdgpu_irq_handler(int irq, void *arg)
static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
  {
        struct drm_device *dev = (struct drm_device *) arg;
        struct amdgpu_device *adev = drm_to_adev(dev);
@@@ -307,6 -306,7 +306,7 @@@ static void amdgpu_restore_msix(struct 
  int amdgpu_irq_init(struct amdgpu_device *adev)
  {
        int r = 0;
+       unsigned int irq;
  
        spin_lock_init(&adev->irq.lock);
  
        INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
        INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);
  
-       adev->irq.installed = true;
-       /* Use vector 0 for MSI-X */
-       r = drm_irq_install(adev_to_drm(adev), pci_irq_vector(adev->pdev, 0));
+       /* Use vector 0 for MSI-X. */
+       r = pci_irq_vector(adev->pdev, 0);
+       if (r < 0)
+               return r;
+       irq = r;
+       /* PCI devices require shared interrupts. */
+       r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
+                       adev_to_drm(adev));
        if (r) {
-               adev->irq.installed = false;
                if (!amdgpu_device_has_dc_support(adev))
                        flush_work(&adev->hotplug_work);
                return r;
        }
+       adev->irq.installed = true;
+       adev->irq.irq = irq;
        adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
  
        DRM_DEBUG("amdgpu: irq initialized.\n");
  void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
  {
        if (adev->irq.installed) {
-               drm_irq_uninstall(&adev->ddev);
+               free_irq(adev->irq.irq, adev_to_drm(adev));
                adev->irq.installed = false;
                if (adev->irq.msi_enabled)
                        pci_free_irq_vectors(adev->pdev);
  }
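
The two hunks above show the pattern drivers follow now that the DRM IRQ midlayer is legacy-only: look up the vector with pci_irq_vector(), install the handler directly with request_irq(), and release it with free_irq() on teardown. A minimal sketch of that pattern, with hypothetical mydrv_* names standing in for a driver's own code (not the actual amdgpu implementation):

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical device structure; a real driver keeps these fields in its
 * own private device struct. */
struct mydrv_device {
        struct pci_dev *pdev;
        unsigned int irq;
};

static irqreturn_t mydrv_irq_handler(int irq, void *arg)
{
        /* acknowledge and dispatch the hardware interrupt here */
        return IRQ_HANDLED;
}

static int mydrv_irq_init(struct mydrv_device *mdev)
{
        int irq, r;

        /* Vector 0 works for legacy INTx, MSI and MSI-X alike. */
        irq = pci_irq_vector(mdev->pdev, 0);
        if (irq < 0)
                return irq;

        /* PCI interrupt lines may be shared, hence IRQF_SHARED. */
        r = request_irq(irq, mydrv_irq_handler, IRQF_SHARED,
                        dev_name(&mdev->pdev->dev), mdev);
        if (r)
                return r;

        mdev->irq = irq;
        return 0;
}

static void mydrv_irq_fini(struct mydrv_device *mdev)
{
        free_irq(mdev->irq, mdev);      /* matches request_irq() above */
        pci_free_irq_vectors(mdev->pdev);
}
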
@@@ -502,7 -509,7 +509,7 @@@ void amdgpu_irq_dispatch(struct amdgpu_
  
        } else if ((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) &&
                   adev->irq.virq[src_id]) {
 -              generic_handle_irq(irq_find_mapping(adev->irq.domain, src_id));
 +              generic_handle_domain_irq(adev->irq.domain, src_id);
  
        } else if (!adev->irq.client[client_id].sources) {
                DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n",
@@@ -584,7 -591,7 +591,7 @@@ void amdgpu_irq_gpu_reset_resume_helper
  {
        int i, j, k;
  
-       if (amdgpu_sriov_vf(adev))
+       if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
                amdgpu_restore_msix(adev);
  
        for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
  int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type)
  {
-       if (!adev_to_drm(adev)->irq_enabled)
+       if (!adev->irq.installed)
                return -ENOENT;
  
        if (type >= src->num_types)
  int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                   unsigned type)
  {
-       if (!adev_to_drm(adev)->irq_enabled)
+       if (!adev->irq.installed)
                return -ENOENT;
  
        if (type >= src->num_types)
  bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
                        unsigned type)
  {
-       if (!adev_to_drm(adev)->irq_enabled)
+       if (!adev->irq.installed)
                return false;
  
        if (type >= src->num_types)
  #include "amdgpu_ras.h"
  #include <linux/bits.h>
  #include "atom.h"
+ #include "amdgpu_eeprom.h"
  #include "amdgpu_atomfirmware.h"
+ #include <linux/debugfs.h>
+ #include <linux/uaccess.h>
  
- #define EEPROM_I2C_TARGET_ADDR_VEGA20         0xA0
- #define EEPROM_I2C_TARGET_ADDR_ARCTURUS               0xA8
- #define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342  0xA0
- #define EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID   0xA0
- #define EEPROM_I2C_TARGET_ADDR_ALDEBARAN        0xA0
+ #define EEPROM_I2C_MADDR_VEGA20         0x0
+ #define EEPROM_I2C_MADDR_ARCTURUS       0x40000
+ #define EEPROM_I2C_MADDR_ARCTURUS_D342  0x0
+ #define EEPROM_I2C_MADDR_SIENNA_CICHLID 0x0
+ #define EEPROM_I2C_MADDR_ALDEBARAN      0x0
  
  /*
   * The 2 macros below represent the actual size in bytes that
   * those entities occupy in the EEPROM memory.
-  * EEPROM_TABLE_RECORD_SIZE is different than sizeof(eeprom_table_record) which
+  * RAS_TABLE_RECORD_SIZE is different than sizeof(eeprom_table_record) which
   * uses uint64 to store 6b fields such as retired_page.
   */
- #define EEPROM_TABLE_HEADER_SIZE 20
- #define EEPROM_TABLE_RECORD_SIZE 24
- #define EEPROM_ADDRESS_SIZE 0x2
+ #define RAS_TABLE_HEADER_SIZE   20
+ #define RAS_TABLE_RECORD_SIZE   24
  
  /* Table hdr is 'AMDR' */
- #define EEPROM_TABLE_HDR_VAL 0x414d4452
- #define EEPROM_TABLE_VER 0x00010000
+ #define RAS_TABLE_HDR_VAL       0x414d4452
+ #define RAS_TABLE_VER           0x00010000
  
  /* Bad GPU tag ‘BADG’ */
- #define EEPROM_TABLE_HDR_BAD 0x42414447
+ #define RAS_TABLE_HDR_BAD       0x42414447
+ /* Assume 2-Mbit size EEPROM and take up the whole space. */
+ #define RAS_TBL_SIZE_BYTES      (256 * 1024)
+ #define RAS_TABLE_START         0
+ #define RAS_HDR_START           RAS_TABLE_START
+ #define RAS_RECORD_START        (RAS_HDR_START + RAS_TABLE_HEADER_SIZE)
+ #define RAS_MAX_RECORD_COUNT    ((RAS_TBL_SIZE_BYTES - RAS_TABLE_HEADER_SIZE) \
+                                / RAS_TABLE_RECORD_SIZE)
+ /* Given a zero-based index of an EEPROM RAS record, yields the EEPROM
+  * offset off of RAS_TABLE_START.  That is, this is something you can
+  * add to control->i2c_address, and then tell I2C layer to read
+  * from/write to there. _N is the so called absolute index,
+  * because it starts right after the table header.
+  */
+ #define RAS_INDEX_TO_OFFSET(_C, _N) ((_C)->ras_record_offset + \
+                                    (_N) * RAS_TABLE_RECORD_SIZE)
+ #define RAS_OFFSET_TO_INDEX(_C, _O) (((_O) - \
+                                     (_C)->ras_record_offset) / RAS_TABLE_RECORD_SIZE)
+ /* Given a 0-based relative record index, 0, 1, 2, ..., etc., off
+  * of "fri", return the absolute record index off of the end of
+  * the table header.
+  */
+ #define RAS_RI_TO_AI(_C, _I) (((_I) + (_C)->ras_fri) % \
+                             (_C)->ras_max_record_count)
  
- /* Assume 2 Mbit size */
- #define EEPROM_SIZE_BYTES 256000
- #define EEPROM_PAGE__SIZE_BYTES 256
- #define EEPROM_HDR_START 0
- #define EEPROM_RECORD_START (EEPROM_HDR_START + EEPROM_TABLE_HEADER_SIZE)
- #define EEPROM_MAX_RECORD_NUM ((EEPROM_SIZE_BYTES - EEPROM_TABLE_HEADER_SIZE) / EEPROM_TABLE_RECORD_SIZE)
- #define EEPROM_ADDR_MSB_MASK GENMASK(17, 8)
+ #define RAS_NUM_RECS(_tbl_hdr)  (((_tbl_hdr)->tbl_size - \
+                                 RAS_TABLE_HEADER_SIZE) / RAS_TABLE_RECORD_SIZE)
  
  #define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev
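
As a concrete reading of the index macros above: assuming ras_record_offset has been set to RAS_RECORD_START (20) and, say, ras_fri is 5, relative record 0 maps to absolute index 5 and therefore to EEPROM offset 20 + 5 * 24 = 140 past RAS_TABLE_START. The helper below is purely illustrative (it is not part of the driver) and only shows how the two macros compose:

static u32 ras_rel_record_offset(const struct amdgpu_ras_eeprom_control *control,
                                 u32 i)
{
        /* Relative index i (0 = oldest record, stored at ras_fri) -> absolute
         * record index -> byte offset past RAS_TABLE_START, which the caller
         * then adds to control->i2c_address for the EEPROM access. */
        return RAS_INDEX_TO_OFFSET(control, RAS_RI_TO_AI(control, i));
}
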
  
  static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
  {
-       if ((adev->asic_type == CHIP_VEGA20) ||
-           (adev->asic_type == CHIP_ARCTURUS) ||
-           (adev->asic_type == CHIP_SIENNA_CICHLID) ||
-           (adev->asic_type == CHIP_ALDEBARAN))
-               return true;
-       return false;
+       return  adev->asic_type == CHIP_VEGA20 ||
+               adev->asic_type == CHIP_ARCTURUS ||
+               adev->asic_type == CHIP_SIENNA_CICHLID ||
+               adev->asic_type == CHIP_ALDEBARAN;
  }
  
  static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
-                                      uint16_t *i2c_addr)
+                                      struct amdgpu_ras_eeprom_control *control)
  {
        struct atom_context *atom_ctx = adev->mode_info.atom_context;
  
-       if (!i2c_addr || !atom_ctx)
+       if (!control || !atom_ctx)
                return false;
  
        if (strnstr(atom_ctx->vbios_version,
                    "D342",
                    sizeof(atom_ctx->vbios_version)))
-               *i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342;
+               control->i2c_address = EEPROM_I2C_MADDR_ARCTURUS_D342;
        else
-               *i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
+               control->i2c_address = EEPROM_I2C_MADDR_ARCTURUS;
  
        return true;
  }
  
  static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
-                                 uint16_t *i2c_addr)
+                                 struct amdgpu_ras_eeprom_control *control)
  {
-       if (!i2c_addr)
++      uint8_t ras_rom_i2c_slave_addr;
++
+       if (!control)
                return false;
  
-       if (amdgpu_atomfirmware_ras_rom_addr(adev, (uint8_t*)i2c_addr))
-               return true;
+       control->i2c_address = 0;
 -      if (amdgpu_atomfirmware_ras_rom_addr(adev, (uint8_t*)&control->i2c_address))
++      if (amdgpu_atomfirmware_ras_rom_addr(adev, &ras_rom_i2c_slave_addr))
+       {
 -              if (control->i2c_address == 0xA0)
++              switch (ras_rom_i2c_slave_addr) {
++              case 0xA0:
+                       control->i2c_address = 0;
 -              else if (control->i2c_address == 0xA8)
++                      return true;
++              case 0xA8:
+                       control->i2c_address = 0x40000;
 -              else {
 -                      dev_warn(adev->dev, "RAS EEPROM I2C address not supported");
++                      return true;
++              default:
++                      dev_warn(adev->dev, "RAS EEPROM I2C slave address %02x not supported",
++                               ras_rom_i2c_slave_addr);
+                       return false;
+               }
 -
 -              return true;
+       }
  
        switch (adev->asic_type) {
        case CHIP_VEGA20:
-               *i2c_addr = EEPROM_I2C_TARGET_ADDR_VEGA20;
+               control->i2c_address = EEPROM_I2C_MADDR_VEGA20;
                break;
  
        case CHIP_ARCTURUS:
-               return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
+               return __get_eeprom_i2c_addr_arct(adev, control);
  
        case CHIP_SIENNA_CICHLID:
-               *i2c_addr = EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID;
+               control->i2c_address = EEPROM_I2C_MADDR_SIENNA_CICHLID;
                break;
  
        case CHIP_ALDEBARAN:
-               *i2c_addr = EEPROM_I2C_TARGET_ADDR_ALDEBARAN;
+               control->i2c_address = EEPROM_I2C_MADDR_ALDEBARAN;
                break;
  
        default:
        return true;
  }
  
- static void __encode_table_header_to_buff(struct amdgpu_ras_eeprom_table_header *hdr,
-                                         unsigned char *buff)
+ static void
+ __encode_table_header_to_buf(struct amdgpu_ras_eeprom_table_header *hdr,
+                            unsigned char *buf)
  {
-       uint32_t *pp = (uint32_t *) buff;
+       u32 *pp = (uint32_t *)buf;
  
        pp[0] = cpu_to_le32(hdr->header);
        pp[1] = cpu_to_le32(hdr->version);
        pp[4] = cpu_to_le32(hdr->checksum);
  }
  
- static void __decode_table_header_from_buff(struct amdgpu_ras_eeprom_table_header *hdr,
-                                         unsigned char *buff)
+ static void
+ __decode_table_header_from_buf(struct amdgpu_ras_eeprom_table_header *hdr,
+                              unsigned char *buf)
  {
-       uint32_t *pp = (uint32_t *)buff;
+       u32 *pp = (uint32_t *)buf;
  
        hdr->header           = le32_to_cpu(pp[0]);
        hdr->version          = le32_to_cpu(pp[1]);
        hdr->checksum         = le32_to_cpu(pp[4]);
  }
  
- static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
-                                unsigned char *buff)
+ static int __write_table_header(struct amdgpu_ras_eeprom_control *control)
  {
-       int ret = 0;
+       u8 buf[RAS_TABLE_HEADER_SIZE];
        struct amdgpu_device *adev = to_amdgpu_device(control);
-       struct i2c_msg msg = {
-                       .addr   = 0,
-                       .flags  = 0,
-                       .len    = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
-                       .buf    = buff,
-       };
+       int res;
  
-       *(uint16_t *)buff = EEPROM_HDR_START;
-       __encode_table_header_to_buff(&control->tbl_hdr, buff + EEPROM_ADDRESS_SIZE);
-       msg.addr = control->i2c_address;
+       memset(buf, 0, sizeof(buf));
+       __encode_table_header_to_buf(&control->tbl_hdr, buf);
  
        /* i2c may be unstable in gpu reset */
        down_read(&adev->reset_sem);
-       ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
+       res = amdgpu_eeprom_write(&adev->pm.smu_i2c,
+                                 control->i2c_address +
+                                 control->ras_header_offset,
+                                 buf, RAS_TABLE_HEADER_SIZE);
        up_read(&adev->reset_sem);
  
-       if (ret < 1)
-               DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret);
-       return ret;
- }
- static uint32_t  __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
- {
-       int i;
-       uint32_t tbl_sum = 0;
-       /* Header checksum, skip checksum field in the calculation */
-       for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++)
-               tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i);
-       return tbl_sum;
- }
- static uint32_t  __calc_recs_byte_sum(struct eeprom_table_record *records,
-                                     int num)
- {
-       int i, j;
-       uint32_t tbl_sum = 0;
-       /* Records checksum */
-       for (i = 0; i < num; i++) {
-               struct eeprom_table_record *record = &records[i];
-               for (j = 0; j < sizeof(*record); j++) {
-                       tbl_sum += *(((unsigned char *)record) + j);
-               }
+       if (res < 0) {
+               DRM_ERROR("Failed to write EEPROM table header:%d", res);
+       } else if (res < RAS_TABLE_HEADER_SIZE) {
+               DRM_ERROR("Short write:%d out of %d\n",
+                         res, RAS_TABLE_HEADER_SIZE);
+               res = -EIO;
+       } else {
+               res = 0;
        }
  
-       return tbl_sum;
- }
- static inline uint32_t  __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control,
-                                 struct eeprom_table_record *records, int num)
- {
-       return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num);
- }
- /* Checksum = 256 -((sum of all table entries) mod 256) */
- static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
-                                 struct eeprom_table_record *records, int num,
-                                 uint32_t old_hdr_byte_sum)
- {
-       /*
-        * This will update the table sum with new records.
-        *
-        * TODO: What happens when the EEPROM table is to be wrapped around
-        * and old records from start will get overridden.
-        */
-       /* need to recalculate updated header byte sum */
-       control->tbl_byte_sum -= old_hdr_byte_sum;
-       control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num);
-       control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256);
+       return res;
  }
  
- /* table sum mod 256 + checksum must equals 256 */
- static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control,
-                           struct eeprom_table_record *records, int num)
+ static u8 __calc_hdr_byte_sum(const struct amdgpu_ras_eeprom_control *control)
  {
-       control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num);
+       int ii;
+       u8  *pp, csum;
+       size_t sz;
  
-       if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) {
-               DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum);
-               return false;
-       }
+       /* Header checksum, skip checksum field in the calculation */
+       sz = sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum);
+       pp = (u8 *) &control->tbl_hdr;
+       csum = 0;
+       for (ii = 0; ii < sz; ii++, pp++)
+               csum += *pp;
  
-       return true;
+       return csum;
  }
  
  static int amdgpu_ras_eeprom_correct_header_tag(
-                               struct amdgpu_ras_eeprom_control *control,
-                               uint32_t header)
+       struct amdgpu_ras_eeprom_control *control,
+       uint32_t header)
  {
-       unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE];
        struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
-       int ret = 0;
-       memset(buff, 0, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE);
-       mutex_lock(&control->tbl_mutex);
+       u8 *hh;
+       int res;
+       u8 csum;
+       csum = -hdr->checksum;
+       hh = (void *) &hdr->header;
+       csum -= (hh[0] + hh[1] + hh[2] + hh[3]);
+       hh = (void *) &header;
+       csum += hh[0] + hh[1] + hh[2] + hh[3];
+       csum = -csum;
+       mutex_lock(&control->ras_tbl_mutex);
        hdr->header = header;
-       ret = __update_table_header(control, buff);
-       mutex_unlock(&control->tbl_mutex);
+       hdr->checksum = csum;
+       res = __write_table_header(control);
+       mutex_unlock(&control->ras_tbl_mutex);
  
-       return ret;
+       return res;
  }
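
The checksum convention here is that the byte sum of the table plus the stored checksum is 0 modulo 256 (i.e. checksum = 256 - (sum mod 256)), so swapping the header tag only requires the incremental adjustment done above rather than re-summing the whole table. A standalone sketch of that adjustment, with hypothetical names (not the driver's code):

static u8 example_patch_checksum(u8 old_csum, u32 old_tag, u32 new_tag)
{
        const u8 *o = (const u8 *)&old_tag;
        const u8 *n = (const u8 *)&new_tag;
        u8 sum = -old_csum;                     /* byte sum of everything else */

        sum -= o[0] + o[1] + o[2] + o[3];       /* drop the old tag's bytes */
        sum += n[0] + n[1] + n[2] + n[3];       /* add the new tag's bytes */
        return -sum;                            /* new checksum */
}
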
  
+ /**
+  * amdgpu_ras_eeprom_reset_table -- Reset the RAS EEPROM table
+  * @control: pointer to control structure
+  *
+  * Reset the contents of the header of the RAS EEPROM table.
+  * Return 0 on success, -errno on error.
+  */
  int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control)
  {
-       unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
        struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
-       int ret = 0;
-       mutex_lock(&control->tbl_mutex);
-       hdr->header = EEPROM_TABLE_HDR_VAL;
-       hdr->version = EEPROM_TABLE_VER;
-       hdr->first_rec_offset = EEPROM_RECORD_START;
-       hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE;
-       control->tbl_byte_sum = 0;
-       __update_tbl_checksum(control, NULL, 0, 0);
-       control->next_addr = EEPROM_RECORD_START;
-       ret = __update_table_header(control, buff);
-       mutex_unlock(&control->tbl_mutex);
-       return ret;
- }
- int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
-                       bool *exceed_err_limit)
- {
-       int ret = 0;
-       struct amdgpu_device *adev = to_amdgpu_device(control);
-       unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 };
-       struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
-       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
-       struct i2c_msg msg = {
-                       .addr   = 0,
-                       .flags  = I2C_M_RD,
-                       .len    = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE,
-                       .buf    = buff,
-       };
-       *exceed_err_limit = false;
-       if (!__is_ras_eeprom_supported(adev))
-               return 0;
-       /* Verify i2c adapter is initialized */
-       if (!adev->pm.smu_i2c.algo)
-               return -ENOENT;
+       u8 csum;
+       int res;
  
-       if (!__get_eeprom_i2c_addr(adev, &control->i2c_address))
-               return -EINVAL;
+       mutex_lock(&control->ras_tbl_mutex);
  
-       mutex_init(&control->tbl_mutex);
+       hdr->header = RAS_TABLE_HDR_VAL;
+       hdr->version = RAS_TABLE_VER;
+       hdr->first_rec_offset = RAS_RECORD_START;
+       hdr->tbl_size = RAS_TABLE_HEADER_SIZE;
  
-       msg.addr = control->i2c_address;
-       /* Read/Create table header from EEPROM address 0 */
-       ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
-       if (ret < 1) {
-               DRM_ERROR("Failed to read EEPROM table header, ret:%d", ret);
-               return ret;
-       }
+       csum = __calc_hdr_byte_sum(control);
+       csum = -csum;
+       hdr->checksum = csum;
+       res = __write_table_header(control);
  
-       __decode_table_header_from_buff(hdr, &buff[2]);
+       control->ras_num_recs = 0;
+       control->ras_fri = 0;
  
-       if (hdr->header == EEPROM_TABLE_HDR_VAL) {
-               control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) /
-                                   EEPROM_TABLE_RECORD_SIZE;
-               control->tbl_byte_sum = __calc_hdr_byte_sum(control);
-               control->next_addr = EEPROM_RECORD_START;
+       amdgpu_ras_debugfs_set_ret_size(control);
  
-               DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
-                                control->num_recs);
-       } else if ((hdr->header == EEPROM_TABLE_HDR_BAD) &&
-                       (amdgpu_bad_page_threshold != 0)) {
-               if (ras->bad_page_cnt_threshold > control->num_recs) {
-                       dev_info(adev->dev, "Using one valid bigger bad page "
-                               "threshold and correcting eeprom header tag.\n");
-                       ret = amdgpu_ras_eeprom_correct_header_tag(control,
-                                                       EEPROM_TABLE_HDR_VAL);
-               } else {
-                       *exceed_err_limit = true;
-                       dev_err(adev->dev, "Exceeding the bad_page_threshold parameter, "
-                               "disabling the GPU.\n");
-               }
-       } else {
-               DRM_INFO("Creating new EEPROM table");
+       mutex_unlock(&control->ras_tbl_mutex);
  
-               ret = amdgpu_ras_eeprom_reset_table(control);
-       }
-       return ret == 1 ? 0 : -EIO;
+       return res;
  }
  
- static void __encode_table_record_to_buff(struct amdgpu_ras_eeprom_control *control,
-                                         struct eeprom_table_record *record,
-                                         unsigned char *buff)
+ static void
+ __encode_table_record_to_buf(struct amdgpu_ras_eeprom_control *control,
+                            struct eeprom_table_record *record,
+                            unsigned char *buf)
  {
        __le64 tmp = 0;
        int i = 0;
  
        /* Next are all record fields according to EEPROM page spec in LE format */
-       buff[i++] = record->err_type;
+       buf[i++] = record->err_type;
  
-       buff[i++] = record->bank;
+       buf[i++] = record->bank;
  
        tmp = cpu_to_le64(record->ts);
-       memcpy(buff + i, &tmp, 8);
+       memcpy(buf + i, &tmp, 8);
        i += 8;
  
        tmp = cpu_to_le64((record->offset & 0xffffffffffff));
-       memcpy(buff + i, &tmp, 6);
+       memcpy(buf + i, &tmp, 6);
        i += 6;
  
-       buff[i++] = record->mem_channel;
-       buff[i++] = record->mcumc_id;
+       buf[i++] = record->mem_channel;
+       buf[i++] = record->mcumc_id;
  
        tmp = cpu_to_le64((record->retired_page & 0xffffffffffff));
-       memcpy(buff + i, &tmp, 6);
+       memcpy(buf + i, &tmp, 6);
  }
  
- static void __decode_table_record_from_buff(struct amdgpu_ras_eeprom_control *control,
-                                           struct eeprom_table_record *record,
-                                           unsigned char *buff)
+ static void
+ __decode_table_record_from_buf(struct amdgpu_ras_eeprom_control *control,
+                              struct eeprom_table_record *record,
+                              unsigned char *buf)
  {
        __le64 tmp = 0;
        int i =  0;
  
        /* Next are all record fields according to EEPROM page spec in LE format */
-       record->err_type = buff[i++];
+       record->err_type = buf[i++];
  
-       record->bank = buff[i++];
+       record->bank = buf[i++];
  
-       memcpy(&tmp, buff + i, 8);
+       memcpy(&tmp, buf + i, 8);
        record->ts = le64_to_cpu(tmp);
        i += 8;
  
-       memcpy(&tmp, buff + i, 6);
+       memcpy(&tmp, buf + i, 6);
        record->offset = (le64_to_cpu(tmp) & 0xffffffffffff);
        i += 6;
  
-       record->mem_channel = buff[i++];
-       record->mcumc_id = buff[i++];
+       record->mem_channel = buf[i++];
+       record->mcumc_id = buf[i++];
  
-       memcpy(&tmp, buff + i,  6);
+       memcpy(&tmp, buf + i,  6);
        record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff);
  }
  
- /*
-  * When reaching end of EEPROM memory jump back to 0 record address
-  * When next record access will go beyond EEPROM page boundary modify bits A17/A8
-  * in I2C selector to go to next page
-  */
- static uint32_t __correct_eeprom_dest_address(uint32_t curr_address)
- {
-       uint32_t next_address = curr_address + EEPROM_TABLE_RECORD_SIZE;
-       /* When all EEPROM memory used jump back to 0 address */
-       if (next_address > EEPROM_SIZE_BYTES) {
-               DRM_INFO("Reached end of EEPROM memory, jumping to 0 "
-                        "and overriding old record");
-               return EEPROM_RECORD_START;
-       }
-       /*
-        * To check if we overflow page boundary  compare next address with
-        * current and see if bits 17/8 of the EEPROM address will change
-        * If they do start from the next 256b page
-        *
-        * https://www.st.com/resource/en/datasheet/m24m02-dr.pdf sec. 5.1.2
-        */
-       if ((curr_address & EEPROM_ADDR_MSB_MASK) != (next_address & EEPROM_ADDR_MSB_MASK)) {
-               DRM_DEBUG_DRIVER("Reached end of EEPROM memory page, jumping to next: %lx",
-                               (next_address & EEPROM_ADDR_MSB_MASK));
-               return  (next_address & EEPROM_ADDR_MSB_MASK);
-       }
-       return curr_address;
- }
  bool amdgpu_ras_eeprom_check_err_threshold(struct amdgpu_device *adev)
  {
        struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
                if (!(con->features & BIT(AMDGPU_RAS_BLOCK__UMC)))
                        return false;
  
-       if (con->eeprom_control.tbl_hdr.header == EEPROM_TABLE_HDR_BAD) {
+       if (con->eeprom_control.tbl_hdr.header == RAS_TABLE_HDR_BAD) {
                dev_warn(adev->dev, "This GPU is in BAD status.");
-               dev_warn(adev->dev, "Please retire it or setting one bigger "
-                               "threshold value when reloading driver.\n");
+               dev_warn(adev->dev, "Please retire it or set a larger "
+                        "threshold value when reloading driver.\n");
                return true;
        }
  
        return false;
  }
  
- int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
-                                           struct eeprom_table_record *records,
-                                           bool write,
-                                           int num)
+ /**
+  * __amdgpu_ras_eeprom_write -- write indexed from buffer to EEPROM
+  * @control: pointer to control structure
+  * @buf: pointer to buffer containing data to write
+  * @fri: start writing at this index
+  * @num: number of records to write
+  *
+  * The caller must hold the table mutex in @control.
+  * Return 0 on success, -errno otherwise.
+  */
+ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control,
+                                    u8 *buf, const u32 fri, const u32 num)
  {
-       int i, ret = 0;
-       struct i2c_msg *msgs, *msg;
-       unsigned char *buffs, *buff;
-       struct eeprom_table_record *record;
        struct amdgpu_device *adev = to_amdgpu_device(control);
-       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+       u32 buf_size;
+       int res;
  
-       if (!__is_ras_eeprom_supported(adev))
-               return 0;
+       /* i2c may be unstable in gpu reset */
+       down_read(&adev->reset_sem);
+       buf_size = num * RAS_TABLE_RECORD_SIZE;
+       res = amdgpu_eeprom_write(&adev->pm.smu_i2c,
+                                 control->i2c_address +
+                                 RAS_INDEX_TO_OFFSET(control, fri),
+                                 buf, buf_size);
+       up_read(&adev->reset_sem);
+       if (res < 0) {
+               DRM_ERROR("Writing %d EEPROM table records error:%d",
+                         num, res);
+       } else if (res < buf_size) {
+               /* Short write, return error.
+                */
+               DRM_ERROR("Wrote %d records out of %d",
+                         res / RAS_TABLE_RECORD_SIZE, num);
+               res = -EIO;
+       } else {
+               res = 0;
+       }
  
-       buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE,
-                        GFP_KERNEL);
-       if (!buffs)
-               return -ENOMEM;
+       return res;
+ }
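
A quick note on the offset math used by the helper above and by its read counterpart further down: RAS_INDEX_TO_OFFSET() is defined elsewhere in this series, so the sketch below is only a hedged reconstruction of what the mapping presumably looks like (records packed back to back after the record-area offset), not the upstream macro.

    /* Hedged sketch of the index-to-offset mapping assumed above:
     * records are packed back to back after the record-area offset,
     * RAS_TABLE_RECORD_SIZE bytes apiece. Illustration only.
     */
    #include <stdint.h>

    static inline uint32_t ras_index_to_offset(uint32_t record_area_offset,
                                               uint32_t record_size,
                                               uint32_t index)
    {
            return record_area_offset + index * record_size;
    }
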
  
-       mutex_lock(&control->tbl_mutex);
+ static int
+ amdgpu_ras_eeprom_append_table(struct amdgpu_ras_eeprom_control *control,
+                              struct eeprom_table_record *record,
+                              const u32 num)
+ {
+       u32 a, b, i;
+       u8 *buf, *pp;
+       int res;
+       buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
  
-       msgs = kcalloc(num, sizeof(*msgs), GFP_KERNEL);
-       if (!msgs) {
-               ret = -ENOMEM;
-               goto free_buff;
+       /* Encode all of them in one go.
+        */
+       pp = buf;
+       for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE)
+               __encode_table_record_to_buf(control, &record[i], pp);
+       /* a, first record index to write into.
+        * b, last record index to write into.
+        * a = first index to read (fri) + number of records in the table,
+        * b = a + @num - 1.
+        * Let N = control->ras_max_num_record_count, then we have,
+        * case 0: 0 <= a <= b < N,
+        *   just append @num records starting at a;
+        * case 1: 0 <= a < N <= b,
+        *   append (N - a) records starting at a, and
+        *   append the remainder, b % N + 1, starting at 0.
+        * case 2: 0 <= fri < N <= a <= b, then modulo N we get two subcases,
+        * case 2a: 0 <= a <= b < N
+        *   append @num records starting at a, and bump fri if b overwrote
+        *   the record at fri; since a <= b, if b overwrote it then a must
+        *   have as well, and if b didn't overwrite it, then neither did a.
+        * case 2b: 0 <= b < a < N
+        *   write @num records starting at a, wrapping past the end of the
+        *   table back to index 0, and bump fri unconditionally. Following
+        *   case 2a, this means that b eclipsed fri, overwrote it and
+        *   wrapped around 0 again, i.e. b = 2N + r before the modulo N,
+        *   so we unconditionally set fri = b + 1 (mod N).
+        * Now, since fri is updated in every case except the trivial case 0,
+        * the number of records present in the table after writing is
+        * num_recs - 1 = b - fri (mod N); we take the positive value by
+        * adding an arbitrary multiple of N before taking the modulo N,
+        * as shown below.
+        */
+       a = control->ras_fri + control->ras_num_recs;
+       b = a + num - 1;
+       if (b < control->ras_max_record_count) {
+               res = __amdgpu_ras_eeprom_write(control, buf, a, num);
+       } else if (a < control->ras_max_record_count) {
+               u32 g0, g1;
+               g0 = control->ras_max_record_count - a;
+               g1 = b % control->ras_max_record_count + 1;
+               res = __amdgpu_ras_eeprom_write(control, buf, a, g0);
+               if (res)
+                       goto Out;
+               res = __amdgpu_ras_eeprom_write(control,
+                                               buf + g0 * RAS_TABLE_RECORD_SIZE,
+                                               0, g1);
+               if (res)
+                       goto Out;
+               if (g1 > control->ras_fri)
+                       control->ras_fri = g1 % control->ras_max_record_count;
+       } else {
+               a %= control->ras_max_record_count;
+               b %= control->ras_max_record_count;
+               if (a <= b) {
+                       /* Note that, b - a + 1 = num. */
+                       res = __amdgpu_ras_eeprom_write(control, buf, a, num);
+                       if (res)
+                               goto Out;
+                       if (b >= control->ras_fri)
+                               control->ras_fri = (b + 1) % control->ras_max_record_count;
+               } else {
+                       u32 g0, g1;
+                       /* b < a, which means, we write from
+                        * a to the end of the table, and from
+                        * the start of the table to b.
+                        */
+                       g0 = control->ras_max_record_count - a;
+                       g1 = b + 1;
+                       res = __amdgpu_ras_eeprom_write(control, buf, a, g0);
+                       if (res)
+                               goto Out;
+                       res = __amdgpu_ras_eeprom_write(control,
+                                                       buf + g0 * RAS_TABLE_RECORD_SIZE,
+                                                       0, g1);
+                       if (res)
+                               goto Out;
+                       control->ras_fri = g1 % control->ras_max_record_count;
+               }
        }
+       control->ras_num_recs = 1 + (control->ras_max_record_count + b
+                                    - control->ras_fri)
+               % control->ras_max_record_count;
+ Out:
+       kfree(buf);
+       return res;
+ }
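
To make the case analysis in the comment above concrete, here is a small stand-alone walk-through of one wrap-around append using the same a/b/g0/g1 arithmetic. The numbers are invented for illustration; only the formulas come from the function above.

    #include <stdio.h>

    int main(void)
    {
            /* Ring of N = 8 slots, first record index fri = 2, 5 records
             * already stored (indices 2..6), and num = 4 records to append.
             */
            unsigned N = 8, fri = 2, num_recs = 5, num = 4;
            unsigned a = fri + num_recs;          /* 7  */
            unsigned b = a + num - 1;             /* 10: wraps, so case 1   */
            unsigned g0 = N - a;                  /* 1 record at index 7    */
            unsigned g1 = b % N + 1;              /* 3 records at 0, 1, 2   */

            if (g1 > fri)                         /* old fri (2) overwritten */
                    fri = g1 % N;                 /* new fri = 3             */
            num_recs = 1 + (N + b - fri) % N;     /* 1 + (8+10-3) % 8 = 8    */

            printf("g0=%u g1=%u fri=%u num_recs=%u\n", g0, g1, fri, num_recs);
            return 0;
    }
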
  
-       /*
-        * If saved bad pages number exceeds the bad page threshold for
-        * the whole VRAM, update table header to mark the BAD GPU tag
-        * and schedule one ras recovery after eeprom write is done,
-        * this can avoid the missing for latest records.
-        *
-        * This new header will be picked up and checked in the bootup
-        * by ras recovery, which may break bootup process to notify
-        * user this GPU is in bad state and to retire such GPU for
-        * further check.
+ static int
+ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control)
+ {
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+       u8 *buf, *pp, csum;
+       u32 buf_size;
+       int res;
+
+       /* Mark the header BAD if the number of saved bad pages
+        * reaches the bad page threshold.
         */
-       if (write && (amdgpu_bad_page_threshold != 0) &&
-               ((control->num_recs + num) >= ras->bad_page_cnt_threshold)) {
+       if (amdgpu_bad_page_threshold != 0 &&
+           control->ras_num_recs >= ras->bad_page_cnt_threshold) {
                dev_warn(adev->dev,
-                       "Saved bad pages(%d) reaches threshold value(%d).\n",
-                       control->num_recs + num, ras->bad_page_cnt_threshold);
-               control->tbl_hdr.header = EEPROM_TABLE_HDR_BAD;
+                       "Saved bad pages %d reaches threshold value %d\n",
+                       control->ras_num_recs, ras->bad_page_cnt_threshold);
+               control->tbl_hdr.header = RAS_TABLE_HDR_BAD;
+       }
+       control->tbl_hdr.version = RAS_TABLE_VER;
+       control->tbl_hdr.first_rec_offset = RAS_INDEX_TO_OFFSET(control, control->ras_fri);
+       control->tbl_hdr.tbl_size = RAS_TABLE_HEADER_SIZE + control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+       control->tbl_hdr.checksum = 0;
+       buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+       buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+       if (!buf) {
+               DRM_ERROR("allocating memory for table of size %d bytes failed\n",
+                         control->tbl_hdr.tbl_size);
+               res = -ENOMEM;
+               goto Out;
        }
  
-       /* In case of overflow just start from beginning to not lose newest records */
-       if (write && (control->next_addr + EEPROM_TABLE_RECORD_SIZE * num > EEPROM_SIZE_BYTES))
-               control->next_addr = EEPROM_RECORD_START;
+       down_read(&adev->reset_sem);
+       res = amdgpu_eeprom_read(&adev->pm.smu_i2c,
+                                control->i2c_address +
+                                control->ras_record_offset,
+                                buf, buf_size);
+       up_read(&adev->reset_sem);
+       if (res < 0) {
+               DRM_ERROR("EEPROM failed reading records:%d\n",
+                         res);
+               goto Out;
+       } else if (res < buf_size) {
+               DRM_ERROR("EEPROM read %d out of %d bytes\n",
+                         res, buf_size);
+               res = -EIO;
+               goto Out;
+       }
  
-       /*
-        * TODO Currently makes EEPROM writes for each record, this creates
-        * internal fragmentation. Optimized the code to do full page write of
-        * 256b
+       /* Recalc the checksum.
         */
-       for (i = 0; i < num; i++) {
-               buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
-               record = &records[i];
-               msg = &msgs[i];
+       csum = 0;
+       for (pp = buf; pp < buf + buf_size; pp++)
+               csum += *pp;
+       csum += __calc_hdr_byte_sum(control);
+       /* avoid sign extension when assigning to "checksum" */
+       csum = -csum;
+       control->tbl_hdr.checksum = csum;
+       res = __write_table_header(control);
+ Out:
+       kfree(buf);
+       return res;
+ }
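
The checksum convention used here is worth spelling out: the header's checksum byte is chosen so that the unsigned 8-bit sum of every byte of the table (header, checksum field included, plus all records) is zero, which is exactly what __verify_ras_table_checksum() below re-checks. A minimal illustration:

    #include <stddef.h>
    #include <stdint.h>

    /* Sum every byte of the serialized table; a consistent table sums to 0
     * because the checksum byte was set to the negated sum of the rest.
     */
    static uint8_t ras_table_byte_sum(const uint8_t *table, size_t len)
    {
            uint8_t sum = 0;

            while (len--)
                    sum += *table++;
            return sum; /* 0 means the checksum is consistent */
    }
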
  
-               control->next_addr = __correct_eeprom_dest_address(control->next_addr);
+ /**
+  * amdgpu_ras_eeprom_append -- append records to the EEPROM RAS table
+  * @control: pointer to control structure
+  * @record: array of records to append
+  * @num: number of records in @record array
+  *
+  * Append @num records to the table, calculate the checksum and write
+  * the table back to EEPROM. The number of records to append,
+  * @num, must be between 1 and control->ras_max_record_count
+  * inclusive, regardless of how many records are already stored
+  * in the table.
+  *
+  * Return 0 on success or if EEPROM is not supported, -errno on error.
+  */
+ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control,
+                            struct eeprom_table_record *record,
+                            const u32 num)
+ {
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       int res;
  
-               /*
-                * Update bits 16,17 of EEPROM address in I2C address by setting them
-                * to bits 1,2 of Device address byte
-                */
-               msg->addr = control->i2c_address |
-                               ((control->next_addr & EEPROM_ADDR_MSB_MASK) >> 15);
-               msg->flags      = write ? 0 : I2C_M_RD;
-               msg->len        = EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE;
-               msg->buf        = buff;
-               /* Insert the EEPROM dest addess, bits 0-15 */
-               buff[0] = ((control->next_addr >> 8) & 0xff);
-               buff[1] = (control->next_addr & 0xff);
-               /* EEPROM table content is stored in LE format */
-               if (write)
-                       __encode_table_record_to_buff(control, record, buff + EEPROM_ADDRESS_SIZE);
-               /*
-                * The destination EEPROM address might need to be corrected to account
-                * for page or entire memory wrapping
-                */
-               control->next_addr += EEPROM_TABLE_RECORD_SIZE;
+       if (!__is_ras_eeprom_supported(adev))
+               return 0;
+       if (num == 0) {
+               DRM_ERROR("will not append 0 records\n");
+               return -EINVAL;
+       } else if (num > control->ras_max_record_count) {
+               DRM_ERROR("cannot append %d records, exceeding the table size %d\n",
+                         num, control->ras_max_record_count);
+               return -EINVAL;
        }
  
+       mutex_lock(&control->ras_tbl_mutex);
+       res = amdgpu_ras_eeprom_append_table(control, record, num);
+       if (!res)
+               res = amdgpu_ras_eeprom_update_header(control);
+       if (!res)
+               amdgpu_ras_debugfs_set_ret_size(control);
+       mutex_unlock(&control->ras_tbl_mutex);
+       return res;
+ }
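
For context, a hedged sketch of how a caller in the bad-page retirement path would be expected to use the new API; the function and variable names below are assumptions made for illustration, not code from this patch.

    /* Hypothetical caller: push freshly retired pages out to EEPROM.
     * 'bps' and 'count' are assumed to come from the RAS bad-page list.
     */
    static int example_save_bad_pages(struct amdgpu_device *adev,
                                      struct eeprom_table_record *bps,
                                      u32 count)
    {
            struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

            if (!count)
                    return 0;

            /* Appends the records, then recomputes the checksum and
             * rewrites the table header in one go.
             */
            return amdgpu_ras_eeprom_append(&ras->eeprom_control, bps, count);
    }
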
+ /**
+  * __amdgpu_ras_eeprom_read -- read indexed from EEPROM into buffer
+  * @control: pointer to control structure
+  * @buf: pointer to buffer to read into
+  * @fri: first record index, start reading at this index, absolute index
+  * @num: number of records to read
+  *
+  * The caller must hold the table mutex in @control.
+  * Return 0 on success, -errno otherwise.
+  */
+ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
+                                   u8 *buf, const u32 fri, const u32 num)
+ {
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       u32 buf_size;
+       int res;
        /* i2c may be unstable in gpu reset */
        down_read(&adev->reset_sem);
-       ret = i2c_transfer(&adev->pm.smu_i2c, msgs, num);
+       buf_size = num * RAS_TABLE_RECORD_SIZE;
+       res = amdgpu_eeprom_read(&adev->pm.smu_i2c,
+                                control->i2c_address +
+                                RAS_INDEX_TO_OFFSET(control, fri),
+                                buf, buf_size);
        up_read(&adev->reset_sem);
+       if (res < 0) {
+               DRM_ERROR("Reading %d EEPROM table records error:%d",
+                         num, res);
+       } else if (res < buf_size) {
+               /* Short read, return error.
+                */
+               DRM_ERROR("Read %d records out of %d",
+                         res / RAS_TABLE_RECORD_SIZE, num);
+               res = -EIO;
+       } else {
+               res = 0;
+       }
+       return res;
+ }
+
+ /**
+  * amdgpu_ras_eeprom_read -- read EEPROM
+  * @control: pointer to control structure
+  * @record: array of records to read into
+  * @num: number of records in @record
+  *
+  * Reads @num records from the RAS table in EEPROM into
+  * the @record array.
+  *
+  * Returns 0 on success, -errno on error.
+  */
+ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control,
+                          struct eeprom_table_record *record,
+                          const u32 num)
+ {
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       int i, res;
+       u8 *buf, *pp;
+       u32 g0, g1;
  
-       if (ret < 1) {
-               DRM_ERROR("Failed to process EEPROM table records, ret:%d", ret);
+       if (!__is_ras_eeprom_supported(adev))
+               return 0;
  
-               /* TODO Restore prev next EEPROM address ? */
-               goto free_msgs;
+       if (num == 0) {
+               DRM_ERROR("will not read 0 records\n");
+               return -EINVAL;
+       } else if (num > control->ras_num_recs) {
+               DRM_ERROR("too many records to read:%d available:%d\n",
+                         num, control->ras_num_recs);
+               return -EINVAL;
        }
  
+       buf = kcalloc(num, RAS_TABLE_RECORD_SIZE, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
  
-       if (!write) {
-               for (i = 0; i < num; i++) {
-                       buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)];
-                       record = &records[i];
+       /* Determine how many records to read, from the first record
+        * index, fri, to the end of the table, and from the beginning
+        * of the table, such that the total number of records is
+        * @num, and we handle wrap around when fri > 0 and
+        * fri + num > RAS_MAX_RECORD_COUNT.
+        *
+        * First we compute the index of the last element
+        * which would be fetched from each region,
+        * g0 is in [fri, fri + num - 1], and
+        * g1 is in [0, RAS_MAX_RECORD_COUNT - 1].
+        * Then, if g0, the index of the last element to fetch,
+        * is less than RAS_MAX_RECORD_COUNT, we set g0 to _the number_
+        * of elements to fetch, @num, since we know that the last
+        * index to be fetched does not exceed the table.
+        *
+        * If, however, g0 >= RAS_MAX_RECORD_COUNT, then
+        * we set g0 to the number of elements to read
+        * until the end of the table, and g1 to the number of
+        * elements to read from the beginning of the table.
+        */
+       g0 = control->ras_fri + num - 1;
+       g1 = g0 % control->ras_max_record_count;
+       if (g0 < control->ras_max_record_count) {
+               g0 = num;
+               g1 = 0;
+       } else {
+               g0 = control->ras_max_record_count - control->ras_fri;
+               g1 += 1;
+       }
  
-                       __decode_table_record_from_buff(control, record, buff + EEPROM_ADDRESS_SIZE);
-               }
+       mutex_lock(&control->ras_tbl_mutex);
+       res = __amdgpu_ras_eeprom_read(control, buf, control->ras_fri, g0);
+       if (res)
+               goto Out;
+       if (g1) {
+               res = __amdgpu_ras_eeprom_read(control,
+                                              buf + g0 * RAS_TABLE_RECORD_SIZE,
+                                              0, g1);
+               if (res)
+                       goto Out;
        }
  
-       if (write) {
-               uint32_t old_hdr_byte_sum = __calc_hdr_byte_sum(control);
+       res = 0;
+       /* All records read; now decode them into the @record array.
+        */
+       pp = buf;
+       for (i = 0; i < num; i++, pp += RAS_TABLE_RECORD_SIZE)
+               __decode_table_record_from_buf(control, &record[i], pp);
+ Out:
+       kfree(buf);
+       mutex_unlock(&control->ras_tbl_mutex);
+       return res;
+ }
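
A worked example of the g0/g1 split described in the comment above, with invented numbers; only the arithmetic is taken from the function.

    #include <stdio.h>

    int main(void)
    {
            /* N = 8 slot table, first record index fri = 6, num = 5 records
             * requested: the read wraps past the end of the table.
             */
            unsigned N = 8, fri = 6, num = 5;
            unsigned g0 = fri + num - 1;          /* 10: last index, pre-wrap */
            unsigned g1 = g0 % N;                 /* 2                        */

            if (g0 < N) {
                    g0 = num;                     /* no wrap: one read        */
                    g1 = 0;
            } else {
                    g0 = N - fri;                 /* 2 records at 6, 7        */
                    g1 += 1;                      /* 3 records at 0, 1, 2     */
            }
            printf("g0=%u g1=%u\n", g0, g1);      /* g0=2 g1=3                */
            return 0;
    }
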
+ inline uint32_t amdgpu_ras_eeprom_max_record_count(void)
+ {
+       return RAS_MAX_RECORD_COUNT;
+ }
+
+ static ssize_t
+ amdgpu_ras_debugfs_eeprom_size_read(struct file *f, char __user *buf,
+                                   size_t size, loff_t *pos)
+ {
+       struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+       struct amdgpu_ras_eeprom_control *control = ras ? &ras->eeprom_control : NULL;
+       u8 data[50];
+       int res;
+       if (!size)
+               return size;
+       if (!ras || !control) {
+               res = snprintf(data, sizeof(data), "Not supported\n");
+       } else {
+               res = snprintf(data, sizeof(data), "%d bytes or %d records\n",
+                              RAS_TBL_SIZE_BYTES, control->ras_max_record_count);
+       }
+       if (*pos >= res)
+               return 0;
+       res -= *pos;
+       res = min_t(size_t, res, size);
+       if (copy_to_user(buf, &data[*pos], res))
+               return -EFAULT;
+       *pos += res;
  
-               /*
-                * Update table header with size and CRC and account for table
-                * wrap around where the assumption is that we treat it as empty
-                * table
-                *
-                * TODO - Check the assumption is correct
+       return res;
+ }
+
+ const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops = {
+       .owner = THIS_MODULE,
+       .read = amdgpu_ras_debugfs_eeprom_size_read,
+       .write = NULL,
+       .llseek = default_llseek,
+ };
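
From userspace this node is a plain read-only text file. A hedged example of consuming it follows; the debugfs path and file name are assumptions (the node is registered elsewhere in this series), so adjust them to wherever the file actually lands.

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Assumed path; the real node name/location comes from the
             * debugfs registration elsewhere in the series.
             */
            int fd = open("/sys/kernel/debug/dri/0/ras/ras_eeprom_size",
                          O_RDONLY);
            char buf[64];
            ssize_t n;

            if (fd < 0)
                    return 1;
            n = read(fd, buf, sizeof(buf) - 1); /* "<bytes> bytes or <records> records" */
            if (n > 0) {
                    buf[n] = '\0';
                    fputs(buf, stdout);
            }
            close(fd);
            return 0;
    }
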
+ static const char *tbl_hdr_str = " Signature    Version  FirstOffs       Size   Checksum\n";
+ static const char *tbl_hdr_fmt = "0x%08X 0x%08X 0x%08X 0x%08X 0x%08X\n";
+ #define tbl_hdr_fmt_size (5 * (2+8) + 4 + 1)
+ static const char *rec_hdr_str = "Index  Offset ErrType Bank/CU          TimeStamp      Offs/Addr MemChl MCUMCID    RetiredPage\n";
+ static const char *rec_hdr_fmt = "%5d 0x%05X %7s    0x%02X 0x%016llX 0x%012llX   0x%02X    0x%02X 0x%012llX\n";
+ #define rec_hdr_fmt_size (5 + 1 + 7 + 1 + 7 + 1 + 7 + 1 + 18 + 1 + 14 + 1 + 6 + 1 + 7 + 1 + 14 + 1)
+ static const char *record_err_type_str[AMDGPU_RAS_EEPROM_ERR_COUNT] = {
+       "ignore",
+       "re",
+       "ue",
+ };
+
+ static loff_t amdgpu_ras_debugfs_table_size(struct amdgpu_ras_eeprom_control *control)
+ {
+       return strlen(tbl_hdr_str) + tbl_hdr_fmt_size +
+               strlen(rec_hdr_str) + rec_hdr_fmt_size * control->ras_num_recs;
+ }
+
+ void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control)
+ {
+       struct amdgpu_ras *ras = container_of(control, struct amdgpu_ras,
+                                             eeprom_control);
+       struct dentry *de = ras->de_ras_eeprom_table;
+       if (de)
+               d_inode(de)->i_size = amdgpu_ras_debugfs_table_size(control);
+ }
+
+ static ssize_t amdgpu_ras_debugfs_table_read(struct file *f, char __user *buf,
+                                            size_t size, loff_t *pos)
+ {
+       struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+       struct amdgpu_ras_eeprom_control *control = &ras->eeprom_control;
+       const size_t orig_size = size;
+       int res = -EFAULT;
+       size_t data_len;
+       mutex_lock(&control->ras_tbl_mutex);
+       /* We want data_len - *pos > 0, which means there are
+        * still bytes from this chunk to be copied to the user.
+        */
+       data_len = strlen(tbl_hdr_str);
+       if (*pos < data_len) {
+               data_len -= *pos;
+               data_len = min_t(size_t, data_len, size);
+               if (copy_to_user(buf, &tbl_hdr_str[*pos], data_len))
+                       goto Out;
+               buf += data_len;
+               size -= data_len;
+               *pos += data_len;
+       }
+       data_len = strlen(tbl_hdr_str) + tbl_hdr_fmt_size;
+       if (*pos < data_len && size > 0) {
+               u8 data[tbl_hdr_fmt_size + 1];
+               loff_t lpos;
+               snprintf(data, sizeof(data), tbl_hdr_fmt,
+                        control->tbl_hdr.header,
+                        control->tbl_hdr.version,
+                        control->tbl_hdr.first_rec_offset,
+                        control->tbl_hdr.tbl_size,
+                        control->tbl_hdr.checksum);
+               data_len -= *pos;
+               data_len = min_t(size_t, data_len, size);
+               lpos = *pos - strlen(tbl_hdr_str);
+               if (copy_to_user(buf, &data[lpos], data_len))
+                       goto Out;
+               buf += data_len;
+               size -= data_len;
+               *pos += data_len;
+       }
+       data_len = strlen(tbl_hdr_str) + tbl_hdr_fmt_size + strlen(rec_hdr_str);
+       if (*pos < data_len && size > 0) {
+               loff_t lpos;
+               data_len -= *pos;
+               data_len = min_t(size_t, data_len, size);
+               lpos = *pos - strlen(tbl_hdr_str) - tbl_hdr_fmt_size;
+               if (copy_to_user(buf, &rec_hdr_str[lpos], data_len))
+                       goto Out;
+               buf += data_len;
+               size -= data_len;
+               *pos += data_len;
+       }
+       data_len = amdgpu_ras_debugfs_table_size(control);
+       if (*pos < data_len && size > 0) {
+               u8 dare[RAS_TABLE_RECORD_SIZE];
+               u8 data[rec_hdr_fmt_size + 1];
+               struct eeprom_table_record record;
+               int s, r;
+               /* Find the starting record index
                 */
-               control->num_recs += num;
-               control->num_recs %= EEPROM_MAX_RECORD_NUM;
-               control->tbl_hdr.tbl_size += EEPROM_TABLE_RECORD_SIZE * num;
-               if (control->tbl_hdr.tbl_size > EEPROM_SIZE_BYTES)
-                       control->tbl_hdr.tbl_size = EEPROM_TABLE_HEADER_SIZE +
-                       control->num_recs * EEPROM_TABLE_RECORD_SIZE;
-               __update_tbl_checksum(control, records, num, old_hdr_byte_sum);
-               __update_table_header(control, buffs);
-       } else if (!__validate_tbl_checksum(control, records, num)) {
-               DRM_WARN("EEPROM Table checksum mismatch!");
-               /* TODO Uncomment when EEPROM read/write is relliable */
-               /* ret = -EIO; */
+               s = *pos - strlen(tbl_hdr_str) - tbl_hdr_fmt_size -
+                       strlen(rec_hdr_str);
+               s = s / rec_hdr_fmt_size;
+               r = *pos - strlen(tbl_hdr_str) - tbl_hdr_fmt_size -
+                       strlen(rec_hdr_str);
+               r = r % rec_hdr_fmt_size;
+               for ( ; size > 0 && s < control->ras_num_recs; s++) {
+                       u32 ai = RAS_RI_TO_AI(control, s);
+                       /* Read a single record
+                        */
+                       res = __amdgpu_ras_eeprom_read(control, dare, ai, 1);
+                       if (res)
+                               goto Out;
+                       __decode_table_record_from_buf(control, &record, dare);
+                       snprintf(data, sizeof(data), rec_hdr_fmt,
+                                s,
+                                RAS_INDEX_TO_OFFSET(control, ai),
+                                record_err_type_str[record.err_type],
+                                record.bank,
+                                record.ts,
+                                record.offset,
+                                record.mem_channel,
+                                record.mcumc_id,
+                                record.retired_page);
+                       data_len = min_t(size_t, rec_hdr_fmt_size - r, size);
+                       if (copy_to_user(buf, &data[r], data_len)) {
+                               res = -EFAULT;
+                               goto Out;
+                       }
+                       buf += data_len;
+                       size -= data_len;
+                       *pos += data_len;
+                       r = 0;
+               }
        }
+       res = 0;
+ Out:
+       mutex_unlock(&control->ras_tbl_mutex);
+       return res < 0 ? res : orig_size - size;
+ }
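
The s/r bookkeeping above is just a div/mod of the file position once the two fixed header strings have been accounted for: s is the record line to resume at and r is the byte offset inside that line. A tiny example with made-up sizes (the real ones come from the format macros above):

    #include <stdio.h>

    int main(void)
    {
            /* Made-up sizes for illustration only. */
            long hdrs = 160;                 /* bytes taken by the two headers  */
            long rec  = 100;                 /* bytes per formatted record line */
            long pos  = 475;                 /* resume position from userspace  */
            long s = (pos - hdrs) / rec;     /* 3: resume at the 4th record     */
            long r = (pos - hdrs) % rec;     /* 15: 15 bytes into that line     */

            printf("s=%ld r=%ld\n", s, r);
            return 0;
    }
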
  
- free_msgs:
-       kfree(msgs);
+ static ssize_t
+ amdgpu_ras_debugfs_eeprom_table_read(struct file *f, char __user *buf,
+                                    size_t size, loff_t *pos)
+ {
+       struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+       struct amdgpu_ras_eeprom_control *control = ras ? &ras->eeprom_control : NULL;
+       u8 data[81];
+       int res;
+       if (!size)
+               return size;
+       if (!ras || !control) {
+               res = snprintf(data, sizeof(data), "Not supported\n");
+               if (*pos >= res)
+                       return 0;
+               res -= *pos;
+               res = min_t(size_t, res, size);
  
- free_buff:
-       kfree(buffs);
+               if (copy_to_user(buf, &data[*pos], res))
+                       return -EFAULT;
  
-       mutex_unlock(&control->tbl_mutex);
+               *pos += res;
  
-       return ret == num ? 0 : -EIO;
+               return res;
+       } else {
+               return amdgpu_ras_debugfs_table_read(f, buf, size, pos);
+       }
  }
  
- inline uint32_t amdgpu_ras_eeprom_get_record_max_length(void)
+ const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops = {
+       .owner = THIS_MODULE,
+       .read = amdgpu_ras_debugfs_eeprom_table_read,
+       .write = NULL,
+       .llseek = default_llseek,
+ };
+
+ /**
+  * __verify_ras_table_checksum -- verify the RAS EEPROM table checksum
+  * @control: pointer to control structure
+  *
+  * Check the checksum of the RAS table stored in EEPROM.
+  *
+  * Return 0 if the checksum is correct,
+  * positive if it is not correct, and
+  * -errno on I/O error.
+  */
+ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control)
  {
-       return EEPROM_MAX_RECORD_NUM;
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       int buf_size, res;
+       u8  csum, *buf, *pp;
+       buf_size = RAS_TABLE_HEADER_SIZE +
+               control->ras_num_recs * RAS_TABLE_RECORD_SIZE;
+       buf = kzalloc(buf_size, GFP_KERNEL);
+       if (!buf) {
+               DRM_ERROR("Out of memory checking RAS table checksum.\n");
+               return -ENOMEM;
+       }
+       res = amdgpu_eeprom_read(&adev->pm.smu_i2c,
+                                control->i2c_address +
+                                control->ras_header_offset,
+                                buf, buf_size);
+       if (res < buf_size) {
+               DRM_ERROR("Partial read for checksum, res:%d\n", res);
+               /* On partial reads, return -EIO.
+                */
+               if (res >= 0)
+                       res = -EIO;
+               goto Out;
+       }
+       csum = 0;
+       for (pp = buf; pp < buf + buf_size; pp++)
+               csum += *pp;
+ Out:
+       kfree(buf);
+       return res < 0 ? res : csum;
  }
  
- /* Used for testing if bugs encountered */
- #if 0
- void amdgpu_ras_eeprom_test(struct amdgpu_ras_eeprom_control *control)
+ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
+                          bool *exceed_err_limit)
  {
-       int i;
-       struct eeprom_table_record *recs = kcalloc(1, sizeof(*recs), GFP_KERNEL);
+       struct amdgpu_device *adev = to_amdgpu_device(control);
+       unsigned char buf[RAS_TABLE_HEADER_SIZE] = { 0 };
+       struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr;
+       struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+       int res;
+       *exceed_err_limit = false;
+       if (!__is_ras_eeprom_supported(adev))
+               return 0;
  
-       if (!recs)
-               return;
+       /* Verify i2c adapter is initialized */
+       if (!adev->pm.smu_i2c.algo)
+               return -ENOENT;
  
-       for (i = 0; i < 1 ; i++) {
-               recs[i].address = 0xdeadbeef;
-               recs[i].retired_page = i;
+       if (!__get_eeprom_i2c_addr(adev, control))
+               return -EINVAL;
+       control->ras_header_offset = RAS_HDR_START;
+       control->ras_record_offset = RAS_RECORD_START;
+       control->ras_max_record_count  = RAS_MAX_RECORD_COUNT;
+       mutex_init(&control->ras_tbl_mutex);
+       /* Read the table header from EEPROM address */
+       res = amdgpu_eeprom_read(&adev->pm.smu_i2c,
+                                control->i2c_address + control->ras_header_offset,
+                                buf, RAS_TABLE_HEADER_SIZE);
+       if (res < RAS_TABLE_HEADER_SIZE) {
+               DRM_ERROR("Failed to read EEPROM table header, res:%d", res);
+               return res >= 0 ? -EIO : res;
        }
  
-       if (!amdgpu_ras_eeprom_process_recods(control, recs, true, 1)) {
+       __decode_table_header_from_buf(hdr, buf);
  
-               memset(recs, 0, sizeof(*recs) * 1);
+       control->ras_num_recs = RAS_NUM_RECS(hdr);
+       control->ras_fri = RAS_OFFSET_TO_INDEX(control, hdr->first_rec_offset);
  
-               control->next_addr = EEPROM_RECORD_START;
+       if (hdr->header == RAS_TABLE_HDR_VAL) {
+               DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records",
+                                control->ras_num_recs);
+               res = __verify_ras_table_checksum(control);
+               if (res)
+                       DRM_ERROR("RAS table incorrect checksum or error:%d\n",
+                                 res);
+       } else if (hdr->header == RAS_TABLE_HDR_BAD &&
+                  amdgpu_bad_page_threshold != 0) {
+               res = __verify_ras_table_checksum(control);
+               if (res)
+                       DRM_ERROR("RAS Table incorrect checksum or error:%d\n",
+                                 res);
+               if (ras->bad_page_cnt_threshold > control->ras_num_recs) {
+                       /* This means that the threshold was increased since
+                        * the last time the system was booted, and now,
+                        * ras->bad_page_cnt_threshold - control->ras_num_recs > 0,
+                        * so at least one more record can be saved
+                        * before the page count threshold is reached.
+                        */
+                       dev_info(adev->dev,
+                                "records:%d threshold:%d, resetting "
+                                "RAS table header signature",
+                                control->ras_num_recs,
+                                ras->bad_page_cnt_threshold);
+                       res = amdgpu_ras_eeprom_correct_header_tag(control,
+                                                                  RAS_TABLE_HDR_VAL);
+               } else {
+                       *exceed_err_limit = true;
+                       dev_err(adev->dev,
+                               "RAS records:%d exceed threshold:%d, "
+                               "maybe retire this GPU?",
+                               control->ras_num_recs, ras->bad_page_cnt_threshold);
+               }
+       } else {
+               DRM_INFO("Creating a new EEPROM table");
  
-               if (!amdgpu_ras_eeprom_process_recods(control, recs, false, 1)) {
-                       for (i = 0; i < 1; i++)
-                               DRM_INFO("rec.address :0x%llx, rec.retired_page :%llu",
-                                        recs[i].address, recs[i].retired_page);
-               } else
-                       DRM_ERROR("Failed in reading from table");
+               res = amdgpu_ras_eeprom_reset_table(control);
+       }
  
-       } else
-               DRM_ERROR("Failed in writing to table");
+       return res < 0 ? res : 0;
  }
- #endif
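
For orientation, a hedged sketch of how the RAS recovery init path would be expected to consume the reworked init API and its exceed_err_limit output; the function below is invented for illustration and is not part of this patch.

    static int example_recovery_init(struct amdgpu_device *adev)
    {
            struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
            bool exceeded = false;
            int res;

            res = amdgpu_ras_eeprom_init(&ras->eeprom_control, &exceeded);
            if (res)
                    return res;

            if (exceeded)
                    /* Too many retired pages on record: surface the
                     * "bad GPU" condition to the rest of the driver.
                     */
                    dev_warn(adev->dev, "RAS bad page threshold exceeded\n");

            return 0;
    }
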
@@@ -51,6 -51,7 +51,7 @@@
  #include "intel_panel.h"
  #include "intel_pps.h"
  #include "intel_psr.h"
+ #include "intel_snps_phy.h"
  #include "intel_sprite.h"
  #include "intel_tc.h"
  #include "intel_vdsc.h"
@@@ -95,24 -96,18 +96,18 @@@ static int intel_ddi_hdmi_level(struct 
   * values in advance. This function programs the correct values for
   * DP/eDP/FDI use cases.
   */
- void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
-                                 const struct intel_crtc_state *crtc_state)
+ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
+                               const struct intel_crtc_state *crtc_state)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 iboost_bit = 0;
        int i, n_entries;
        enum port port = encoder->port;
-       const struct ddi_buf_trans *ddi_translations;
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
-               ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
-                                                              &n_entries);
-       else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
-               ddi_translations = intel_ddi_get_buf_trans_edp(encoder,
-                                                              &n_entries);
-       else
-               ddi_translations = intel_ddi_get_buf_trans_dp(encoder,
-                                                             &n_entries);
+       const struct intel_ddi_buf_trans *ddi_translations;
+       ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+       if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+               return;
  
        /* If we're boosting the current, set bit 31 of trans1 */
        if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
  
        for (i = 0; i < n_entries; i++) {
                intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
-                              ddi_translations[i].trans1 | iboost_bit);
+                              ddi_translations->entries[i].hsw.trans1 | iboost_bit);
                intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
-                              ddi_translations[i].trans2);
+                              ddi_translations->entries[i].hsw.trans2);
        }
  }
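
The pattern introduced here replaces the per-platform if/else ladders with a single encoder->get_buf_trans() hook that every caller goes through. A hedged sketch of the shape, where names prefixed with example_ and the num_entries field are assumptions made for illustration:

    static const struct intel_ddi_buf_trans example_dp_trans;   /* placeholder */
    static const struct intel_ddi_buf_trans example_hdmi_trans; /* placeholder */

    static const struct intel_ddi_buf_trans *
    example_get_buf_trans(struct intel_encoder *encoder,
                          const struct intel_crtc_state *crtc_state,
                          int *n_entries)
    {
            /* One platform's table selection lives behind the hook now. */
            const struct intel_ddi_buf_trans *t =
                    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ?
                    &example_hdmi_trans : &example_dp_trans;

            *n_entries = t->num_entries;
            return t;
    }

    /* installed once during platform-specific encoder setup, e.g.: */
    /*     encoder->get_buf_trans = example_get_buf_trans;          */
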
  
   * values in advance. This function programs the correct values for
   * HDMI/DVI use cases.
   */
- static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
-                                          int level)
+ static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
+                                        const struct intel_crtc_state *crtc_state,
+                                        int level)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 iboost_bit = 0;
        int n_entries;
        enum port port = encoder->port;
-       const struct ddi_buf_trans *ddi_translations;
-       ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
+       const struct intel_ddi_buf_trans *ddi_translations;
  
+       ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
        if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
                return;
        if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
  
        /* Entry 9 is for HDMI: */
        intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
-                      ddi_translations[level].trans1 | iboost_bit);
+                      ddi_translations->entries[level].hsw.trans1 | iboost_bit);
        intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
-                      ddi_translations[level].trans2);
+                      ddi_translations->entries[level].hsw.trans2);
  }
  
  void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
  static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
                                      enum port port)
  {
+       int ret;
        /* Wait > 518 usecs for DDI_BUF_CTL to be non idle */
        if (DISPLAY_VER(dev_priv) < 10) {
                usleep_range(518, 1000);
                return;
        }
  
-       if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
-                         DDI_BUF_IS_IDLE), 500))
+       ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+                         DDI_BUF_IS_IDLE), IS_DG2(dev_priv) ? 1200 : 500, 10, 10);
+       if (ret)
                drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n",
                        port_name(port));
  }
@@@ -828,7 -827,7 +827,7 @@@ bool intel_ddi_get_hw_state(struct inte
  static enum intel_display_power_domain
  intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
  {
-       /* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
+       /* ICL+ HW requires corresponding AUX IOs to be powered up for PSR with
         * DC states enabled at the same time, while for driver initiated AUX
         * transfers we need the same AUX IOs to be powered but with DC states
         * disabled. Accordingly use the AUX power domain here which leaves DC
@@@ -948,22 -947,16 +947,16 @@@ static void skl_ddi_set_iboost(struct i
                iboost = intel_bios_encoder_dp_boost_level(encoder->devdata);
  
        if (iboost == 0) {
-               const struct ddi_buf_trans *ddi_translations;
+               const struct intel_ddi_buf_trans *ddi_translations;
                int n_entries;
  
-               if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
-                       ddi_translations = intel_ddi_get_buf_trans_hdmi(encoder, &n_entries);
-               else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
-                       ddi_translations = intel_ddi_get_buf_trans_edp(encoder, &n_entries);
-               else
-                       ddi_translations = intel_ddi_get_buf_trans_dp(encoder, &n_entries);
+               ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
                if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
                        return;
                if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
                        level = n_entries - 1;
  
-               iboost = ddi_translations[level].i_boost;
+               iboost = ddi_translations->entries[level].hsw.i_boost;
        }
  
        /* Make sure that the requested I_boost is valid */
@@@ -983,21 -976,21 +976,21 @@@ static void bxt_ddi_vswing_sequence(str
                                    int level)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       const struct bxt_ddi_buf_trans *ddi_translations;
+       const struct intel_ddi_buf_trans *ddi_translations;
        enum port port = encoder->port;
        int n_entries;
  
-       ddi_translations = bxt_get_buf_trans(encoder, crtc_state, &n_entries);
+       ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
        if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
                return;
        if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
                level = n_entries - 1;
  
        bxt_ddi_phy_set_signal_level(dev_priv, port,
-                                    ddi_translations[level].margin,
-                                    ddi_translations[level].scale,
-                                    ddi_translations[level].enable,
-                                    ddi_translations[level].deemphasis);
+                                    ddi_translations->entries[level].bxt.margin,
+                                    ddi_translations->entries[level].bxt.scale,
+                                    ddi_translations->entries[level].bxt.enable,
+                                    ddi_translations->entries[level].bxt.deemphasis);
  }
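
The entries[level].hsw / .bxt / .icl / .mg / .dkl accesses in this file imply that the new table type wraps a union of per-PHY entry layouts behind one generic struct. The reconstruction below is a hedged guess for readers of this diff; member names other than entries are inferred from usage here and the field widths are assumptions.

    #include <stdint.h>

    union example_ddi_buf_trans_entry {
            struct { uint32_t trans1, trans2; uint8_t i_boost; } hsw;
            struct { uint32_t margin, scale, enable, deemphasis; } bxt;
            struct {
                    uint8_t dw2_swing_sel, dw7_n_scalar;
                    uint8_t dw4_post_cursor_1, dw4_post_cursor_2;
                    uint8_t dw4_cursor_coeff;
            } icl;
            /* .mg (Type-C MG PHY) and .dkl (Dekel PHY) variants omitted */
    };

    struct example_ddi_buf_trans {
            const union example_ddi_buf_trans_entry *entries;
            uint8_t num_entries;
    };
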
  
  static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
  {
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum port port = encoder->port;
-       enum phy phy = intel_port_to_phy(dev_priv, port);
        int n_entries;
  
-       if (DISPLAY_VER(dev_priv) >= 12) {
-               if (intel_phy_is_combo(dev_priv, phy))
-                       tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
-               else if (IS_ALDERLAKE_P(dev_priv))
-                       adlp_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
-               else
-                       tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
-       } else if (DISPLAY_VER(dev_priv) == 11) {
-               if (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE))
-                       jsl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
-               else if (IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
-                       ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
-               else if (intel_phy_is_combo(dev_priv, phy))
-                       icl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
-               else
-                       icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
-       } else if (IS_CANNONLAKE(dev_priv)) {
-               cnl_get_buf_trans(encoder, crtc_state, &n_entries);
-       } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
-               bxt_get_buf_trans(encoder, crtc_state, &n_entries);
-       } else {
-               if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
-                       intel_ddi_get_buf_trans_edp(encoder, &n_entries);
-               else
-                       intel_ddi_get_buf_trans_dp(encoder, &n_entries);
-       }
+       encoder->get_buf_trans(encoder, crtc_state, &n_entries);
  
        if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
                n_entries = 1;
@@@ -1056,146 -1022,17 +1022,17 @@@ static u8 intel_ddi_dp_preemph_max(stru
        return DP_TRAIN_PRE_EMPH_LEVEL_3;
  }
  
- static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
-                                  const struct intel_crtc_state *crtc_state,
-                                  int level)
- {
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       const struct cnl_ddi_buf_trans *ddi_translations;
-       enum port port = encoder->port;
-       int n_entries, ln;
-       u32 val;
-       ddi_translations = cnl_get_buf_trans(encoder, crtc_state, &n_entries);
-       if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
-               return;
-       if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
-               level = n_entries - 1;
-       /* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */
-       val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
-       val &= ~SCALING_MODE_SEL_MASK;
-       val |= SCALING_MODE_SEL(2);
-       intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
-       /* Program PORT_TX_DW2 */
-       val = intel_de_read(dev_priv, CNL_PORT_TX_DW2_LN0(port));
-       val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
-                RCOMP_SCALAR_MASK);
-       val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
-       val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
-       /* Rcomp scalar is fixed as 0x98 for every table entry */
-       val |= RCOMP_SCALAR(0x98);
-       intel_de_write(dev_priv, CNL_PORT_TX_DW2_GRP(port), val);
-       /* Program PORT_TX_DW4 */
-       /* We cannot write to GRP. It would overrite individual loadgen */
-       for (ln = 0; ln < 4; ln++) {
-               val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
-               val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
-                        CURSOR_COEFF_MASK);
-               val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
-               val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
-               val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
-               intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
-       }
-       /* Program PORT_TX_DW5 */
-       /* All DW5 values are fixed for every table entry */
-       val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
-       val &= ~RTERM_SELECT_MASK;
-       val |= RTERM_SELECT(6);
-       val |= TAP3_DISABLE;
-       intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
-       /* Program PORT_TX_DW7 */
-       val = intel_de_read(dev_priv, CNL_PORT_TX_DW7_LN0(port));
-       val &= ~N_SCALAR_MASK;
-       val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
-       intel_de_write(dev_priv, CNL_PORT_TX_DW7_GRP(port), val);
- }
- static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
-                                   const struct intel_crtc_state *crtc_state,
-                                   int level)
- {
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum port port = encoder->port;
-       int width, rate, ln;
-       u32 val;
-       width = crtc_state->lane_count;
-       rate = crtc_state->port_clock;
-       /*
-        * 1. If port type is eDP or DP,
-        * set PORT_PCS_DW1 cmnkeeper_enable to 1b,
-        * else clear to 0b.
-        */
-       val = intel_de_read(dev_priv, CNL_PORT_PCS_DW1_LN0(port));
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
-               val &= ~COMMON_KEEPER_EN;
-       else
-               val |= COMMON_KEEPER_EN;
-       intel_de_write(dev_priv, CNL_PORT_PCS_DW1_GRP(port), val);
-       /* 2. Program loadgen select */
-       /*
-        * Program PORT_TX_DW4_LN depending on Bit rate and used lanes
-        * <= 6 GHz and 4 lanes (LN0=0, LN1=1, LN2=1, LN3=1)
-        * <= 6 GHz and 1,2 lanes (LN0=0, LN1=1, LN2=1, LN3=0)
-        * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
-        */
-       for (ln = 0; ln <= 3; ln++) {
-               val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
-               val &= ~LOADGEN_SELECT;
-               if ((rate <= 600000 && width == 4 && ln >= 1)  ||
-                   (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
-                       val |= LOADGEN_SELECT;
-               }
-               intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
-       }
-       /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
-       val = intel_de_read(dev_priv, CNL_PORT_CL1CM_DW5);
-       val |= SUS_CLOCK_CONFIG;
-       intel_de_write(dev_priv, CNL_PORT_CL1CM_DW5, val);
-       /* 4. Clear training enable to change swing values */
-       val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
-       val &= ~TX_TRAINING_EN;
-       intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
-       /* 5. Program swing and de-emphasis */
-       cnl_ddi_vswing_program(encoder, crtc_state, level);
-       /* 6. Set training enable to trigger update */
-       val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
-       val |= TX_TRAINING_EN;
-       intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
- }
  static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
                                         const struct intel_crtc_state *crtc_state,
                                         int level)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       const struct cnl_ddi_buf_trans *ddi_translations;
+       const struct intel_ddi_buf_trans *ddi_translations;
        enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
        int n_entries, ln;
        u32 val;
  
-       if (DISPLAY_VER(dev_priv) >= 12)
-               ddi_translations = tgl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
-       else if (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE))
-               ddi_translations = jsl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
-       else if (IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
-               ddi_translations = ehl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
-       else
-               ddi_translations = icl_get_combo_buf_trans(encoder, crtc_state, &n_entries);
+       ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
        if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
                return;
        if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
        val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
        val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
                 RCOMP_SCALAR_MASK);
-       val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
-       val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
+       val |= SWING_SEL_UPPER(ddi_translations->entries[level].icl.dw2_swing_sel);
+       val |= SWING_SEL_LOWER(ddi_translations->entries[level].icl.dw2_swing_sel);
        /* Program Rcomp scalar for every table entry */
        val |= RCOMP_SCALAR(0x98);
        intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val);
                val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
                val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
                         CURSOR_COEFF_MASK);
-               val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
-               val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
-               val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
+               val |= POST_CURSOR_1(ddi_translations->entries[level].icl.dw4_post_cursor_1);
+               val |= POST_CURSOR_2(ddi_translations->entries[level].icl.dw4_post_cursor_2);
+               val |= CURSOR_COEFF(ddi_translations->entries[level].icl.dw4_cursor_coeff);
                intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
        }
  
        /* Program PORT_TX_DW7 */
        val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN0(phy));
        val &= ~N_SCALAR_MASK;
-       val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
+       val |= N_SCALAR(ddi_translations->entries[level].icl.dw7_n_scalar);
        intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val);
  }
  
@@@ -1315,15 -1152,14 +1152,14 @@@ static void icl_mg_phy_ddi_vswing_seque
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
-       const struct icl_mg_phy_ddi_buf_trans *ddi_translations;
+       const struct intel_ddi_buf_trans *ddi_translations;
        int n_entries, ln;
        u32 val;
  
        if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
                return;
  
-       ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries);
+       ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
        if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
                return;
        if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
                val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port));
                val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
                val |= CRI_TXDEEMPH_OVERRIDE_17_12(
-                       ddi_translations[level].cri_txdeemph_override_17_12);
+                       ddi_translations->entries[level].mg.cri_txdeemph_override_17_12);
                intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val);
  
                val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port));
                val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
                val |= CRI_TXDEEMPH_OVERRIDE_17_12(
-                       ddi_translations[level].cri_txdeemph_override_17_12);
+                       ddi_translations->entries[level].mg.cri_txdeemph_override_17_12);
                intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val);
        }
  
                val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
                         CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
                val |= CRI_TXDEEMPH_OVERRIDE_5_0(
-                       ddi_translations[level].cri_txdeemph_override_5_0) |
+                       ddi_translations->entries[level].mg.cri_txdeemph_override_5_0) |
                        CRI_TXDEEMPH_OVERRIDE_11_6(
-                               ddi_translations[level].cri_txdeemph_override_11_6) |
+                               ddi_translations->entries[level].mg.cri_txdeemph_override_11_6) |
                        CRI_TXDEEMPH_OVERRIDE_EN;
                intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val);
  
                val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
                         CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
                val |= CRI_TXDEEMPH_OVERRIDE_5_0(
-                       ddi_translations[level].cri_txdeemph_override_5_0) |
+                       ddi_translations->entries[level].mg.cri_txdeemph_override_5_0) |
                        CRI_TXDEEMPH_OVERRIDE_11_6(
-                               ddi_translations[level].cri_txdeemph_override_11_6) |
+                               ddi_translations->entries[level].mg.cri_txdeemph_override_11_6) |
                        CRI_TXDEEMPH_OVERRIDE_EN;
                intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val);
  
@@@ -1453,18 -1289,14 +1289,14 @@@ tgl_dkl_phy_ddi_vswing_sequence(struct 
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
-       const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
+       const struct intel_ddi_buf_trans *ddi_translations;
        u32 val, dpcnt_mask, dpcnt_val;
        int n_entries, ln;
  
        if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
                return;
  
-       if (IS_ALDERLAKE_P(dev_priv))
-               ddi_translations = adlp_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
-       else
-               ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries);
+       ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
        if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
                return;
        if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
        dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
                      DKL_TX_DE_EMPAHSIS_COEFF_MASK |
                      DKL_TX_VSWING_CONTROL_MASK);
-       dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations[level].dkl_vswing_control);
-       dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations[level].dkl_de_emphasis_control);
-       dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control);
+       dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations->entries[level].dkl.dkl_vswing_control);
+       dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations->entries[level].dkl.dkl_de_emphasis_control);
+       dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations->entries[level].dkl.dkl_preshoot_control);
  
        for (ln = 0; ln < 2; ln++) {
                intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
@@@ -1549,33 -1381,33 +1381,33 @@@ static int intel_ddi_dp_level(struct in
  }
  
  static void
- tgl_set_signal_levels(struct intel_dp *intel_dp,
+ dg2_set_signal_levels(struct intel_dp *intel_dp,
                      const struct intel_crtc_state *crtc_state)
  {
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        int level = intel_ddi_dp_level(intel_dp);
  
-       tgl_ddi_vswing_sequence(encoder, crtc_state, level);
+       intel_snps_phy_ddi_vswing_sequence(encoder, level);
  }
  
  static void
- icl_set_signal_levels(struct intel_dp *intel_dp,
+ tgl_set_signal_levels(struct intel_dp *intel_dp,
                      const struct intel_crtc_state *crtc_state)
  {
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        int level = intel_ddi_dp_level(intel_dp);
  
-       icl_ddi_vswing_sequence(encoder, crtc_state, level);
+       tgl_ddi_vswing_sequence(encoder, crtc_state, level);
  }
  
  static void
- cnl_set_signal_levels(struct intel_dp *intel_dp,
+ icl_set_signal_levels(struct intel_dp *intel_dp,
                      const struct intel_crtc_state *crtc_state)
  {
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        int level = intel_ddi_dp_level(intel_dp);
  
-       cnl_ddi_vswing_sequence(encoder, crtc_state, level);
+       icl_ddi_vswing_sequence(encoder, crtc_state, level);
  }
  
  static void
@@@ -1613,7 -1445,7 +1445,7 @@@ hsw_set_signal_levels(struct intel_dp *
        intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
  }
  
- static void _cnl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+ static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
                                  u32 clk_sel_mask, u32 clk_sel, u32 clk_off)
  {
        mutex_lock(&i915->dpll.lock);
        mutex_unlock(&i915->dpll.lock);
  }
  
- static void _cnl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+ static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
                                   u32 clk_off)
  {
        mutex_lock(&i915->dpll.lock);
        mutex_unlock(&i915->dpll.lock);
  }
  
- static bool _cnl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
+ static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
                                      u32 clk_off)
  {
        return !(intel_de_read(i915, reg) & clk_off);
  }
  
  static struct intel_shared_dpll *
- _cnl_ddi_get_pll(struct drm_i915_private *i915, i915_reg_t reg,
+ _icl_ddi_get_pll(struct drm_i915_private *i915, i915_reg_t reg,
                 u32 clk_sel_mask, u32 clk_sel_shift)
  {
        enum intel_dpll_id id;
@@@ -1666,7 -1498,7 +1498,7 @@@ static void adls_ddi_enable_clock(struc
        if (drm_WARN_ON(&i915->drm, !pll))
                return;
  
-       _cnl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+       _icl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
                              ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
                              pll->info->id << ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy),
                              ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@@ -1677,7 -1509,7 +1509,7 @@@ static void adls_ddi_disable_clock(stru
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
  
-       _cnl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+       _icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
                               ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
  }
  
@@@ -1686,7 -1518,7 +1518,7 @@@ static bool adls_ddi_is_clock_enabled(s
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
  
-       return _cnl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
+       return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
                                         ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
  }
  
@@@ -1695,7 -1527,7 +1527,7 @@@ static struct intel_shared_dpll *adls_d
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
  
-       return _cnl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy),
+       return _icl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy),
                                ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
                                ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy));
  }
@@@ -1710,7 -1542,7 +1542,7 @@@ static void rkl_ddi_enable_clock(struc
        if (drm_WARN_ON(&i915->drm, !pll))
                return;
  
-       _cnl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+       _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
                              RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
                              RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
                              RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@@ -1721,7 -1553,7 +1553,7 @@@ static void rkl_ddi_disable_clock(struc
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
  
-       _cnl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+       _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
                               RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
  }
  
@@@ -1730,7 -1562,7 +1562,7 @@@ static bool rkl_ddi_is_clock_enabled(st
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
  
-       return _cnl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+       return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
                                         RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
  }
  
@@@ -1739,7 -1571,7 +1571,7 @@@ static struct intel_shared_dpll *rkl_dd
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
  
-       return _cnl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
+       return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
                                RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
                                RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
  }
@@@ -1763,7 -1595,7 +1595,7 @@@ static void dg1_ddi_enable_clock(struc
                        (pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C)))
                return;
  
-       _cnl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+       _icl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
                              DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
                              DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
                              DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@@ -1774,7 -1606,7 +1606,7 @@@ static void dg1_ddi_disable_clock(struc
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
  
-       _cnl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+       _icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
                               DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
  }
  
@@@ -1783,7 -1615,7 +1615,7 @@@ static bool dg1_ddi_is_clock_enabled(st
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
  
-       return _cnl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
+       return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
                                         DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
  }
  
@@@ -1820,7 -1652,7 +1652,7 @@@ static void icl_ddi_combo_enable_clock(
        if (drm_WARN_ON(&i915->drm, !pll))
                return;
  
-       _cnl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+       _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
                              ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
                              ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
                              ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@@ -1831,7 -1663,7 +1663,7 @@@ static void icl_ddi_combo_disable_clock
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
  
-       _cnl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+       _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
                               ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
  }
  
@@@ -1840,7 -1672,7 +1672,7 @@@ static bool icl_ddi_combo_is_clock_enab
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
  
-       return _cnl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+       return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
                                         ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
  }
  
@@@ -1849,7 -1681,7 +1681,7 @@@ struct intel_shared_dpll *icl_ddi_combo
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
  
-       return _cnl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
+       return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
                                ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
                                ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
  }
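
The hunks above fold the per-platform DPCLKA clock programming into one shared helper (renamed from _cnl_ddi_* to _icl_ddi_*) that takes the register and bitfields as parameters, so the ADL-S/RKL/DG1/ICL paths keep only thin wrappers. A minimal sketch of the same pattern, using hypothetical names rather than real i915 symbols:

/* Sketch only: names and register layout are hypothetical, not i915 symbols. */
#include <stdbool.h>
#include <stdint.h>

static uint32_t fake_mmio[8];                            /* stand-in register file */

static uint32_t rd(unsigned int reg)             { return fake_mmio[reg]; }
static void     wr(unsigned int reg, uint32_t v) { fake_mmio[reg] = v; }

struct clk_layout {                      /* what each platform wrapper supplies */
        unsigned int cfgcr;              /* DPCLKA_CFGCR-style register index */
        uint32_t sel_mask;               /* clock-select field for this PHY */
        uint32_t off_bit;                /* clock-gate ("off") bit for this PHY */
};

/* One shared implementation, parameterized by the register layout... */
static void ddi_enable_clock(const struct clk_layout *l, uint32_t sel)
{
        uint32_t v = rd(l->cfgcr);

        v = (v & ~l->sel_mask) | sel;    /* route the chosen PLL to the DDI */
        v &= ~l->off_bit;                /* ungate the DDI clock */
        wr(l->cfgcr, v);
}

static bool ddi_is_clock_enabled(const struct clk_layout *l)
{
        return !(rd(l->cfgcr) & l->off_bit);
}

/* ...and a thin per-platform wrapper that only picks its layout. */
static const struct clk_layout adls_like_layout = {
        .cfgcr = 0, .sel_mask = 0x3, .off_bit = 0x10,
};

static void adls_like_enable_clock(uint32_t sel)
{
        ddi_enable_clock(&adls_like_layout, sel);
}
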
@@@ -1982,50 -1814,6 +1814,6 @@@ static struct intel_shared_dpll *icl_dd
        return intel_get_shared_dpll_by_id(i915, id);
  }
  
- static void cnl_ddi_enable_clock(struct intel_encoder *encoder,
-                                const struct intel_crtc_state *crtc_state)
- {
-       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-       const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
-       enum port port = encoder->port;
-       if (drm_WARN_ON(&i915->drm, !pll))
-               return;
-       _cnl_ddi_enable_clock(i915, DPCLKA_CFGCR0,
-                             DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port),
-                             DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port),
-                             DPCLKA_CFGCR0_DDI_CLK_OFF(port));
- }
- static void cnl_ddi_disable_clock(struct intel_encoder *encoder)
- {
-       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-       enum port port = encoder->port;
-       _cnl_ddi_disable_clock(i915, DPCLKA_CFGCR0,
-                              DPCLKA_CFGCR0_DDI_CLK_OFF(port));
- }
- static bool cnl_ddi_is_clock_enabled(struct intel_encoder *encoder)
- {
-       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-       enum port port = encoder->port;
-       return _cnl_ddi_is_clock_enabled(i915, DPCLKA_CFGCR0,
-                                        DPCLKA_CFGCR0_DDI_CLK_OFF(port));
- }
- static struct intel_shared_dpll *cnl_ddi_get_pll(struct intel_encoder *encoder)
- {
-       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-       enum port port = encoder->port;
-       return _cnl_ddi_get_pll(i915, DPCLKA_CFGCR0,
-                               DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port),
-                               DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port));
- }
  static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
  {
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
@@@ -2249,7 -2037,7 +2037,7 @@@ void intel_ddi_sanitize_encoder_pll_map
                ddi_clk_needed = false;
        }
  
-       if (ddi_clk_needed || !encoder->disable_clock ||
+       if (ddi_clk_needed || !encoder->is_clock_enabled ||
            !encoder->is_clock_enabled(encoder))
                return;
  
@@@ -2463,15 -2251,6 +2251,15 @@@ static void intel_ddi_power_up_lanes(st
        }
  }
  
 +/* Splitter enable for eDP MSO is limited to certain pipes. */
 +static u8 intel_ddi_splitter_pipe_mask(struct drm_i915_private *i915)
 +{
 +      if (IS_ALDERLAKE_P(i915))
 +              return BIT(PIPE_A) | BIT(PIPE_B);
 +      else
 +              return BIT(PIPE_A);
 +}
 +
  static void intel_ddi_mso_get_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config)
  {
        if (!pipe_config->splitter.enable)
                return;
  
 -      /* Splitter enable is supported for pipe A only. */
 -      if (drm_WARN_ON(&i915->drm, pipe != PIPE_A)) {
 +      if (drm_WARN_ON(&i915->drm, !(intel_ddi_splitter_pipe_mask(i915) & BIT(pipe)))) {
                pipe_config->splitter.enable = false;
                return;
        }
@@@ -2521,6 -2301,10 +2309,6 @@@ static void intel_ddi_mso_configure(con
                return;
  
        if (crtc_state->splitter.enable) {
 -              /* Splitter enable is supported for pipe A only. */
 -              if (drm_WARN_ON(&i915->drm, pipe != PIPE_A))
 -                      return;
 -
                dss1 |= SPLITTER_ENABLE;
                dss1 |= OVERLAP_PIXELS(crtc_state->splitter.pixel_overlap);
                if (crtc_state->splitter.link_count == 2)
                     OVERLAP_PIXELS_MASK, dss1);
  }
  
+ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
+                                 const struct intel_crtc_state *crtc_state,
+                                 const struct drm_connector_state *conn_state)
+ {
+       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+       bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+       int level = intel_ddi_dp_level(intel_dp);
+       intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
+                                crtc_state->lane_count);
+       /*
+        * 1. Enable Power Wells
+        *
+        * This was handled at the beginning of intel_atomic_commit_tail(),
+        * before we called down into this function.
+        */
+       /* 2. Enable Panel Power if PPS is required */
+       intel_pps_on(intel_dp);
+       /*
+        * 3. Enable the port PLL.
+        */
+       intel_ddi_enable_clock(encoder, crtc_state);
+       /* 4. Enable IO power */
+       if (!intel_phy_is_tc(dev_priv, phy) ||
+           dig_port->tc_mode != TC_PORT_TBT_ALT)
+               dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+                                                                  dig_port->ddi_io_power_domain);
+       /*
+        * 5. The rest of the below are substeps under the bspec's "Enable and
+        * Train Display Port" step.  Note that steps that are specific to
+        * MST will be handled by intel_mst_pre_enable_dp() before/after it
+        * calls into this function.  Also intel_mst_pre_enable_dp() only calls
+        * us when active_mst_links==0, so any steps designated for "single
+        * stream or multi-stream master transcoder" can just be performed
+        * unconditionally here.
+        */
+       /*
+        * 5.a Configure Transcoder Clock Select to direct the Port clock to the
+        * Transcoder.
+        */
+       intel_ddi_enable_pipe_clock(encoder, crtc_state);
+       /* 5.b Not relevant to i915 for now */
+       /*
+        * 5.c Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
+        * Transport Select
+        */
+       intel_ddi_config_transcoder_func(encoder, crtc_state);
+       /*
+        * 5.d Configure & enable DP_TP_CTL with link training pattern 1
+        * selected
+        *
+        * This will be handled by the intel_dp_start_link_train() farther
+        * down this function.
+        */
+       /* 5.e Configure voltage swing and related IO settings */
+       intel_snps_phy_ddi_vswing_sequence(encoder, level);
+       /*
+        * 5.f Configure and enable DDI_BUF_CTL
+        * 5.g Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout
+        *     after 1200 us.
+        *
+        * We only configure what the register value will be here.  Actual
+        * enabling happens during link training farther down.
+        */
+       intel_ddi_init_dp_buf_reg(encoder, crtc_state);
+       if (!is_mst)
+               intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+       intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+       /*
+        * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
+        * in the FEC_CONFIGURATION register to 1 before initiating link
+        * training"
+        */
+       intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+       /*
+        * 5.h Follow DisplayPort specification training sequence (see notes for
+        *     failure handling)
+        * 5.i If DisplayPort multi-stream - Set DP_TP_CTL link training to Idle
+        *     Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent)
+        *     (timeout after 800 us)
+        */
+       intel_dp_start_link_train(intel_dp, crtc_state);
+       /* 5.j Set DP_TP_CTL link training to Normal */
+       if (!is_trans_port_sync_mode(crtc_state))
+               intel_dp_stop_link_train(intel_dp, crtc_state);
+       /* 5.k Configure and enable FEC if needed */
+       intel_ddi_enable_fec(encoder, crtc_state);
+       intel_dsc_enable(encoder, crtc_state);
+ }
  static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
                                  struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state,
@@@ -2714,12 -2608,10 +2612,10 @@@ static void hsw_ddi_pre_enable_dp(struc
  
        if (DISPLAY_VER(dev_priv) >= 11)
                icl_ddi_vswing_sequence(encoder, crtc_state, level);
-       else if (IS_CANNONLAKE(dev_priv))
-               cnl_ddi_vswing_sequence(encoder, crtc_state, level);
        else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                bxt_ddi_vswing_sequence(encoder, crtc_state, level);
        else
-               intel_prepare_dp_ddi_buffers(encoder, crtc_state);
+               hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
  
        intel_ddi_power_up_lanes(encoder, crtc_state);
  
@@@ -2751,7 -2643,9 +2647,9 @@@ static void intel_ddi_pre_enable_dp(str
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
  
-       if (DISPLAY_VER(dev_priv) >= 12)
+       if (IS_DG2(dev_priv))
+               dg2_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
+       else if (DISPLAY_VER(dev_priv) >= 12)
                tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
        else
                hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@@ -2827,6 -2721,7 +2725,7 @@@ static void intel_ddi_pre_enable(struc
                                        conn_state);
  
                /* FIXME precompute everything properly */
+               /* FIXME how do we turn infoframes off again? */
                if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink)
                        dig_port->set_infoframes(encoder,
                                                 crtc_state->has_infoframe,
@@@ -3157,16 -3052,16 +3056,16 @@@ static void intel_enable_ddi_hdmi(struc
                            "[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
                            connector->base.id, connector->name);
  
-       if (DISPLAY_VER(dev_priv) >= 12)
+       if (IS_DG2(dev_priv))
+               intel_snps_phy_ddi_vswing_sequence(encoder, U32_MAX);
+       else if (DISPLAY_VER(dev_priv) >= 12)
                tgl_ddi_vswing_sequence(encoder, crtc_state, level);
        else if (DISPLAY_VER(dev_priv) == 11)
                icl_ddi_vswing_sequence(encoder, crtc_state, level);
-       else if (IS_CANNONLAKE(dev_priv))
-               cnl_ddi_vswing_sequence(encoder, crtc_state, level);
        else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                bxt_ddi_vswing_sequence(encoder, crtc_state, level);
        else
-               intel_prepare_hdmi_ddi_buffers(encoder, level);
+               hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state, level);
  
        if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
                skl_ddi_set_iboost(encoder, crtc_state, level);
@@@ -3260,12 -3155,6 +3159,6 @@@ static void intel_disable_ddi_dp(struc
  
        intel_dp->link_trained = false;
  
-       if (old_crtc_state->has_audio)
-               intel_audio_codec_disable(encoder,
-                                         old_crtc_state, old_conn_state);
-       intel_edp_drrs_disable(intel_dp, old_crtc_state);
-       intel_psr_disable(intel_dp, old_crtc_state);
        intel_edp_backlight_off(old_conn_state);
        /* Disable the decompression in DP Sink */
        intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
@@@ -3283,10 -3172,6 +3176,6 @@@ static void intel_disable_ddi_hdmi(stru
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        struct drm_connector *connector = old_conn_state->connector;
  
-       if (old_crtc_state->has_audio)
-               intel_audio_codec_disable(encoder,
-                                         old_crtc_state, old_conn_state);
        if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
                                               false, false))
                drm_dbg_kms(&i915->drm,
                            connector->base.id, connector->name);
  }
  
+ static void intel_pre_disable_ddi(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
+                                 const struct intel_crtc_state *old_crtc_state,
+                                 const struct drm_connector_state *old_conn_state)
+ {
+       struct intel_dp *intel_dp;
+       if (old_crtc_state->has_audio)
+               intel_audio_codec_disable(encoder, old_crtc_state,
+                                         old_conn_state);
+       if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
+               return;
+       intel_dp = enc_to_intel_dp(encoder);
+       intel_edp_drrs_disable(intel_dp, old_crtc_state);
+       intel_psr_disable(intel_dp, old_crtc_state);
+ }
  static void intel_disable_ddi(struct intel_atomic_state *state,
                              struct intel_encoder *encoder,
                              const struct intel_crtc_state *old_crtc_state,
@@@ -3510,7 -3414,7 +3418,7 @@@ static bool intel_ddi_is_audio_enabled(
        if (cpu_transcoder == TRANSCODER_EDP)
                return false;
  
-       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO))
+       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO_MMIO))
                return false;
  
        return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) &
@@@ -3526,8 -3430,6 +3434,6 @@@ void intel_ddi_compute_min_voltage_leve
                crtc_state->min_voltage_level = 3;
        else if (DISPLAY_VER(dev_priv) >= 11 && crtc_state->port_clock > 594000)
                crtc_state->min_voltage_level = 1;
-       else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
-               crtc_state->min_voltage_level = 2;
  }
  
  static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
@@@ -3594,7 -3496,7 +3500,7 @@@ static void intel_ddi_read_func_ctl(str
                                    struct intel_crtc_state *pipe_config)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc);
+       struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
        struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
        u32 temp, flags = 0;
                        pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
                pipe_config->lane_count =
                        ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
-               intel_dp_get_m_n(intel_crtc, pipe_config);
+               intel_dp_get_m_n(crtc, pipe_config);
  
                if (DISPLAY_VER(dev_priv) >= 11) {
                        i915_reg_t dp_tp_ctl = dp_tp_ctl_reg(encoder, pipe_config);
                        pipe_config->mst_master_transcoder =
                                        REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp);
  
-               intel_dp_get_m_n(intel_crtc, pipe_config);
+               intel_dp_get_m_n(crtc, pipe_config);
  
                pipe_config->infoframes.enable |=
                        intel_hdmi_infoframes_enabled(encoder, pipe_config);
@@@ -3801,6 -3703,15 +3707,15 @@@ void intel_ddi_get_clock(struct intel_e
                                                     &crtc_state->dpll_hw_state);
  }
  
+ static void dg2_ddi_get_config(struct intel_encoder *encoder,
+                               struct intel_crtc_state *crtc_state)
+ {
+       intel_mpllb_readout_hw_state(encoder, &crtc_state->mpllb_state);
+       crtc_state->port_clock = intel_mpllb_calc_port_clock(encoder, &crtc_state->mpllb_state);
+       intel_ddi_get_config(encoder, crtc_state);
+ }
  static void adls_ddi_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *crtc_state)
  {
@@@ -3868,13 -3779,6 +3783,6 @@@ static void icl_ddi_tc_get_config(struc
        intel_ddi_get_config(encoder, crtc_state);
  }
  
- static void cnl_ddi_get_config(struct intel_encoder *encoder,
-                              struct intel_crtc_state *crtc_state)
- {
-       intel_ddi_get_clock(encoder, crtc_state, cnl_ddi_get_pll(encoder));
-       intel_ddi_get_config(encoder, crtc_state);
- }
  static void bxt_ddi_get_config(struct intel_encoder *encoder,
                               struct intel_crtc_state *crtc_state)
  {
@@@ -4121,12 -4025,12 +4029,12 @@@ intel_ddi_init_dp_connector(struct inte
        dig_port->dp.set_link_train = intel_ddi_set_link_train;
        dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
  
-       if (DISPLAY_VER(dev_priv) >= 12)
+       if (IS_DG2(dev_priv))
+               dig_port->dp.set_signal_levels = dg2_set_signal_levels;
+       else if (DISPLAY_VER(dev_priv) >= 12)
                dig_port->dp.set_signal_levels = tgl_set_signal_levels;
        else if (DISPLAY_VER(dev_priv) >= 11)
                dig_port->dp.set_signal_levels = icl_set_signal_levels;
-       else if (IS_CANNONLAKE(dev_priv))
-               dig_port->dp.set_signal_levels = cnl_set_signal_levels;
        else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                dig_port->dp.set_signal_levels = bxt_set_signal_levels;
        else
@@@ -4373,15 -4277,6 +4281,6 @@@ static bool intel_ddi_a_force_4_lanes(s
        if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                return true;
  
-       /* Cannonlake: Most of SKUs don't support DDI_E, and the only
-        *             one who does also have a full A/E split called
-        *             DDI_F what makes DDI_E useless. However for this
-        *             case let's trust VBT info.
-        */
-       if (IS_CANNONLAKE(dev_priv) &&
-           !intel_bios_is_port_present(dev_priv, PORT_E))
-               return true;
        return false;
  }
  
@@@ -4486,15 -4381,6 +4385,6 @@@ static enum hpd_pin ehl_hpd_pin(struct 
        return HPD_PORT_A + port - PORT_A;
  }
  
- static enum hpd_pin cnl_hpd_pin(struct drm_i915_private *dev_priv,
-                               enum port port)
- {
-       if (port == PORT_F)
-               return HPD_PORT_E;
-       return HPD_PORT_A + port - PORT_A;
- }
  static enum hpd_pin skl_hpd_pin(struct drm_i915_private *dev_priv, enum port port)
  {
        if (HAS_PCH_TGP(dev_priv))
@@@ -4513,6 -4399,36 +4403,36 @@@ static bool intel_ddi_is_tc(struct drm_
                return false;
  }
  
+ static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
+ {
+       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       enum phy phy = intel_port_to_phy(i915, encoder->port);
+       intel_dp_encoder_suspend(encoder);
+       if (!intel_phy_is_tc(i915, phy))
+               return;
+       intel_tc_port_disconnect_phy(dig_port);
+ }
+ static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
+ {
+       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+       enum phy phy = intel_port_to_phy(i915, encoder->port);
+       intel_dp_encoder_shutdown(encoder);
+       if (!intel_phy_is_tc(i915, phy))
+               return;
+       intel_tc_port_disconnect_phy(dig_port);
+ }
  #define port_tc_name(port) ((port) - PORT_TC1 + '1')
  #define tc_port_name(tc_port) ((tc_port) - TC_PORT_1 + '1')
  
@@@ -4616,14 -4532,15 +4536,15 @@@ void intel_ddi_init(struct drm_i915_pri
        encoder->enable = intel_enable_ddi;
        encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
        encoder->pre_enable = intel_ddi_pre_enable;
+       encoder->pre_disable = intel_pre_disable_ddi;
        encoder->disable = intel_disable_ddi;
        encoder->post_disable = intel_ddi_post_disable;
        encoder->update_pipe = intel_ddi_update_pipe;
        encoder->get_hw_state = intel_ddi_get_hw_state;
        encoder->sync_state = intel_ddi_sync_state;
        encoder->initial_fastset_check = intel_ddi_initial_fastset_check;
-       encoder->suspend = intel_dp_encoder_suspend;
-       encoder->shutdown = intel_dp_encoder_shutdown;
+       encoder->suspend = intel_ddi_encoder_suspend;
+       encoder->shutdown = intel_ddi_encoder_shutdown;
        encoder->get_power_domains = intel_ddi_get_power_domains;
  
        encoder->type = INTEL_OUTPUT_DDI;
        encoder->cloneable = 0;
        encoder->pipe_mask = ~0;
  
-       if (IS_ALDERLAKE_S(dev_priv)) {
+       if (IS_DG2(dev_priv)) {
+               encoder->enable_clock = intel_mpllb_enable;
+               encoder->disable_clock = intel_mpllb_disable;
+               encoder->get_config = dg2_ddi_get_config;
+       } else if (IS_ALDERLAKE_S(dev_priv)) {
                encoder->enable_clock = adls_ddi_enable_clock;
                encoder->disable_clock = adls_ddi_disable_clock;
                encoder->is_clock_enabled = adls_ddi_is_clock_enabled;
                        encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
                        encoder->get_config = icl_ddi_combo_get_config;
                }
-       } else if (IS_CANNONLAKE(dev_priv)) {
-               encoder->enable_clock = cnl_ddi_enable_clock;
-               encoder->disable_clock = cnl_ddi_disable_clock;
-               encoder->is_clock_enabled = cnl_ddi_is_clock_enabled;
-               encoder->get_config = cnl_ddi_get_config;
        } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
                /* BXT/GLK have fixed PLL->port mapping */
                encoder->get_config = bxt_ddi_get_config;
                encoder->get_config = hsw_ddi_get_config;
        }
  
+       intel_ddi_buf_trans_init(encoder);
        if (DISPLAY_VER(dev_priv) >= 13)
                encoder->hpd_pin = xelpd_hpd_pin(dev_priv, port);
        else if (IS_DG1(dev_priv))
                encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
        else if (DISPLAY_VER(dev_priv) == 11)
                encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
-       else if (IS_CANNONLAKE(dev_priv))
-               encoder->hpd_pin = cnl_hpd_pin(dev_priv, port);
        else if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
                encoder->hpd_pin = skl_hpd_pin(dev_priv, port);
        else
  
                dig_port->hpd_pulse = intel_dp_hpd_pulse;
  
 -              /* Splitter enable for eDP MSO is limited to certain pipes. */
 -              if (dig_port->dp.mso_link_count) {
 -                      encoder->pipe_mask = BIT(PIPE_A);
 -                      if (IS_ALDERLAKE_P(dev_priv))
 -                              encoder->pipe_mask |= BIT(PIPE_B);
 -              }
 +              if (dig_port->dp.mso_link_count)
 +                      encoder->pipe_mask = intel_ddi_splitter_pipe_mask(dev_priv);
        }
  
        /* In theory we don't need the encoder->type check, but leave it just in
@@@ -59,6 -59,7 +59,7 @@@
  #include "display/intel_hdmi.h"
  #include "display/intel_lvds.h"
  #include "display/intel_sdvo.h"
+ #include "display/intel_snps_phy.h"
  #include "display/intel_tv.h"
  #include "display/intel_vdsc.h"
  #include "display/intel_vrr.h"
@@@ -975,7 -976,7 +976,7 @@@ void intel_enable_pipe(const struct int
                /* FIXME: assert CPU port conditions for SNB+ */
        }
  
-       /* Wa_22012358565:adlp */
+       /* Wa_22012358565:adl-p */
        if (DISPLAY_VER(dev_priv) == 13)
                intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
                             0, PIPE_ARB_USE_PROG_SLOTS);
@@@ -1035,6 -1036,10 +1036,10 @@@ void intel_disable_pipe(const struct in
        if (!IS_I830(dev_priv))
                val &= ~PIPECONF_ENABLE;
  
+       if (DISPLAY_VER(dev_priv) >= 12)
+               intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
+                            FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
        intel_de_write(dev_priv, reg, val);
        if ((val & PIPECONF_ENABLE) == 0)
                intel_wait_for_pipe_off(old_crtc_state);
@@@ -1331,6 -1336,9 +1336,9 @@@ retry
        ret = i915_gem_object_lock(obj, &ww);
        if (!ret && phys_cursor)
                ret = i915_gem_object_attach_phys(obj, alignment);
+       else if (!ret && HAS_LMEM(dev_priv))
+               ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
+       /* TODO: Do we need to sync when migration becomes async? */
        if (!ret)
                ret = i915_gem_object_pin_pages(obj);
        if (ret)
@@@ -1914,20 -1922,50 +1922,50 @@@ static void intel_dpt_unpin(struct i915
        i915_vma_put(dpt->vma);
  }
  
+ static bool
+ intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
+                             const struct intel_initial_plane_config *plane_config,
+                             struct drm_framebuffer **fb,
+                             struct i915_vma **vma)
+ {
+       struct intel_crtc *crtc;
+       for_each_intel_crtc(&i915->drm, crtc) {
+               struct intel_crtc_state *crtc_state =
+                       to_intel_crtc_state(crtc->base.state);
+               struct intel_plane *plane =
+                       to_intel_plane(crtc->base.primary);
+               struct intel_plane_state *plane_state =
+                       to_intel_plane_state(plane->base.state);
+               if (!crtc_state->uapi.active)
+                       continue;
+               if (!plane_state->ggtt_vma)
+                       continue;
+               if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
+                       *fb = plane_state->hw.fb;
+                       *vma = plane_state->ggtt_vma;
+                       return true;
+               }
+       }
+       return false;
+ }
  static void
- intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
+ intel_find_initial_plane_obj(struct intel_crtc *crtc,
                             struct intel_initial_plane_config *plane_config)
  {
-       struct drm_device *dev = intel_crtc->base.dev;
+       struct drm_device *dev = crtc->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
-       struct drm_crtc *c;
-       struct drm_plane *primary = intel_crtc->base.primary;
-       struct drm_plane_state *plane_state = primary->state;
-       struct intel_plane *intel_plane = to_intel_plane(primary);
-       struct intel_plane_state *intel_state =
-               to_intel_plane_state(plane_state);
        struct intel_crtc_state *crtc_state =
-               to_intel_crtc_state(intel_crtc->base.state);
+               to_intel_crtc_state(crtc->base.state);
+       struct intel_plane *plane =
+               to_intel_plane(crtc->base.primary);
+       struct intel_plane_state *plane_state =
+               to_intel_plane_state(plane->base.state);
        struct drm_framebuffer *fb;
        struct i915_vma *vma;
  
        if (!plane_config->fb)
                return;
  
-       if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
+       if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
                fb = &plane_config->fb->base;
                vma = plane_config->vma;
                goto valid_fb;
         * Failed to alloc the obj, check to see if we should share
         * an fb with another CRTC instead
         */
-       for_each_crtc(dev, c) {
-               struct intel_plane_state *state;
-               if (c == &intel_crtc->base)
-                       continue;
-               if (!to_intel_crtc_state(c->state)->uapi.active)
-                       continue;
-               state = to_intel_plane_state(c->primary->state);
-               if (!state->ggtt_vma)
-                       continue;
-               if (intel_plane_ggtt_offset(state) == plane_config->base) {
-                       fb = state->hw.fb;
-                       vma = state->ggtt_vma;
-                       goto valid_fb;
-               }
-       }
+       if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
+               goto valid_fb;
  
        /*
         * We've failed to reconstruct the BIOS FB.  Current display state
         * simplest solution is to just disable the primary plane now and
         * pretend the BIOS never had it enabled.
         */
-       intel_plane_disable_noatomic(intel_crtc, intel_plane);
+       intel_plane_disable_noatomic(crtc, plane);
        if (crtc_state->bigjoiner) {
                struct intel_crtc *slave =
                        crtc_state->bigjoiner_linked_crtc;
        return;
  
  valid_fb:
-       plane_state->rotation = plane_config->rotation;
-       intel_fb_fill_view(to_intel_framebuffer(fb), plane_state->rotation,
-                          &intel_state->view);
+       plane_state->uapi.rotation = plane_config->rotation;
+       intel_fb_fill_view(to_intel_framebuffer(fb),
+                          plane_state->uapi.rotation, &plane_state->view);
  
        __i915_vma_pin(vma);
-       intel_state->ggtt_vma = i915_vma_get(vma);
-       if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
-               if (vma->fence)
-                       intel_state->flags |= PLANE_HAS_FENCE;
+       plane_state->ggtt_vma = i915_vma_get(vma);
+       if (intel_plane_uses_fence(plane_state) &&
+           i915_vma_pin_fence(vma) == 0 && vma->fence)
+               plane_state->flags |= PLANE_HAS_FENCE;
  
-       plane_state->src_x = 0;
-       plane_state->src_y = 0;
-       plane_state->src_w = fb->width << 16;
-       plane_state->src_h = fb->height << 16;
+       plane_state->uapi.src_x = 0;
+       plane_state->uapi.src_y = 0;
+       plane_state->uapi.src_w = fb->width << 16;
+       plane_state->uapi.src_h = fb->height << 16;
  
-       plane_state->crtc_x = 0;
-       plane_state->crtc_y = 0;
-       plane_state->crtc_w = fb->width;
-       plane_state->crtc_h = fb->height;
+       plane_state->uapi.crtc_x = 0;
+       plane_state->uapi.crtc_y = 0;
+       plane_state->uapi.crtc_w = fb->width;
+       plane_state->uapi.crtc_h = fb->height;
  
        if (plane_config->tiling)
                dev_priv->preserve_bios_swizzle = true;
  
-       plane_state->fb = fb;
+       plane_state->uapi.fb = fb;
        drm_framebuffer_get(fb);
  
-       plane_state->crtc = &intel_crtc->base;
-       intel_plane_copy_uapi_to_hw_state(intel_state, intel_state,
-                                         intel_crtc);
+       plane_state->uapi.crtc = &crtc->base;
+       intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
  
        intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
  
-       atomic_or(to_intel_plane(primary)->frontbuffer_bit,
-                 &to_intel_frontbuffer(fb)->bits);
+       atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
  }
  
  unsigned int
@@@ -2193,8 -2212,29 +2212,29 @@@ unlock
        clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
  }
  
- static void icl_set_pipe_chicken(struct intel_crtc *crtc)
+ static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state)
  {
+       if (crtc_state->pch_pfit.enabled &&
+           (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) ||
+            crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) ||
+            crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420))
+               return false;
+       if (crtc_state->dsc.compression_enable)
+               return false;
+       if (crtc_state->has_psr2)
+               return false;
+       if (crtc_state->splitter.enable)
+               return false;
+       return true;
+ }
+ static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
+ {
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        u32 tmp;
         */
        tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
  
-       /*
-        * "The underrun recovery mechanism should be disabled
-        *  when the following is enabled for this pipe:
-        *  WiDi
-        *  Downscaling (this includes YUV420 fullblend)
-        *  COG
-        *  DSC
-        *  PSR2"
-        *
-        * FIXME: enable whenever possible...
-        */
-       if (IS_ALDERLAKE_P(dev_priv))
-               tmp |= UNDERRUN_RECOVERY_DISABLE;
+       if (IS_DG2(dev_priv)) {
+               /*
+                * Underrun recovery must always be disabled on DG2.  However
+                * the chicken bit meaning is inverted compared to other
+                * platforms.
+                */
+               tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
+       } else if (DISPLAY_VER(dev_priv) >= 13) {
+               if (underrun_recovery_supported(crtc_state))
+                       tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP;
+               else
+                       tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
+       }
  
        intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
  }
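
The new underrun_recovery_supported() above encodes the bspec rule that the recovery mechanism must be off whenever the pipe downscales or blends YUV420, or uses DSC, PSR2, or the eDP splitter; on DG2 the corresponding chicken bit has inverted polarity and recovery stays off unconditionally. A compact sketch of that decision, assuming hypothetical bit names:

/* Sketch only: bit names are illustrative, not real PIPE_CHICKEN fields. */
#include <stdbool.h>
#include <stdint.h>

#define RECOVERY_DISABLE_ADLP_LIKE      (1u << 30)      /* set   => recovery off */
#define RECOVERY_ENABLE_DG2_LIKE        (1u << 30)      /* clear => recovery off */

static uint32_t pipe_chicken_bits(uint32_t tmp, bool is_dg2, bool recovery_ok)
{
        /* DG2: recovery is always off, and the bit polarity is inverted. */
        if (is_dg2)
                return tmp & ~RECOVERY_ENABLE_DG2_LIKE;

        /* ADL-P style: disable only when one of the listed features is active. */
        if (recovery_ok)
                return tmp & ~RECOVERY_DISABLE_ADLP_LIKE;
        return tmp | RECOVERY_DISABLE_ADLP_LIKE;
}
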
@@@ -2706,10 -2746,10 +2746,10 @@@ void hsw_disable_ips(const struct intel
        intel_wait_for_vblank(dev_priv, crtc->pipe);
  }
  
- static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
+ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc)
  {
-       if (intel_crtc->overlay)
-               (void) intel_overlay_switch_off(intel_crtc->overlay);
+       if (crtc->overlay)
+               (void) intel_overlay_switch_off(crtc->overlay);
  
        /* Let userspace switch the overlay on again. In most cases userspace
         * has to recompute where to put it anyway.
@@@ -3177,6 -3217,28 +3217,28 @@@ static void intel_encoders_enable(struc
        }
  }
  
+ static void intel_encoders_pre_disable(struct intel_atomic_state *state,
+                                      struct intel_crtc *crtc)
+ {
+       const struct intel_crtc_state *old_crtc_state =
+               intel_atomic_get_old_crtc_state(state, crtc);
+       const struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+       for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(old_conn_state->best_encoder);
+               if (old_conn_state->crtc != &crtc->base)
+                       continue;
+               if (encoder->pre_disable)
+                       encoder->pre_disable(state, encoder, old_crtc_state,
+                                            old_conn_state);
+       }
+ }
  static void intel_encoders_disable(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
  {
@@@ -3386,13 -3448,17 +3448,17 @@@ static void glk_pipe_scaler_clock_gatin
        intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
  }
  
- static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
+ static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
  {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        u32 val;
  
-       val = MBUS_DBOX_A_CREDIT(2);
+       /* Wa_22010947358:adl-p */
+       if (IS_ALDERLAKE_P(dev_priv))
+               val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
+       else
+               val = MBUS_DBOX_A_CREDIT(2);
  
        if (DISPLAY_VER(dev_priv) >= 12) {
                val |= MBUS_DBOX_BW_CREDIT(2);
@@@ -3460,7 -3526,8 +3526,8 @@@ static void icl_ddi_bigjoiner_pre_enabl
                 * Enable sequence steps 1-7 on bigjoiner master
                 */
                intel_encoders_pre_pll_enable(state, master);
-               intel_enable_shared_dpll(master_crtc_state);
+               if (master_crtc_state->shared_dpll)
+                       intel_enable_shared_dpll(master_crtc_state);
                intel_encoders_pre_enable(state, master);
  
                /* and DSC on slave */
@@@ -3518,7 -3585,7 +3585,7 @@@ static void hsw_crtc_enable(struct inte
  
        crtc->active = true;
  
-       /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
+       /* Display WA #1180: WaDisableScalarClockGating: glk */
        psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
                new_crtc_state->pch_pfit.enabled;
        if (psl_clkgate_wa)
        hsw_set_linetime_wm(new_crtc_state);
  
        if (DISPLAY_VER(dev_priv) >= 11)
-               icl_set_pipe_chicken(crtc);
+               icl_set_pipe_chicken(new_crtc_state);
  
        if (dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);
  
-       if (DISPLAY_VER(dev_priv) >= 11)
-               icl_pipe_mbus_enable(crtc);
+       if (DISPLAY_VER(dev_priv) >= 11) {
+               const struct intel_dbuf_state *dbuf_state =
+                               intel_atomic_get_new_dbuf_state(state);
+               icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
+       }
  
        if (new_crtc_state->bigjoiner_slave)
                intel_crtc_vblank_on(new_crtc_state);
@@@ -3682,6 -3753,13 +3753,13 @@@ bool intel_phy_is_combo(struct drm_i915
  {
        if (phy == PHY_NONE)
                return false;
+       else if (IS_DG2(dev_priv))
+               /*
+                * DG2 outputs labelled as "combo PHY" in the bspec use
+                * SNPS PHYs with completely different programming,
+                * hence we always return false here.
+                */
+               return false;
        else if (IS_ALDERLAKE_S(dev_priv))
                return phy <= PHY_E;
        else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
  
  bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
  {
-       if (IS_ALDERLAKE_P(dev_priv))
+       if (IS_DG2(dev_priv))
+               /* DG2's "TC1" output uses a SNPS PHY */
+               return false;
+       else if (IS_ALDERLAKE_P(dev_priv))
                return phy >= PHY_F && phy <= PHY_I;
        else if (IS_TIGERLAKE(dev_priv))
                return phy >= PHY_D && phy <= PHY_I;
                return false;
  }
  
+ bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
+ {
+       if (phy == PHY_NONE)
+               return false;
+       else if (IS_DG2(dev_priv))
+               /*
+                * All four "combo" ports and the TC1 port (PHY E) use
+                * Synopsys PHYs.
+                */
+               return phy <= PHY_E;
+       return false;
+ }
  enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
  {
        if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
@@@ -3850,7 -3945,7 +3945,7 @@@ static u64 get_crtc_power_domains(struc
        }
  
        if (HAS_DDI(dev_priv) && crtc_state->has_audio)
-               mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
+               mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
  
        if (crtc_state->shared_dpll)
                mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
@@@ -5746,18 -5841,16 +5841,18 @@@ static void bdw_set_pipemisc(const stru
  
        switch (crtc_state->pipe_bpp) {
        case 18:
 -              val |= PIPEMISC_DITHER_6_BPC;
 +              val |= PIPEMISC_6_BPC;
                break;
        case 24:
 -              val |= PIPEMISC_DITHER_8_BPC;
 +              val |= PIPEMISC_8_BPC;
                break;
        case 30:
 -              val |= PIPEMISC_DITHER_10_BPC;
 +              val |= PIPEMISC_10_BPC;
                break;
        case 36:
 -              val |= PIPEMISC_DITHER_12_BPC;
 +              /* Port output 12BPC defined for ADLP+ */
 +              if (DISPLAY_VER(dev_priv) > 12)
 +                      val |= PIPEMISC_12_BPC_ADLP;
                break;
        default:
                MISSING_CASE(crtc_state->pipe_bpp);
@@@ -5810,27 -5903,15 +5905,27 @@@ int bdw_get_pipemisc_bpp(struct intel_c
  
        tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
  
 -      switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
 -      case PIPEMISC_DITHER_6_BPC:
 +      switch (tmp & PIPEMISC_BPC_MASK) {
 +      case PIPEMISC_6_BPC:
                return 18;
 -      case PIPEMISC_DITHER_8_BPC:
 +      case PIPEMISC_8_BPC:
                return 24;
 -      case PIPEMISC_DITHER_10_BPC:
 +      case PIPEMISC_10_BPC:
                return 30;
 -      case PIPEMISC_DITHER_12_BPC:
 -              return 36;
 +      /*
 +       * PORT OUTPUT 12 BPC defined for ADLP+.
 +       *
 +       * TODO:
 +       * For previous platforms with DSI interface, bits 5:7
 +       * are used for storing pipe_bpp irrespective of dithering.
 +       * Since the value of 12 BPC is not defined for these bits
 +       * on older platforms, need to find a workaround for 12 BPC
 +       * MIPI DSI HW readout.
 +       */
 +      case PIPEMISC_12_BPC_ADLP:
 +              if (DISPLAY_VER(dev_priv) > 12)
 +                      return 36;
 +              fallthrough;
        default:
                MISSING_CASE(tmp);
                return 0;
@@@ -6487,23 -6568,21 +6582,21 @@@ int intel_get_load_detect_pipe(struct d
                               struct intel_load_detect_pipe *old,
                               struct drm_modeset_acquire_ctx *ctx)
  {
-       struct intel_crtc *intel_crtc;
-       struct intel_encoder *intel_encoder =
+       struct intel_encoder *encoder =
                intel_attached_encoder(to_intel_connector(connector));
-       struct drm_crtc *possible_crtc;
-       struct drm_encoder *encoder = &intel_encoder->base;
-       struct drm_crtc *crtc = NULL;
-       struct drm_device *dev = encoder->dev;
+       struct intel_crtc *possible_crtc;
+       struct intel_crtc *crtc = NULL;
+       struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_mode_config *config = &dev->mode_config;
        struct drm_atomic_state *state = NULL, *restore_state = NULL;
        struct drm_connector_state *connector_state;
        struct intel_crtc_state *crtc_state;
-       int ret, i = -1;
+       int ret;
  
        drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
                    connector->base.id, connector->name,
-                   encoder->base.id, encoder->name);
+                   encoder->base.base.id, encoder->base.name);
  
        old->restore_state = NULL;
  
  
        /* See if we already have a CRTC for this connector */
        if (connector->state->crtc) {
-               crtc = connector->state->crtc;
+               crtc = to_intel_crtc(connector->state->crtc);
  
-               ret = drm_modeset_lock(&crtc->mutex, ctx);
+               ret = drm_modeset_lock(&crtc->base.mutex, ctx);
                if (ret)
                        goto fail;
  
        }
  
        /* Find an unused one (if possible) */
-       for_each_crtc(dev, possible_crtc) {
-               i++;
-               if (!(encoder->possible_crtcs & (1 << i)))
+       for_each_intel_crtc(dev, possible_crtc) {
+               if (!(encoder->base.possible_crtcs &
+                     drm_crtc_mask(&possible_crtc->base)))
                        continue;
  
-               ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
+               ret = drm_modeset_lock(&possible_crtc->base.mutex, ctx);
                if (ret)
                        goto fail;
  
-               if (possible_crtc->state->enable) {
-                       drm_modeset_unlock(&possible_crtc->mutex);
+               if (possible_crtc->base.state->enable) {
+                       drm_modeset_unlock(&possible_crtc->base.mutex);
                        continue;
                }
  
        }
  
  found:
-       intel_crtc = to_intel_crtc(crtc);
        state = drm_atomic_state_alloc(dev);
        restore_state = drm_atomic_state_alloc(dev);
        if (!state || !restore_state) {
                goto fail;
        }
  
-       ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
+       ret = drm_atomic_set_crtc_for_connector(connector_state, &crtc->base);
        if (ret)
                goto fail;
  
-       crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+       crtc_state = intel_atomic_get_crtc_state(state, crtc);
        if (IS_ERR(crtc_state)) {
                ret = PTR_ERR(crtc_state);
                goto fail;
        if (ret)
                goto fail;
  
-       ret = intel_modeset_disable_planes(state, crtc);
+       ret = intel_modeset_disable_planes(state, &crtc->base);
        if (ret)
                goto fail;
  
        ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
        if (!ret)
-               ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
+               ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, &crtc->base));
        if (!ret)
-               ret = drm_atomic_add_affected_planes(restore_state, crtc);
+               ret = drm_atomic_add_affected_planes(restore_state, &crtc->base);
        if (ret) {
                drm_dbg_kms(&dev_priv->drm,
                            "Failed to create a copy of old state to restore: %i\n",
        drm_atomic_state_put(state);
  
        /* let the connector get through one full cycle before testing */
-       intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
+       intel_wait_for_vblank(dev_priv, crtc->pipe);
        return true;
  
  fail:
@@@ -7295,12 -7372,13 +7386,13 @@@ static int intel_crtc_atomic_check(stru
        }
  
        if (dev_priv->display.compute_pipe_wm) {
-               ret = dev_priv->display.compute_pipe_wm(crtc_state);
+               ret = dev_priv->display.compute_pipe_wm(state, crtc);
                if (ret) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "Target pipe watermarks are invalid\n");
                        return ret;
                }
        }
  
        if (dev_priv->display.compute_intermediate_wm) {
                 * old state and the new state.  We can program these
                 * immediately.
                 */
-               ret = dev_priv->display.compute_intermediate_wm(crtc_state);
+               ret = dev_priv->display.compute_intermediate_wm(state, crtc);
                if (ret) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "No valid intermediate pipe watermarks are possible\n");
@@@ -8636,10 -8714,11 +8728,11 @@@ intel_pipe_config_compare(const struct 
  
        PIPE_CONF_CHECK_BOOL(double_wide);
  
-       PIPE_CONF_CHECK_P(shared_dpll);
+       if (dev_priv->dpll.mgr)
+               PIPE_CONF_CHECK_P(shared_dpll);
  
        /* FIXME do the readout properly and get rid of this quirk */
-       if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
+       if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
                PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
                PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
                PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
+       }
  
+       if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
                PIPE_CONF_CHECK_X(dsi_pll.ctrl);
                PIPE_CONF_CHECK_X(dsi_pll.div);
  
@@@ -9009,6 -9090,10 +9104,10 @@@ verify_crtc_state(struct intel_crtc *cr
        if (!new_crtc_state->hw.active)
                return;
  
+       if (new_crtc_state->bigjoiner_slave)
+               /* No PLLs set for slave */
+               pipe_config->shared_dpll = NULL;
        intel_pipe_config_sanity_check(dev_priv, pipe_config);
  
        if (!intel_pipe_config_compare(new_crtc_state,
@@@ -9111,6 -9196,55 +9210,55 @@@ verify_shared_dpll_state(struct intel_c
        }
  }
  
+ static void
+ verify_mpllb_state(struct intel_atomic_state *state,
+                  struct intel_crtc_state *new_crtc_state)
+ {
+       struct drm_i915_private *i915 = to_i915(state->base.dev);
+       struct intel_mpllb_state mpllb_hw_state = { 0 };
+       struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+       struct intel_encoder *encoder;
+       if (!IS_DG2(i915))
+               return;
+       if (!new_crtc_state->hw.active)
+               return;
+       if (new_crtc_state->bigjoiner_slave)
+               return;
+       encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
+       intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
+ #define MPLLB_CHECK(name) do { \
+       if (mpllb_sw_state->name != mpllb_hw_state.name) { \
+               pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
+                                    "(expected 0x%08x, found 0x%08x)", \
+                                    mpllb_sw_state->name, \
+                                    mpllb_hw_state.name); \
+       } \
+ } while (0)
+       MPLLB_CHECK(mpllb_cp);
+       MPLLB_CHECK(mpllb_div);
+       MPLLB_CHECK(mpllb_div2);
+       MPLLB_CHECK(mpllb_fracn1);
+       MPLLB_CHECK(mpllb_fracn2);
+       MPLLB_CHECK(mpllb_sscen);
+       MPLLB_CHECK(mpllb_sscstep);
+       /*
+        * ref_control is handled by the hardware/firmware and never
+        * programmed by the software, but the proper values are supplied
+        * in the bspec for verification purposes.
+        */
+       MPLLB_CHECK(ref_control);
+ #undef MPLLB_CHECK
+ }
  static void
  intel_modeset_verify_crtc(struct intel_crtc *crtc,
                          struct intel_atomic_state *state,
        verify_connector_state(state, crtc);
        verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
        verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
+       verify_mpllb_state(state, new_crtc_state);
  }
  
  static void
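The new verify_mpllb_state() above follows the driver's usual cross-check pattern: read the PLL configuration back from hardware and compare it field by field against the software copy, reporting each mismatching field by name. A minimal standalone sketch of that pattern, with hypothetical field names and plain printf() standing in for pipe_config_mismatch():

#include <stdint.h>
#include <stdio.h>

struct pll_state {
	uint32_t div;
	uint32_t frac;
	uint32_t ssc;
};

/* Stand-in for the MMIO readout done by the real readout helper. */
static void readout_hw_state(struct pll_state *hw)
{
	hw->div = 0x42;
	hw->frac = 0x1000;
	hw->ssc = 0x0;
}

/* Compare one field of the SW and HW copies and report mismatches by name. */
#define PLL_CHECK(sw, hw, name) do { \
	if ((sw)->name != (hw)->name) \
		printf("PLL:%s (expected 0x%08x, found 0x%08x)\n", #name, \
		       (unsigned int)(sw)->name, (unsigned int)(hw)->name); \
} while (0)

int main(void)
{
	struct pll_state sw = { .div = 0x42, .frac = 0x1000, .ssc = 0x1 };
	struct pll_state hw;

	readout_hw_state(&hw);
	PLL_CHECK(&sw, &hw, div);
	PLL_CHECK(&sw, &hw, frac);
	PLL_CHECK(&sw, &hw, ssc);	/* reports the one mismatch */
	return 0;
}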
@@@ -9749,7 -9884,7 +9898,7 @@@ static int intel_atomic_check_async(str
  
                /*
                 * FIXME: This check is kept generic for all platforms.
-                * Need to verify this for all gen9 and gen10 platforms to enable
+                * Need to verify this for all gen9 platforms to enable
                 * this selectively if required.
                 */
                switch (new_plane_state->hw.fb->modifier) {
@@@ -10160,7 -10295,7 +10309,7 @@@ static void intel_pipe_fastset(const st
                hsw_set_linetime_wm(new_crtc_state);
  
        if (DISPLAY_VER(dev_priv) >= 11)
-               icl_set_pipe_chicken(crtc);
+               icl_set_pipe_chicken(new_crtc_state);
  }
  
  static void commit_pipe_pre_planes(struct intel_atomic_state *state,
@@@ -10294,6 -10429,8 +10443,8 @@@ static void intel_old_crtc_state_disabl
  
        drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
  
+       intel_encoders_pre_disable(state, crtc);
        intel_crtc_disable_planes(state, crtc);
  
        /*
@@@ -11328,7 -11465,12 +11479,12 @@@ static void intel_setup_outputs(struct 
        if (!HAS_DISPLAY(dev_priv))
                return;
  
-       if (IS_ALDERLAKE_P(dev_priv)) {
+       if (IS_DG2(dev_priv)) {
+               intel_ddi_init(dev_priv, PORT_A);
+               intel_ddi_init(dev_priv, PORT_B);
+               intel_ddi_init(dev_priv, PORT_C);
+               intel_ddi_init(dev_priv, PORT_D_XELPD);
+       } else if (IS_ALDERLAKE_P(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_TC1);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_C);
                vlv_dsi_init(dev_priv);
-       } else if (DISPLAY_VER(dev_priv) == 10) {
-               intel_ddi_init(dev_priv, PORT_A);
-               intel_ddi_init(dev_priv, PORT_B);
-               intel_ddi_init(dev_priv, PORT_C);
-               intel_ddi_init(dev_priv, PORT_D);
-               intel_ddi_init(dev_priv, PORT_E);
-               intel_ddi_init(dev_priv, PORT_F);
        } else if (DISPLAY_VER(dev_priv) >= 9) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
@@@ -11790,7 -11925,7 +11939,7 @@@ intel_user_framebuffer_create(struct dr
  
        /* object is backed with LMEM for discrete */
        i915 = to_i915(obj->base.dev);
-       if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj)) {
+       if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
                /* object is "remote", not in local memory */
                i915_gem_object_put(obj);
                return ERR_PTR(-EREMOTE);
@@@ -13136,7 -13271,7 +13285,7 @@@ get_encoder_power_domains(struct drm_i9
  static void intel_early_display_was(struct drm_i915_private *dev_priv)
  {
        /*
-        * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
+        * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
         * Also known as Wa_14010480278.
         */
        if (IS_DISPLAY_VER(dev_priv, 10, 12))
@@@ -18,6 -18,7 +18,7 @@@
  #include "intel_pm.h"
  #include "intel_pps.h"
  #include "intel_sideband.h"
+ #include "intel_snps_phy.h"
  #include "intel_tc.h"
  #include "intel_vga.h"
  
@@@ -106,8 -107,10 +107,10 @@@ intel_display_power_domain_str(enum int
                return "PORT_OTHER";
        case POWER_DOMAIN_VGA:
                return "VGA";
-       case POWER_DOMAIN_AUDIO:
-               return "AUDIO";
+       case POWER_DOMAIN_AUDIO_MMIO:
+               return "AUDIO_MMIO";
+       case POWER_DOMAIN_AUDIO_PLAYBACK:
+               return "AUDIO_PLAYBACK";
        case POWER_DOMAIN_AUX_A:
                return "AUX_A";
        case POWER_DOMAIN_AUX_B:
@@@ -341,6 -344,17 +344,17 @@@ static void hsw_wait_for_power_well_ena
  {
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
+       int enable_delay = power_well->desc->hsw.fixed_enable_delay;
+       /*
+        * For some power wells we're not supposed to watch the status bit for
+        * an ack, but rather just wait a fixed amount of time and then
+        * proceed.  This is only used on DG2.
+        */
+       if (IS_DG2(dev_priv) && enable_delay) {
+               usleep_range(enable_delay, 2 * enable_delay);
+               return;
+       }
  
        /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
        if (intel_de_wait_for_set(dev_priv, regs->driver,
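The fixed_enable_delay handling above swaps the usual poll-the-ack-bit-with-a-timeout wait for a plain sleep of a fixed, worst-case time. A rough userspace illustration of the two strategies, with a hypothetical poll_status() stub and nanosleep() standing in for usleep_range():

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical ack poll; the real code reads a power-well status register. */
static bool poll_status(void)
{
	static int reads;

	return ++reads > 3;	/* pretend the ack shows up on the 4th read */
}

static void sleep_us(long us)
{
	struct timespec ts = {
		.tv_sec = us / 1000000,
		.tv_nsec = (us % 1000000) * 1000L,
	};

	nanosleep(&ts, NULL);
}

/* Strategy 1: no ack bit to watch, just wait a fixed worst-case time. */
static void wait_fixed(long enable_delay_us)
{
	sleep_us(enable_delay_us);
}

/* Strategy 2: poll the ack bit, giving up after a timeout. */
static int wait_for_ack(long timeout_us)
{
	long waited_us = 0;

	while (!poll_status()) {
		if (waited_us >= timeout_us)
			return -1;	/* timed out */
		sleep_us(10);
		waited_us += 10;
	}
	return 0;
}

int main(void)
{
	wait_fixed(600);	/* e.g. a 600 us fixed enable delay */
	printf("ack poll result: %d\n", wait_for_ack(100));
	return 0;
}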
@@@ -436,17 -450,6 +450,6 @@@ static void hsw_power_well_enable(struc
  
        hsw_wait_for_power_well_enable(dev_priv, power_well, false);
  
-       /* Display WA #1178: cnl */
-       if (IS_CANNONLAKE(dev_priv) &&
-           pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
-           pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
-               u32 val;
-               val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
-               val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
-               intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
-       }
        if (power_well->desc->hsw.has_fuses) {
                enum skl_power_gate pg;
  
@@@ -961,8 -964,9 +964,9 @@@ static void bxt_disable_dc9(struct drm_
  static void assert_dmc_loaded(struct drm_i915_private *dev_priv)
  {
        drm_WARN_ONCE(&dev_priv->drm,
-                     !intel_de_read(dev_priv, DMC_PROGRAM(0)),
-                     "DMC program storage start is NULL\n");
+                     !intel_de_read(dev_priv,
+                                    DMC_PROGRAM(dev_priv->dmc.dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)),
+                     "DMC program storage start is NULL\n");
        drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_SSP_BASE),
                      "DMC SSP Base Not fine\n");
        drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, DMC_HTP_SKL),
@@@ -2507,7 -2511,8 +2511,8 @@@ intel_display_power_put_mask_in_set(str
        BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |           \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |          \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
        BIT_ULL(POWER_DOMAIN_GMBUS) |           \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |           \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |          \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
        BIT_ULL(POWER_DOMAIN_AUX_D) |           \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
        BIT_ULL(POWER_DOMAIN_VGA) |                             \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_INIT))
  
  #define BDW_DISPLAY_POWER_DOMAINS (                   \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
        BIT_ULL(POWER_DOMAIN_VGA) |                             \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_INIT))
  
  #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (               \
        BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
        BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
        BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_VGA) |                             \
        BIT_ULL(POWER_DOMAIN_INIT))
  #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (                \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
        BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
        BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_VGA) |                             \
        BIT_ULL(POWER_DOMAIN_INIT))
  #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (            \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
        BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
        BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_VGA) |                             \
        BIT_ULL(POWER_DOMAIN_INIT))
  #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (          \
        BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
        BIT_ULL(POWER_DOMAIN_INIT))
  
- #define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (               \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
-       BIT_ULL(POWER_DOMAIN_VGA) |                             \
-       BIT_ULL(POWER_DOMAIN_INIT))
- #define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (          \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
- #define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (          \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
- #define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (          \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
- #define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (          \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
- #define CNL_DISPLAY_AUX_A_POWER_DOMAINS (             \
-       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
-       BIT_ULL(POWER_DOMAIN_INIT))
- #define CNL_DISPLAY_AUX_B_POWER_DOMAINS (             \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
- #define CNL_DISPLAY_AUX_C_POWER_DOMAINS (             \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
- #define CNL_DISPLAY_AUX_D_POWER_DOMAINS (             \
-       BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
- #define CNL_DISPLAY_AUX_F_POWER_DOMAINS (             \
-       BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
- #define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (          \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
- #define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (            \
-       CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
-       BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
-       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
-       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
  /*
   * ICL PW_0/PG_0 domains (HW/DMC control):
   * - PCI
        BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |               \
        BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |               \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_INIT))
        /*
         * - transcoder WD
        BIT_ULL(POWER_DOMAIN_AUX_TBT5) |                \
        BIT_ULL(POWER_DOMAIN_AUX_TBT6) |                \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_INIT))
  
  #define TGL_PW_2_POWER_DOMAINS (                      \
        RKL_PW_4_POWER_DOMAINS |                        \
        BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |      \
        BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
        BIT_ULL(POWER_DOMAIN_INIT))
  
+ /*
+  * From DG1 onwards, Audio MMIO/VERBS lies in the PG0 power well.
+  */
+ #define DG1_PW_3_POWER_DOMAINS (                      \
+       TGL_PW_4_POWER_DOMAINS |                        \
+       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |      \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |      \
+       BIT_ULL(POWER_DOMAIN_AUX_USBC1) |               \
+       BIT_ULL(POWER_DOMAIN_AUX_USBC2) |               \
+       BIT_ULL(POWER_DOMAIN_VGA) |                     \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
+       BIT_ULL(POWER_DOMAIN_INIT))
+ #define DG1_PW_2_POWER_DOMAINS (                      \
+       DG1_PW_3_POWER_DOMAINS |                        \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |     \
+       BIT_ULL(POWER_DOMAIN_INIT))
+ #define DG1_DISPLAY_DC_OFF_POWER_DOMAINS (            \
+       DG1_PW_3_POWER_DOMAINS |                        \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
+       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
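The *_POWER_DOMAINS definitions above are plain 64-bit masks assembled with BIT_ULL() from per-domain enum values; roughly, a power well covers a domain when the corresponding bit is set in its mask. A tiny standalone sketch of the same construction, with made-up domain names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))

enum power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_AUDIO_MMIO,
	POWER_DOMAIN_AUDIO_PLAYBACK,
	POWER_DOMAIN_INIT,
};

/* A power well's domain mask, built the same way as the macros above. */
#define EXAMPLE_PW_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_PIPE_B) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

static bool well_covers_domain(uint64_t well_domains, enum power_domain d)
{
	return well_domains & BIT_ULL(d);
}

int main(void)
{
	printf("audio playback: %d\n",
	       well_covers_domain(EXAMPLE_PW_DOMAINS, POWER_DOMAIN_AUDIO_PLAYBACK));
	printf("audio mmio:     %d\n",
	       well_covers_domain(EXAMPLE_PW_DOMAINS, POWER_DOMAIN_AUDIO_MMIO));
	return 0;
}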
  /*
   * XE_LPD Power Domains
   *
        XELPD_PW_B_POWER_DOMAINS |                      \
        XELPD_PW_C_POWER_DOMAINS |                      \
        XELPD_PW_D_POWER_DOMAINS |                      \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) |  \
  
  #define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS (          \
        XELPD_PW_2_POWER_DOMAINS |                      \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
        BIT_ULL(POWER_DOMAIN_MODESET) |                 \
        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
        BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
@@@ -3694,148 -3681,6 +3681,6 @@@ static const struct i915_power_well_des
        },
  };
  
- static const struct i915_power_well_desc cnl_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = true,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "power well 1",
-               /* Handled by the DMC firmware */
-               .always_on = true,
-               .domains = 0,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_1,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_PW_1,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "AUX A",
-               .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
-               },
-       },
-       {
-               .name = "AUX B",
-               .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
-               },
-       },
-       {
-               .name = "AUX C",
-               .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
-               },
-       },
-       {
-               .name = "AUX D",
-               .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
-               },
-       },
-       {
-               .name = "DC off",
-               .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
-               .ops = &gen9_dc_off_power_well_ops,
-               .id = SKL_DISP_DC_OFF,
-       },
-       {
-               .name = "power well 2",
-               .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_2,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_PW_2,
-                       .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
-                       .hsw.has_vga = true,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "DDI A IO power well",
-               .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
-               },
-       },
-       {
-               .name = "DDI B IO power well",
-               .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
-               },
-       },
-       {
-               .name = "DDI C IO power well",
-               .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
-               },
-       },
-       {
-               .name = "DDI D IO power well",
-               .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
-               },
-       },
-       {
-               .name = "DDI F IO power well",
-               .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = CNL_DISP_PW_DDI_F_IO,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
-               },
-       },
-       {
-               .name = "AUX F",
-               .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = CNL_DISP_PW_DDI_F_AUX,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
-               },
-       },
- };
  static const struct i915_power_well_ops icl_aux_power_well_ops = {
        .sync_hw = hsw_power_well_sync_hw,
        .enable = icl_aux_power_well_enable,
@@@ -4642,6 -4487,165 +4487,165 @@@ static const struct i915_power_well_des
        },
  };
  
+ static const struct i915_power_well_desc dg1_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = true,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "power well 1",
+               /* Handled by the DMC firmware */
+               .always_on = true,
+               .domains = 0,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_1,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "DC off",
+               .domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS,
+               .ops = &gen9_dc_off_power_well_ops,
+               .id = SKL_DISP_DC_OFF,
+       },
+       {
+               .name = "power well 2",
+               .domains = DG1_PW_2_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_2,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "power well 3",
+               .domains = DG1_PW_3_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_3,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+                       .hsw.irq_pipe_mask = BIT(PIPE_B),
+                       .hsw.has_vga = true,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "DDI A IO",
+               .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+               }
+       },
+       {
+               .name = "DDI B IO",
+               .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+               }
+       },
+       {
+               .name = "DDI IO TC1",
+               .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
+               },
+       },
+       {
+               .name = "DDI IO TC2",
+               .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
+               },
+       },
+       {
+               .name = "AUX A",
+               .domains = TGL_AUX_A_IO_POWER_DOMAINS,
+               .ops = &icl_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+               },
+       },
+       {
+               .name = "AUX B",
+               .domains = TGL_AUX_B_IO_POWER_DOMAINS,
+               .ops = &icl_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+               },
+       },
+       {
+               .name = "AUX USBC1",
+               .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
+               .ops = &icl_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
+                       .hsw.is_tc_tbt = false,
+               },
+       },
+       {
+               .name = "AUX USBC2",
+               .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
+               .ops = &icl_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
+                       .hsw.is_tc_tbt = false,
+               },
+       },
+       {
+               .name = "power well 4",
+               .domains = TGL_PW_4_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+                       .hsw.has_fuses = true,
+                       .hsw.irq_pipe_mask = BIT(PIPE_C),
+               }
+       },
+       {
+               .name = "power well 5",
+               .domains = TGL_PW_5_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = TGL_PW_CTL_IDX_PW_5,
+                       .hsw.has_fuses = true,
+                       .hsw.irq_pipe_mask = BIT(PIPE_D),
+               },
+       },
+ };
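The new dg1_power_wells table follows the same table-driven scheme as the neighbouring platforms: an array of descriptors, each carrying a name, a domain mask and an ops pointer, which the init code later selects and walks. A compact sketch of that shape, with hypothetical types and ops (the real descriptors carry far more per-well detail):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct power_well_desc;

struct power_well_ops {
	void (*enable)(const struct power_well_desc *desc);
	void (*disable)(const struct power_well_desc *desc);
};

struct power_well_desc {
	const char *name;
	uint64_t domains;	/* BIT_ULL()-style domain mask */
	const struct power_well_ops *ops;
};

static void example_enable(const struct power_well_desc *desc)
{
	printf("enable %s\n", desc->name);
}

static void example_disable(const struct power_well_desc *desc)
{
	printf("disable %s\n", desc->name);
}

static const struct power_well_ops example_ops = {
	.enable = example_enable,
	.disable = example_disable,
};

/* One per-platform table; the init code picks one and iterates over it. */
static const struct power_well_desc example_power_wells[] = {
	{ .name = "always-on",    .domains = ~0ULL, .ops = &example_ops },
	{ .name = "power well 1", .domains = 0x3,   .ops = &example_ops },
	{ .name = "power well 2", .domains = 0xc,   .ops = &example_ops },
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(example_power_wells) / sizeof(example_power_wells[0]); i++)
		example_power_wells[i].ops->enable(&example_power_wells[i]);
	return 0;
}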
  static const struct i915_power_well_desc xelpd_power_wells[] = {
        {
                .name = "always-on",
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+                       .hsw.fixed_enable_delay = 600,
                },
        },
        {
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+                       .hsw.fixed_enable_delay = 600,
                },
        },
        {
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+                       .hsw.fixed_enable_delay = 600,
                },
        },
        {
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = XELPD_PW_CTL_IDX_AUX_D,
+                       .hsw.fixed_enable_delay = 600,
                },
        },
        {
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
+                       .hsw.fixed_enable_delay = 600,
                },
        },
        {
@@@ -5121,7 -5130,9 +5130,9 @@@ int intel_power_domains_init(struct drm
                err = 0;
        } else if (DISPLAY_VER(dev_priv) >= 13) {
                err = set_power_wells(power_domains, xelpd_power_wells);
-       } else if (IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) {
+       } else if (IS_DG1(dev_priv)) {
+               err = set_power_wells(power_domains, dg1_power_wells);
+       } else if (IS_ALDERLAKE_S(dev_priv)) {
                err = set_power_wells_mask(power_domains, tgl_power_wells,
                                           BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
        } else if (IS_ROCKETLAKE(dev_priv)) {
                err = set_power_wells(power_domains, tgl_power_wells);
        } else if (DISPLAY_VER(dev_priv) == 11) {
                err = set_power_wells(power_domains, icl_power_wells);
-       } else if (IS_CNL_WITH_PORT_F(dev_priv)) {
-               err = set_power_wells(power_domains, cnl_power_wells);
-       } else if (IS_CANNONLAKE(dev_priv)) {
-               err = set_power_wells_mask(power_domains, cnl_power_wells,
-                                          BIT_ULL(CNL_DISP_PW_DDI_F_IO) |
-                                          BIT_ULL(CNL_DISP_PW_DDI_F_AUX));
        } else if (IS_GEMINILAKE(dev_priv)) {
                err = set_power_wells(power_domains, glk_power_wells);
        } else if (IS_BROXTON(dev_priv)) {
@@@ -5690,75 -5695,6 +5695,6 @@@ static void bxt_display_core_uninit(str
        usleep_range(10, 30);           /* 10 us delay per Bspec */
  }
  
- static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
- {
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *well;
-       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-       /* 1. Enable PCH Reset Handshake */
-       intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
-       if (!HAS_DISPLAY(dev_priv))
-               return;
-       /* 2-3. */
-       intel_combo_phy_init(dev_priv);
-       /*
-        * 4. Enable Power Well 1 (PG1).
-        *    The AUX IO power wells will be enabled on demand.
-        */
-       mutex_lock(&power_domains->lock);
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-       intel_power_well_enable(dev_priv, well);
-       mutex_unlock(&power_domains->lock);
-       /* 5. Enable CD clock */
-       intel_cdclk_init_hw(dev_priv);
-       /* 6. Enable DBUF */
-       gen9_dbuf_enable(dev_priv);
-       if (resume && intel_dmc_has_payload(dev_priv))
-               intel_dmc_load_program(dev_priv);
- }
- static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
- {
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *well;
-       if (!HAS_DISPLAY(dev_priv))
-               return;
-       gen9_disable_dc_states(dev_priv);
-       /* 1. Disable all display engine functions -> already done */
-       /* 2. Disable DBUF */
-       gen9_dbuf_disable(dev_priv);
-       /* 3. Disable CD clock */
-       intel_cdclk_uninit_hw(dev_priv);
-       /*
-        * 4. Disable Power Well 1 (PG1).
-        *    The AUX IO power wells are toggled on demand, so they are already
-        *    disabled at this point.
-        */
-       mutex_lock(&power_domains->lock);
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-       intel_power_well_disable(dev_priv, well);
-       mutex_unlock(&power_domains->lock);
-       usleep_range(10, 30);           /* 10 us delay per Bspec */
-       /* 5. */
-       intel_combo_phy_uninit(dev_priv);
- }
  struct buddy_page_mask {
        u32 page_mask;
        u8 type;
@@@ -5797,9 -5733,14 +5733,14 @@@ static void tgl_bw_buddy_init(struct dr
        unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
        int config, i;
  
+       /* BW_BUDDY registers are not used on dGPUs beyond DG1 */
+       if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
+               return;
        if (IS_ALDERLAKE_S(dev_priv) ||
-           IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) ||
-           IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+           IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+           IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+           IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
                /* Wa_1409767108:tgl,dg1,adl-s */
                table = wa_1409767108_buddy_page_masks;
        else
                        intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
                                       table[config].page_mask);
  
-                       /* Wa_22010178259:tgl,rkl */
-                       intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
-                                    BW_BUDDY_TLB_REQ_TIMER_MASK,
-                                    BW_BUDDY_TLB_REQ_TIMER(0x8));
+                       /* Wa_22010178259:tgl,dg1,rkl,adl-s */
+                       if (DISPLAY_VER(dev_priv) == 12)
+                               intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
+                                            BW_BUDDY_TLB_REQ_TIMER_MASK,
+                                            BW_BUDDY_TLB_REQ_TIMER(0x8));
                }
        }
  }
@@@ -5878,11 -5820,15 +5820,15 @@@ static void icl_display_core_init(struc
        if (DISPLAY_VER(dev_priv) >= 12)
                tgl_bw_buddy_init(dev_priv);
  
+       /* 8. Ensure PHYs have completed calibration and adaptation */
+       if (IS_DG2(dev_priv))
+               intel_snps_phy_wait_for_calibration(dev_priv);
        if (resume && intel_dmc_has_payload(dev_priv))
                intel_dmc_load_program(dev_priv);
  
-       /* Wa_14011508470 */
-       if (DISPLAY_VER(dev_priv) == 12) {
+       /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
+       if (DISPLAY_VER(dev_priv) >= 12) {
                val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
                      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
                intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
@@@ -6097,8 -6043,6 +6043,6 @@@ void intel_power_domains_init_hw(struc
  
        if (DISPLAY_VER(i915) >= 11) {
                icl_display_core_init(i915, resume);
-       } else if (IS_CANNONLAKE(i915)) {
-               cnl_display_core_init(i915, resume);
        } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
                bxt_display_core_init(i915, resume);
        } else if (DISPLAY_VER(i915) == 9) {
@@@ -6258,8 -6202,6 +6202,6 @@@ void intel_power_domains_suspend(struc
  
        if (DISPLAY_VER(i915) >= 11)
                icl_display_core_uninit(i915);
-       else if (IS_CANNONLAKE(i915))
-               cnl_display_core_uninit(i915);
        else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
                bxt_display_core_uninit(i915);
        else if (DISPLAY_VER(i915) == 9)
@@@ -6387,13 -6329,13 +6329,13 @@@ void intel_display_power_suspend_late(s
        if (DISPLAY_VER(i915) >= 11 || IS_GEMINILAKE(i915) ||
            IS_BROXTON(i915)) {
                bxt_enable_dc9(i915);
 -              /* Tweaked Wa_14010685332:icp,jsp,mcc */
 -              if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
 -                      intel_de_rmw(i915, SOUTH_CHICKEN1,
 -                                   SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                hsw_enable_pc8(i915);
        }
 +
 +      /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
 +      if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
 +              intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
  }
  
  void intel_display_power_resume_early(struct drm_i915_private *i915)
            IS_BROXTON(i915)) {
                gen9_sanitize_dc_state(i915);
                bxt_disable_dc9(i915);
 -              /* Tweaked Wa_14010685332:icp,jsp,mcc */
 -              if (INTEL_PCH_TYPE(i915) >= PCH_ICP && INTEL_PCH_TYPE(i915) <= PCH_MCC)
 -                      intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
 -
        } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
                hsw_disable_pc8(i915);
        }
 +
 +      /* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
 +      if (INTEL_PCH_TYPE(i915) >= PCH_CNP && INTEL_PCH_TYPE(i915) < PCH_DG1)
 +              intel_de_rmw(i915, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
  }
  
  void intel_display_power_suspend(struct drm_i915_private *i915)
@@@ -222,29 -222,6 +222,6 @@@ bool intel_dp_can_bigjoiner(struct inte
                 encoder->port != PORT_A);
  }
  
- static int cnl_max_source_rate(struct intel_dp *intel_dp)
- {
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       enum port port = dig_port->base.port;
-       u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
-       /* Low voltage SKUs are limited to max of 5.4G */
-       if (voltage == VOLTAGE_INFO_0_85V)
-               return 540000;
-       /* For this SKU 8.1G is supported in all ports */
-       if (IS_CNL_WITH_PORT_F(dev_priv))
-               return 810000;
-       /* For other SKUs, max rate on ports A and D is 5.4G */
-       if (port == PORT_A || port == PORT_D)
-               return 540000;
-       return 810000;
- }
  static int icl_max_source_rate(struct intel_dp *intel_dp)
  {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@@ -270,7 -247,7 +247,7 @@@ static voi
  intel_dp_set_source_rates(struct intel_dp *intel_dp)
  {
        /* The values must be in increasing order */
-       static const int cnl_rates[] = {
+       static const int icl_rates[] = {
                162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
        };
        static const int bxt_rates[] = {
        drm_WARN_ON(&dev_priv->drm,
                    intel_dp->source_rates || intel_dp->num_source_rates);
  
-       if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
-               source_rates = cnl_rates;
-               size = ARRAY_SIZE(cnl_rates);
-               if (DISPLAY_VER(dev_priv) == 10)
-                       max_rate = cnl_max_source_rate(intel_dp);
-               else if (IS_JSL_EHL(dev_priv))
+       if (DISPLAY_VER(dev_priv) >= 11) {
+               source_rates = icl_rates;
+               size = ARRAY_SIZE(icl_rates);
+               if (IS_JSL_EHL(dev_priv))
                        max_rate = ehl_max_source_rate(intel_dp);
                else
                        max_rate = icl_max_source_rate(intel_dp);
@@@ -1274,6 -1249,23 +1249,23 @@@ static int intel_dp_dsc_compute_config(
                                                               pipe_config->pipe_bpp);
                pipe_config->dsc.slice_count = dsc_dp_slice_count;
        }
+       /* As of today we only support DSC for RGB */
+       if (intel_dp->force_dsc_bpp) {
+               if (intel_dp->force_dsc_bpp >= 8 &&
+                   intel_dp->force_dsc_bpp < pipe_bpp) {
+                       drm_dbg_kms(&dev_priv->drm,
+                                   "DSC BPP forced to %d",
+                                   intel_dp->force_dsc_bpp);
+                       pipe_config->dsc.compressed_bpp =
+                                               intel_dp->force_dsc_bpp;
+               } else {
+                       drm_dbg_kms(&dev_priv->drm,
+                                   "Invalid DSC BPP %d",
+                                   intel_dp->force_dsc_bpp);
+               }
+       }
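The force_dsc_bpp block above applies an externally forced DSC bpp only when it is at least 8 and strictly below the uncompressed pipe bpp, and otherwise logs and ignores it. A minimal sketch of that accept-or-ignore logic, with hypothetical names:

#include <stdio.h>

/*
 * Return the compressed bpp to use: the forced value when it passes the
 * range check, otherwise the computed default (mirrors the logic above).
 */
static int pick_dsc_bpp(int forced_bpp, int pipe_bpp, int computed_bpp)
{
	if (forced_bpp >= 8 && forced_bpp < pipe_bpp) {
		printf("DSC BPP forced to %d\n", forced_bpp);
		return forced_bpp;
	}
	if (forced_bpp)
		printf("Invalid DSC BPP %d, keeping %d\n", forced_bpp, computed_bpp);
	return computed_bpp;
}

int main(void)
{
	pick_dsc_bpp(12, 24, 8);	/* accepted */
	pick_dsc_bpp(30, 24, 8);	/* rejected: not below the pipe bpp */
	pick_dsc_bpp(0, 24, 8);		/* no override requested */
	return 0;
}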
        /*
         * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
         * is greater than the maximum Cdclock and if slice count is even
@@@ -3031,9 -3023,6 +3023,6 @@@ void intel_read_dp_sdp(struct intel_enc
                       struct intel_crtc_state *crtc_state,
                       unsigned int type)
  {
-       if (encoder->type != INTEL_OUTPUT_DDI)
-               return;
        switch (type) {
        case DP_SDP_VSC:
                intel_read_dp_vsc_sdp(encoder, crtc_state,
@@@ -3342,6 -3331,9 +3331,9 @@@ static void intel_dp_process_phy_reques
  
        intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
  
+       drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
+                         intel_dp->train_set, crtc_state->lane_count);
        drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
                                    link_status[DP_DPCD_REV]);
  }
@@@ -3850,18 -3842,23 +3842,18 @@@ static void intel_dp_check_device_servi
  
  static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
  {
 -      struct drm_i915_private *i915 = dp_to_i915(intel_dp);
        u8 val;
  
        if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
                return;
  
        if (drm_dp_dpcd_readb(&intel_dp->aux,
 -                            DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) {
 -              drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n");
 +                            DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
                return;
 -      }
  
        if (drm_dp_dpcd_writeb(&intel_dp->aux,
 -                             DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) {
 -              drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n");
 +                             DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
                return;
 -      }
  
        if (val & HDMI_LINK_STATUS_CHANGED)
                intel_dp_handle_hdmi_link_status_change(intel_dp);
@@@ -4736,7 -4733,7 +4728,7 @@@ static void intel_dp_set_drrs_state(str
                                    int refresh_rate)
  {
        struct intel_dp *intel_dp = dev_priv->drrs.dp;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
  
        if (refresh_rate <= 0) {
                return;
        }
  
-       if (!intel_crtc) {
+       if (!crtc) {
                drm_dbg_kms(&dev_priv->drm,
                            "DRRS: intel_crtc not initialized\n");
                return;
@@@ -5233,7 -5230,8 +5225,8 @@@ static bool intel_edp_init_connector(st
        }
  
        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
-       intel_connector->panel.backlight.power = intel_pps_backlight_power;
+       if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
+               intel_connector->panel.backlight.power = intel_pps_backlight_power;
        intel_panel_setup_backlight(connector, pipe);
  
        if (fixed_mode) {
@@@ -727,18 -727,9 +727,18 @@@ static void err_print_gt(struct drm_i91
        if (GRAPHICS_VER(m->i915) >= 12) {
                int i;
  
 -              for (i = 0; i < GEN12_SFC_DONE_MAX; i++)
 +              for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
 +                      /*
 +                       * SFC_DONE resides in the VD forcewake domain, so it
 +                       * only exists if the corresponding VCS engine is
 +                       * present.
 +                       */
 +                      if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
 +                              continue;
 +
                        err_printf(m, "  SFC_DONE[%d]: 0x%08x\n", i,
                                   gt->sfc_done[i]);
 +              }
  
                err_printf(m, "  GAM_DONE: 0x%08x\n", gt->gam_done);
        }
@@@ -1048,7 -1039,7 +1048,7 @@@ i915_vma_coredump_create(const struct i
                        if (ret)
                                break;
                }
-       } else if (i915_gem_object_is_lmem(vma->obj)) {
+       } else if (__i915_gem_object_is_lmem(vma->obj)) {
                struct intel_memory_region *mem = vma->obj->mm.region;
                dma_addr_t dma;
  
@@@ -1438,20 -1429,37 +1438,37 @@@ capture_engine(struct intel_engine_cs *
  {
        struct intel_engine_capture_vma *capture = NULL;
        struct intel_engine_coredump *ee;
-       struct i915_request *rq;
+       struct intel_context *ce;
+       struct i915_request *rq = NULL;
        unsigned long flags;
  
        ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
        if (!ee)
                return NULL;
  
-       spin_lock_irqsave(&engine->active.lock, flags);
-       rq = intel_engine_find_active_request(engine);
+       ce = intel_engine_get_hung_context(engine);
+       if (ce) {
+               intel_engine_clear_hung_context(engine);
+               rq = intel_context_find_active_request(ce);
+               if (!rq || !i915_request_started(rq))
+                       goto no_request_capture;
+       } else {
+               /*
+                * Getting here with GuC enabled means it is a forced error capture
+                * with no actual hang. So, no need to attempt the execlist search.
+                */
+               if (!intel_uc_uses_guc_submission(&engine->gt->uc)) {
+                       spin_lock_irqsave(&engine->sched_engine->lock, flags);
+                       rq = intel_engine_execlist_find_hung_request(engine);
+                       spin_unlock_irqrestore(&engine->sched_engine->lock,
+                                              flags);
+               }
+       }
        if (rq)
                capture = intel_engine_coredump_add_request(ee, rq,
                                                            ATOMIC_MAYFAIL);
-       spin_unlock_irqrestore(&engine->active.lock, flags);
        if (!capture) {
+ no_request_capture:
                kfree(ee);
                return NULL;
        }
@@@ -1590,14 -1598,6 +1607,14 @@@ static void gt_record_regs(struct intel
  
        if (GRAPHICS_VER(i915) >= 12) {
                for (i = 0; i < GEN12_SFC_DONE_MAX; i++) {
 +                      /*
 +                       * SFC_DONE resides in the VD forcewake domain, so it
 +                       * only exists if the corresponding VCS engine is
 +                       * present.
 +                       */
 +                      if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
 +                              continue;
 +
                        gt->sfc_done[i] =
                                intel_uncore_read(uncore, GEN12_SFC_DONE(i));
                }
@@@ -207,7 -207,7 +207,7 @@@ static void intel_hpd_init_pins(struct 
            (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
                return;
  
-       if (HAS_PCH_DG1(dev_priv))
+       if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
                hpd->pch_hpd = hpd_sde_dg1;
        else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                hpd->pch_hpd = hpd_icp;
@@@ -2297,11 -2297,10 +2297,10 @@@ static u32 gen8_de_port_aux_mask(struc
                        GEN9_AUX_CHANNEL_C |
                        GEN9_AUX_CHANNEL_D;
  
-       if (IS_CNL_WITH_PORT_F(dev_priv) || DISPLAY_VER(dev_priv) == 11)
-               mask |= CNL_AUX_CHANNEL_F;
-       if (DISPLAY_VER(dev_priv) == 11)
+       if (DISPLAY_VER(dev_priv) == 11) {
+               mask |= ICL_AUX_CHANNEL_F;
                mask |= ICL_AUX_CHANNEL_E;
+       }
  
        return mask;
  }
@@@ -2698,11 -2697,9 +2697,9 @@@ gen11_display_irq_handler(struct drm_i9
        enable_rpm_wakeref_asserts(&i915->runtime_pm);
  }
  
- static __always_inline irqreturn_t
- __gen11_irq_handler(struct drm_i915_private * const i915,
-                   u32 (*intr_disable)(void __iomem * const regs),
-                   void (*intr_enable)(void __iomem * const regs))
+ static irqreturn_t gen11_irq_handler(int irq, void *arg)
  {
+       struct drm_i915_private *i915 = arg;
        void __iomem * const regs = i915->uncore.regs;
        struct intel_gt *gt = &i915->gt;
        u32 master_ctl;
        if (!intel_irqs_enabled(i915))
                return IRQ_NONE;
  
-       master_ctl = intr_disable(regs);
+       master_ctl = gen11_master_intr_disable(regs);
        if (!master_ctl) {
-               intr_enable(regs);
+               gen11_master_intr_enable(regs);
                return IRQ_NONE;
        }
  
  
        gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
  
-       intr_enable(regs);
+       gen11_master_intr_enable(regs);
  
        gen11_gu_misc_irq_handler(gt, gu_misc_iir);
  
        return IRQ_HANDLED;
  }
  
- static irqreturn_t gen11_irq_handler(int irq, void *arg)
- {
-       return __gen11_irq_handler(arg,
-                                  gen11_master_intr_disable,
-                                  gen11_master_intr_enable);
- }
- static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
+ static inline u32 dg1_master_intr_disable(void __iomem * const regs)
  {
        u32 val;
  
        /* First disable interrupts */
-       raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);
+       raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
  
        /* Get the indication levels and ack the master unit */
-       val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
+       val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
        if (unlikely(!val))
                return 0;
  
-       raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);
-       /*
-        * Now with master disabled, get a sample of level indications
-        * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ
-        * out as this bit doesn't exist anymore for DG1
-        */
-       val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
-       if (unlikely(!val))
-               return 0;
-       raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);
+       raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
  
        return val;
  }
  
  static inline void dg1_master_intr_enable(void __iomem * const regs)
  {
-       raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
+       raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
  }
  
  static irqreturn_t dg1_irq_handler(int irq, void *arg)
  {
-       return __gen11_irq_handler(arg,
-                                  dg1_master_intr_disable_and_ack,
-                                  dg1_master_intr_enable);
+       struct drm_i915_private * const i915 = arg;
+       struct intel_gt *gt = &i915->gt;
+       void __iomem * const regs = i915->uncore.regs;
+       u32 master_tile_ctl, master_ctl;
+       u32 gu_misc_iir;
+       if (!intel_irqs_enabled(i915))
+               return IRQ_NONE;
+       master_tile_ctl = dg1_master_intr_disable(regs);
+       if (!master_tile_ctl) {
+               dg1_master_intr_enable(regs);
+               return IRQ_NONE;
+       }
+       /* FIXME: we only support tile 0 for now. */
+       if (master_tile_ctl & DG1_MSTR_TILE(0)) {
+               master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
+               raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
+       } else {
+               DRM_ERROR("Tile not supported: 0x%08x\n", master_tile_ctl);
+               dg1_master_intr_enable(regs);
+               return IRQ_NONE;
+       }
+       gen11_gt_irq_handler(gt, master_ctl);
+       if (master_ctl & GEN11_DISPLAY_IRQ)
+               gen11_display_irq_handler(i915);
+       gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
+       dg1_master_intr_enable(regs);
+       gen11_gu_misc_irq_handler(gt, gu_misc_iir);
+       pmu_irq_stats(i915, IRQ_HANDLED);
+       return IRQ_HANDLED;
  }
  
  /* Called from drm generic code, passed 'crtc' which
@@@ -2880,14 -2895,14 +2895,14 @@@ static bool gen11_dsi_configure_te(stru
        return true;
  }
  
- int bdw_enable_vblank(struct drm_crtc *crtc)
+ int bdw_enable_vblank(struct drm_crtc *_crtc)
  {
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
+       struct intel_crtc *crtc = to_intel_crtc(_crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
        unsigned long irqflags;
  
-       if (gen11_dsi_configure_te(intel_crtc, true))
+       if (gen11_dsi_configure_te(crtc, true))
                return 0;
  
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
         * PSR is active as no frames are generated, so check only for PSR.
         */
        if (HAS_PSR(dev_priv))
-               drm_crtc_vblank_restore(crtc);
+               drm_crtc_vblank_restore(&crtc->base);
  
        return 0;
  }
@@@ -2952,14 -2967,14 +2967,14 @@@ void ilk_disable_vblank(struct drm_crt
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  }
  
- void bdw_disable_vblank(struct drm_crtc *crtc)
+ void bdw_disable_vblank(struct drm_crtc *_crtc)
  {
-       struct drm_i915_private *dev_priv = to_i915(crtc->dev);
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       enum pipe pipe = intel_crtc->pipe;
+       struct intel_crtc *crtc = to_intel_crtc(_crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+       enum pipe pipe = crtc->pipe;
        unsigned long irqflags;
  
-       if (gen11_dsi_configure_te(intel_crtc, false))
+       if (gen11_dsi_configure_te(crtc, false))
                return;
  
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@@ -3064,6 -3079,24 +3079,6 @@@ static void valleyview_irq_reset(struc
        spin_unlock_irq(&dev_priv->irq_lock);
  }
  
 -static void cnp_display_clock_wa(struct drm_i915_private *dev_priv)
 -{
 -      struct intel_uncore *uncore = &dev_priv->uncore;
 -
 -      /*
 -       * Wa_14010685332:cnp/cmp,tgp,adp
 -       * TODO: Clarify which platforms this applies to
 -       * TODO: Figure out if this workaround can be applied in the s0ix suspend/resume handlers as
 -       * on earlier platforms and whether the workaround is also needed for runtime suspend/resume
 -       */
 -      if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
 -          (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP && INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
 -              intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS,
 -                               SBCLK_RUN_REFCLK_DIS);
 -              intel_uncore_rmw(uncore, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
 -      }
 -}
 -
  static void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
  {
        struct intel_uncore *uncore = &dev_priv->uncore;
@@@ -3097,6 -3130,7 +3112,6 @@@ static void gen8_irq_reset(struct drm_i
        if (HAS_PCH_SPLIT(dev_priv))
                ibx_irq_reset(dev_priv);
  
 -      cnp_display_clock_wa(dev_priv);
  }
  
  static void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
  
        if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                GEN3_IRQ_RESET(uncore, SDE);
 -
 -      cnp_display_clock_wa(dev_priv);
  }
  
  static void gen11_irq_reset(struct drm_i915_private *dev_priv)
  {
        struct intel_uncore *uncore = &dev_priv->uncore;
  
-       if (HAS_MASTER_UNIT_IRQ(dev_priv))
-               dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
-       else
-               gen11_master_intr_disable(dev_priv->uncore.regs);
+       gen11_master_intr_disable(dev_priv->uncore.regs);
+
+       gen11_gt_irq_reset(&dev_priv->gt);
+       gen11_display_irq_reset(dev_priv);
+
+       GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
+       GEN3_IRQ_RESET(uncore, GEN8_PCU_);
+ }
+
+ static void dg1_irq_reset(struct drm_i915_private *dev_priv)
+ {
+       struct intel_uncore *uncore = &dev_priv->uncore;
+
+       dg1_master_intr_disable(dev_priv->uncore.regs);
  
        gen11_gt_irq_reset(&dev_priv->gt);
        gen11_display_irq_reset(dev_priv);
@@@ -3841,13 -3887,28 +3866,28 @@@ static void gen11_irq_postinstall(struc
  
        GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
  
-       if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
-               dg1_master_intr_enable(uncore->regs);
-               intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);
-       } else {
-               gen11_master_intr_enable(uncore->regs);
-               intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
+       gen11_master_intr_enable(uncore->regs);
+       intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
+ }
+
+ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
+ {
+       struct intel_uncore *uncore = &dev_priv->uncore;
+       u32 gu_misc_masked = GEN11_GU_MISC_GSE;
+
+       gen11_gt_irq_postinstall(&dev_priv->gt);
+
+       GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
+
+       if (HAS_DISPLAY(dev_priv)) {
+               icp_irq_postinstall(dev_priv);
+               gen8_de_irq_postinstall(dev_priv);
+               intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
+                                  GEN11_DISPLAY_IRQ_ENABLE);
        }
+
+       dg1_master_intr_enable(dev_priv->uncore.regs);
+       intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_TILE_INTR);
  }
  
  static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
@@@ -4386,9 -4447,9 +4426,9 @@@ static irq_handler_t intel_irq_handler(
                else
                        return i8xx_irq_handler;
        } else {
-               if (HAS_MASTER_UNIT_IRQ(dev_priv))
+               if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
                        return dg1_irq_handler;
-               if (GRAPHICS_VER(dev_priv) >= 11)
+               else if (GRAPHICS_VER(dev_priv) >= 11)
                        return gen11_irq_handler;
                else if (GRAPHICS_VER(dev_priv) >= 8)
                        return gen8_irq_handler;
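
For orientation, a compile-time sketch (not part of this diff) of why the new check singles out DG1-class hardware; IP_VER() is assumed here to pack the release number into the low byte, as the i915 full-version macros do.

/* Sketch only: local stand-in for the assumed IP_VER() packing. */
#define IP_VER_SKETCH(ver, rel)	((ver) << 8 | (rel))

_Static_assert(IP_VER_SKETCH(12, 10) == 0x0c0a, "DG1 graphics IP 12.10");
_Static_assert(IP_VER_SKETCH(12, 0) < IP_VER_SKETCH(12, 10),
	       "plain version-12 parts still take gen11_irq_handler");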
@@@ -4411,7 -4472,9 +4451,9 @@@ static void intel_irq_reset(struct drm_
                else
                        i8xx_irq_reset(dev_priv);
        } else {
-               if (GRAPHICS_VER(dev_priv) >= 11)
+               if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
+                       dg1_irq_reset(dev_priv);
+               else if (GRAPHICS_VER(dev_priv) >= 11)
                        gen11_irq_reset(dev_priv);
                else if (GRAPHICS_VER(dev_priv) >= 8)
                        gen8_irq_reset(dev_priv);
@@@ -4434,7 -4497,9 +4476,9 @@@ static void intel_irq_postinstall(struc
                else
                        i8xx_irq_postinstall(dev_priv);
        } else {
-               if (GRAPHICS_VER(dev_priv) >= 11)
+               if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
+                       dg1_irq_postinstall(dev_priv);
+               else if (GRAPHICS_VER(dev_priv) >= 11)
                        gen11_irq_postinstall(dev_priv);
                else if (GRAPHICS_VER(dev_priv) >= 8)
                        gen8_irq_postinstall(dev_priv);
@@@ -4466,14 -4531,14 +4510,14 @@@ int intel_irq_install(struct drm_i915_p
         */
        dev_priv->runtime_pm.irqs_enabled = true;
  
-       dev_priv->drm.irq_enabled = true;
+       dev_priv->irq_enabled = true;
  
        intel_irq_reset(dev_priv);
  
        ret = request_irq(irq, intel_irq_handler(dev_priv),
                          IRQF_SHARED, DRIVER_NAME, dev_priv);
        if (ret < 0) {
-               dev_priv->drm.irq_enabled = false;
+               dev_priv->irq_enabled = false;
                return ret;
        }
  
@@@ -4499,10 -4564,10 +4543,10 @@@ void intel_irq_uninstall(struct drm_i91
         * intel_modeset_driver_remove() calling us out of sequence.
         * Would be nice if it didn't do that...
         */
-       if (!dev_priv->drm.irq_enabled)
+       if (!dev_priv->irq_enabled)
                return;
  
-       dev_priv->drm.irq_enabled = false;
+       dev_priv->irq_enabled = false;
  
        intel_irq_reset(dev_priv);
  
@@@ -395,10 -395,18 +395,18 @@@ static inline bool i915_mmio_reg_valid(
  #define  GEN11_GRDOM_MEDIA2           (1 << 6)
  #define  GEN11_GRDOM_MEDIA3           (1 << 7)
  #define  GEN11_GRDOM_MEDIA4           (1 << 8)
+ #define  GEN11_GRDOM_MEDIA5           (1 << 9)
+ #define  GEN11_GRDOM_MEDIA6           (1 << 10)
+ #define  GEN11_GRDOM_MEDIA7           (1 << 11)
+ #define  GEN11_GRDOM_MEDIA8           (1 << 12)
  #define  GEN11_GRDOM_VECS             (1 << 13)
  #define  GEN11_GRDOM_VECS2            (1 << 14)
+ #define  GEN11_GRDOM_VECS3            (1 << 15)
+ #define  GEN11_GRDOM_VECS4            (1 << 16)
  #define  GEN11_GRDOM_SFC0             (1 << 17)
  #define  GEN11_GRDOM_SFC1             (1 << 18)
+ #define  GEN11_GRDOM_SFC2             (1 << 19)
+ #define  GEN11_GRDOM_SFC3             (1 << 20)
  
  #define  GEN11_VCS_SFC_RESET_BIT(instance)    (GEN11_GRDOM_SFC0 << ((instance) >> 1))
  #define  GEN11_VECS_SFC_RESET_BIT(instance)   (GEN11_GRDOM_SFC0 << (instance))
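
A hedged compile-time sketch (not from the diff) of the sharing these reset-bit macros encode: each SFC appears to serve one VCS pair and one VECS, so consecutive VCS instances collapse onto the same reset bit.

/* Values mirror the GEN11_GRDOM_SFC* defines above; names are local stand-ins. */
#define SFC0_BIT		(1 << 17)
#define SFC1_BIT		(1 << 18)
#define VCS_SFC_BIT(i)		(SFC0_BIT << ((i) >> 1))
#define VECS_SFC_BIT(i)		(SFC0_BIT << (i))

_Static_assert(VCS_SFC_BIT(2) == SFC1_BIT && VCS_SFC_BIT(3) == SFC1_BIT,
	       "VCS2 and VCS3 share SFC1");
_Static_assert(VECS_SFC_BIT(1) == SFC1_BIT, "VECS1 maps 1:1 onto SFC1");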
  #define BXT_PORT_CL1CM_DW30(phy)      _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
  
  /*
-  * CNL/ICL Port/COMBO-PHY Registers
+  * ICL Port/COMBO-PHY Registers
   */
  #define _ICL_COMBOPHY_A                       0x162000
  #define _ICL_COMBOPHY_B                       0x6C000
                                              _RKL_COMBOPHY_D, \
                                              _ADL_COMBOPHY_E)
  
- /* CNL/ICL Port CL_DW registers */
+ /* ICL Port CL_DW registers */
  #define _ICL_PORT_CL_DW(dw, phy)      (_ICL_COMBOPHY(phy) + \
                                         4 * (dw))
  
- #define CNL_PORT_CL1CM_DW5            _MMIO(0x162014)
  #define ICL_PORT_CL_DW5(phy)          _MMIO(_ICL_PORT_CL_DW(5, phy))
  #define   CL_POWER_DOWN_ENABLE                (1 << 4)
  #define   SUS_CLOCK_CONFIG            (3 << 0)
  #define ICL_PORT_CL_DW12(phy)         _MMIO(_ICL_PORT_CL_DW(12, phy))
  #define   ICL_LANE_ENABLE_AUX         (1 << 0)
  
- /* CNL/ICL Port COMP_DW registers */
+ /* ICL Port COMP_DW registers */
  #define _ICL_PORT_COMP                        0x100
  #define _ICL_PORT_COMP_DW(dw, phy)    (_ICL_COMBOPHY(phy) + \
                                         _ICL_PORT_COMP + 4 * (dw))
  
- #define CNL_PORT_COMP_DW0             _MMIO(0x162100)
  #define ICL_PORT_COMP_DW0(phy)                _MMIO(_ICL_PORT_COMP_DW(0, phy))
  #define   COMP_INIT                   (1 << 31)
  
- #define CNL_PORT_COMP_DW1             _MMIO(0x162104)
  #define ICL_PORT_COMP_DW1(phy)                _MMIO(_ICL_PORT_COMP_DW(1, phy))
  
- #define CNL_PORT_COMP_DW3             _MMIO(0x16210c)
  #define ICL_PORT_COMP_DW3(phy)                _MMIO(_ICL_PORT_COMP_DW(3, phy))
  #define   PROCESS_INFO_DOT_0          (0 << 26)
  #define   PROCESS_INFO_DOT_1          (1 << 26)
  #define ICL_PORT_COMP_DW8(phy)                _MMIO(_ICL_PORT_COMP_DW(8, phy))
  #define   IREFGEN                     (1 << 24)
  
- #define CNL_PORT_COMP_DW9             _MMIO(0x162124)
  #define ICL_PORT_COMP_DW9(phy)                _MMIO(_ICL_PORT_COMP_DW(9, phy))
  
- #define CNL_PORT_COMP_DW10            _MMIO(0x162128)
  #define ICL_PORT_COMP_DW10(phy)               _MMIO(_ICL_PORT_COMP_DW(10, phy))
  
- /* CNL/ICL Port PCS registers */
- #define _CNL_PORT_PCS_DW1_GRP_AE      0x162304
- #define _CNL_PORT_PCS_DW1_GRP_B               0x162384
- #define _CNL_PORT_PCS_DW1_GRP_C               0x162B04
- #define _CNL_PORT_PCS_DW1_GRP_D               0x162B84
- #define _CNL_PORT_PCS_DW1_GRP_F               0x162A04
- #define _CNL_PORT_PCS_DW1_LN0_AE      0x162404
- #define _CNL_PORT_PCS_DW1_LN0_B               0x162604
- #define _CNL_PORT_PCS_DW1_LN0_C               0x162C04
- #define _CNL_PORT_PCS_DW1_LN0_D               0x162E04
- #define _CNL_PORT_PCS_DW1_LN0_F               0x162804
- #define CNL_PORT_PCS_DW1_GRP(phy)     _MMIO(_PICK(phy, \
-                                                   _CNL_PORT_PCS_DW1_GRP_AE, \
-                                                   _CNL_PORT_PCS_DW1_GRP_B, \
-                                                   _CNL_PORT_PCS_DW1_GRP_C, \
-                                                   _CNL_PORT_PCS_DW1_GRP_D, \
-                                                   _CNL_PORT_PCS_DW1_GRP_AE, \
-                                                   _CNL_PORT_PCS_DW1_GRP_F))
- #define CNL_PORT_PCS_DW1_LN0(phy)     _MMIO(_PICK(phy, \
-                                                   _CNL_PORT_PCS_DW1_LN0_AE, \
-                                                   _CNL_PORT_PCS_DW1_LN0_B, \
-                                                   _CNL_PORT_PCS_DW1_LN0_C, \
-                                                   _CNL_PORT_PCS_DW1_LN0_D, \
-                                                   _CNL_PORT_PCS_DW1_LN0_AE, \
-                                                   _CNL_PORT_PCS_DW1_LN0_F))
+ /* ICL Port PCS registers */
  #define _ICL_PORT_PCS_AUX             0x300
  #define _ICL_PORT_PCS_GRP             0x600
  #define _ICL_PORT_PCS_LN(ln)          (0x800 + (ln) * 0x100)
  #define   LATENCY_OPTIM_MASK          (0x3 << 2)
  #define   LATENCY_OPTIM_VAL(x)                ((x) << 2)
  
- /* CNL/ICL Port TX registers */
- #define _CNL_PORT_TX_AE_GRP_OFFSET            0x162340
- #define _CNL_PORT_TX_B_GRP_OFFSET             0x1623C0
- #define _CNL_PORT_TX_C_GRP_OFFSET             0x162B40
- #define _CNL_PORT_TX_D_GRP_OFFSET             0x162BC0
- #define _CNL_PORT_TX_F_GRP_OFFSET             0x162A40
- #define _CNL_PORT_TX_AE_LN0_OFFSET            0x162440
- #define _CNL_PORT_TX_B_LN0_OFFSET             0x162640
- #define _CNL_PORT_TX_C_LN0_OFFSET             0x162C40
- #define _CNL_PORT_TX_D_LN0_OFFSET             0x162E40
- #define _CNL_PORT_TX_F_LN0_OFFSET             0x162840
- #define _CNL_PORT_TX_DW_GRP(dw, port) (_PICK((port), \
-                                              _CNL_PORT_TX_AE_GRP_OFFSET, \
-                                              _CNL_PORT_TX_B_GRP_OFFSET, \
-                                              _CNL_PORT_TX_B_GRP_OFFSET, \
-                                              _CNL_PORT_TX_D_GRP_OFFSET, \
-                                              _CNL_PORT_TX_AE_GRP_OFFSET, \
-                                              _CNL_PORT_TX_F_GRP_OFFSET) + \
-                                              4 * (dw))
- #define _CNL_PORT_TX_DW_LN0(dw, port) (_PICK((port), \
-                                              _CNL_PORT_TX_AE_LN0_OFFSET, \
-                                              _CNL_PORT_TX_B_LN0_OFFSET, \
-                                              _CNL_PORT_TX_B_LN0_OFFSET, \
-                                              _CNL_PORT_TX_D_LN0_OFFSET, \
-                                              _CNL_PORT_TX_AE_LN0_OFFSET, \
-                                              _CNL_PORT_TX_F_LN0_OFFSET) + \
-                                              4 * (dw))
+ /* ICL Port TX registers */
  #define _ICL_PORT_TX_AUX              0x380
  #define _ICL_PORT_TX_GRP              0x680
  #define _ICL_PORT_TX_LN(ln)           (0x880 + (ln) * 0x100)
  #define _ICL_PORT_TX_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \
                                          _ICL_PORT_TX_LN(ln) + 4 * (dw))
  
- #define CNL_PORT_TX_DW2_GRP(port)     _MMIO(_CNL_PORT_TX_DW_GRP(2, port))
- #define CNL_PORT_TX_DW2_LN0(port)     _MMIO(_CNL_PORT_TX_DW_LN0(2, port))
  #define ICL_PORT_TX_DW2_AUX(phy)      _MMIO(_ICL_PORT_TX_DW_AUX(2, phy))
  #define ICL_PORT_TX_DW2_GRP(phy)      _MMIO(_ICL_PORT_TX_DW_GRP(2, phy))
  #define ICL_PORT_TX_DW2_LN0(phy)      _MMIO(_ICL_PORT_TX_DW_LN(2, 0, phy))
  #define   RCOMP_SCALAR(x)             ((x) << 0)
  #define   RCOMP_SCALAR_MASK           (0xFF << 0)
  
- #define _CNL_PORT_TX_DW4_LN0_AE               0x162450
- #define _CNL_PORT_TX_DW4_LN1_AE               0x1624D0
- #define CNL_PORT_TX_DW4_GRP(port)     _MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
- #define CNL_PORT_TX_DW4_LN0(port)     _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
- #define CNL_PORT_TX_DW4_LN(ln, port)   _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
-                                          ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
-                                                   _CNL_PORT_TX_DW4_LN0_AE)))
  #define ICL_PORT_TX_DW4_AUX(phy)      _MMIO(_ICL_PORT_TX_DW_AUX(4, phy))
  #define ICL_PORT_TX_DW4_GRP(phy)      _MMIO(_ICL_PORT_TX_DW_GRP(4, phy))
  #define ICL_PORT_TX_DW4_LN0(phy)      _MMIO(_ICL_PORT_TX_DW_LN(4, 0, phy))
  #define   CURSOR_COEFF(x)             ((x) << 0)
  #define   CURSOR_COEFF_MASK           (0x3F << 0)
  
- #define CNL_PORT_TX_DW5_GRP(port)     _MMIO(_CNL_PORT_TX_DW_GRP(5, port))
- #define CNL_PORT_TX_DW5_LN0(port)     _MMIO(_CNL_PORT_TX_DW_LN0(5, port))
  #define ICL_PORT_TX_DW5_AUX(phy)      _MMIO(_ICL_PORT_TX_DW_AUX(5, phy))
  #define ICL_PORT_TX_DW5_GRP(phy)      _MMIO(_ICL_PORT_TX_DW_GRP(5, phy))
  #define ICL_PORT_TX_DW5_LN0(phy)      _MMIO(_ICL_PORT_TX_DW_LN(5, 0, phy))
  #define   RTERM_SELECT(x)             ((x) << 3)
  #define   RTERM_SELECT_MASK           (0x7 << 3)
  
- #define CNL_PORT_TX_DW7_GRP(port)     _MMIO(_CNL_PORT_TX_DW_GRP(7, (port)))
- #define CNL_PORT_TX_DW7_LN0(port)     _MMIO(_CNL_PORT_TX_DW_LN0(7, (port)))
  #define ICL_PORT_TX_DW7_AUX(phy)      _MMIO(_ICL_PORT_TX_DW_AUX(7, phy))
  #define ICL_PORT_TX_DW7_GRP(phy)      _MMIO(_ICL_PORT_TX_DW_GRP(7, phy))
  #define ICL_PORT_TX_DW7_LN0(phy)      _MMIO(_ICL_PORT_TX_DW_LN(7, 0, phy))
  #define   MG_DP_MODE_CFG_DP_X2_MODE                   (1 << 7)
  #define   MG_DP_MODE_CFG_DP_X1_MODE                   (1 << 6)
  
+ /*
+  * DG2 SNPS PHY registers (TC1 = PHY_E)
+  */
+ #define _SNPS_PHY_A_BASE                      0x168000
+ #define _SNPS_PHY_B_BASE                      0x169000
+ #define _SNPS_PHY(phy)                                _PHY(phy, \
+                                                    _SNPS_PHY_A_BASE, \
+                                                    _SNPS_PHY_B_BASE)
+ #define _SNPS2(phy, reg)                      (_SNPS_PHY(phy) - \
+                                                _SNPS_PHY_A_BASE + (reg))
+ #define _MMIO_SNPS(phy, reg)                  _MMIO(_SNPS2(phy, reg))
+ #define _MMIO_SNPS_LN(ln, phy, reg)           _MMIO(_SNPS2(phy, \
+                                                            (reg) + (ln) * 0x10))
+ #define SNPS_PHY_MPLLB_CP(phy)                        _MMIO_SNPS(phy, 0x168000)
+ #define   SNPS_PHY_MPLLB_CP_INT                       REG_GENMASK(31, 25)
+ #define   SNPS_PHY_MPLLB_CP_INT_GS            REG_GENMASK(23, 17)
+ #define   SNPS_PHY_MPLLB_CP_PROP              REG_GENMASK(15, 9)
+ #define   SNPS_PHY_MPLLB_CP_PROP_GS           REG_GENMASK(7, 1)
+ #define SNPS_PHY_MPLLB_DIV(phy)                       _MMIO_SNPS(phy, 0x168004)
+ #define   SNPS_PHY_MPLLB_FORCE_EN             REG_BIT(31)
+ #define   SNPS_PHY_MPLLB_DIV5_CLK_EN          REG_BIT(29)
+ #define   SNPS_PHY_MPLLB_V2I                  REG_GENMASK(27, 26)
+ #define   SNPS_PHY_MPLLB_FREQ_VCO             REG_GENMASK(25, 24)
+ #define   SNPS_PHY_MPLLB_PMIX_EN              REG_BIT(10)
+ #define   SNPS_PHY_MPLLB_TX_CLK_DIV           REG_GENMASK(7, 5)
+ #define SNPS_PHY_MPLLB_FRACN1(phy)            _MMIO_SNPS(phy, 0x168008)
+ #define   SNPS_PHY_MPLLB_FRACN_EN             REG_BIT(31)
+ #define   SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN  REG_BIT(30)
+ #define   SNPS_PHY_MPLLB_FRACN_DEN            REG_GENMASK(15, 0)
+ #define SNPS_PHY_MPLLB_FRACN2(phy)            _MMIO_SNPS(phy, 0x16800C)
+ #define   SNPS_PHY_MPLLB_FRACN_REM            REG_GENMASK(31, 16)
+ #define   SNPS_PHY_MPLLB_FRACN_QUOT           REG_GENMASK(15, 0)
+ #define SNPS_PHY_MPLLB_SSCEN(phy)             _MMIO_SNPS(phy, 0x168014)
+ #define   SNPS_PHY_MPLLB_SSC_EN                       REG_BIT(31)
+ #define   SNPS_PHY_MPLLB_SSC_UP_SPREAD                REG_BIT(30)
+ #define   SNPS_PHY_MPLLB_SSC_PEAK             REG_GENMASK(29, 10)
+ #define SNPS_PHY_MPLLB_SSCSTEP(phy)           _MMIO_SNPS(phy, 0x168018)
+ #define   SNPS_PHY_MPLLB_SSC_STEPSIZE         REG_GENMASK(31, 11)
+ #define SNPS_PHY_MPLLB_DIV2(phy)              _MMIO_SNPS(phy, 0x16801C)
+ #define   SNPS_PHY_MPLLB_HDMI_PIXEL_CLK_DIV   REG_GENMASK(19, 18)
+ #define   SNPS_PHY_MPLLB_HDMI_DIV             REG_GENMASK(17, 15)
+ #define   SNPS_PHY_MPLLB_REF_CLK_DIV          REG_GENMASK(14, 12)
+ #define   SNPS_PHY_MPLLB_MULTIPLIER           REG_GENMASK(11, 0)
+ #define SNPS_PHY_REF_CONTROL(phy)             _MMIO_SNPS(phy, 0x168188)
+ #define   SNPS_PHY_REF_CONTROL_REF_RANGE      REG_GENMASK(31, 27)
+ #define SNPS_PHY_TX_REQ(phy)                  _MMIO_SNPS(phy, 0x168200)
+ #define   SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR        REG_GENMASK(31, 30)
+ #define SNPS_PHY_TX_EQ(ln, phy)                       _MMIO_SNPS_LN(ln, phy, 0x168300)
+ #define   SNPS_PHY_TX_EQ_MAIN                 REG_GENMASK(23, 18)
+ #define   SNPS_PHY_TX_EQ_POST                 REG_GENMASK(15, 10)
+ #define   SNPS_PHY_TX_EQ_PRE                  REG_GENMASK(7, 2)
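
A worked example (illustrative, not from the diff) of the per-PHY/per-lane addressing above, assuming PHY_B resolves to the 0x169000 base via _PHY():

/* Hypothetical recomputation of SNPS_PHY_TX_EQ(2, PHY_B) with local names. */
#define SNPS_A_BASE	0x168000
#define SNPS_B_BASE	0x169000
#define SNPS_OFFSET(base, reg, ln)	((base) - SNPS_A_BASE + (reg) + (ln) * 0x10)

_Static_assert(SNPS_OFFSET(SNPS_B_BASE, 0x168300, 2) == 0x169320,
	       "lane 2 of PHY B: 0x1000 relocation plus 0x10 lane stride");
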
  /* The spec defines this only for BXT PHY0, but let's assume that this
   * would exist for PHY1 too if it had a second channel.
   */
  #define GEN11_BSD2_RING_BASE  0x1c4000
  #define GEN11_BSD3_RING_BASE  0x1d0000
  #define GEN11_BSD4_RING_BASE  0x1d4000
+ #define XEHP_BSD5_RING_BASE   0x1e0000
+ #define XEHP_BSD6_RING_BASE   0x1e4000
+ #define XEHP_BSD7_RING_BASE   0x1f0000
+ #define XEHP_BSD8_RING_BASE   0x1f4000
  #define VEBOX_RING_BASE               0x1a000
  #define GEN11_VEBOX_RING_BASE         0x1c8000
  #define GEN11_VEBOX2_RING_BASE                0x1d8000
+ #define XEHP_VEBOX3_RING_BASE         0x1e8000
+ #define XEHP_VEBOX4_RING_BASE         0x1f8000
  #define BLT_RING_BASE         0x22000
  #define RING_TAIL(base)               _MMIO((base) + 0x30)
  #define RING_HEAD(base)               _MMIO((base) + 0x34)
  #define   ARB_MODE_BWGTLB_DISABLE (1 << 9)
  #define   ARB_MODE_SWIZZLE_BDW        (1 << 1)
  #define RENDER_HWS_PGA_GEN7   _MMIO(0x04080)
- #define RING_FAULT_REG(engine)        _MMIO(0x4094 + 0x100 * (engine)->hw_id)
+ #define _RING_FAULT_REG_RCS        0x4094
+ #define _RING_FAULT_REG_VCS        0x4194
+ #define _RING_FAULT_REG_BCS        0x4294
+ #define _RING_FAULT_REG_VECS       0x4394
+ #define RING_FAULT_REG(engine)     _MMIO(_PICK((engine)->class, \
+                                              _RING_FAULT_REG_RCS, \
+                                              _RING_FAULT_REG_VCS, \
+                                              _RING_FAULT_REG_VECS, \
+                                              _RING_FAULT_REG_BCS))
  #define GEN8_RING_FAULT_REG   _MMIO(0x4094)
  #define GEN12_RING_FAULT_REG  _MMIO(0xcec4)
  #define   GEN8_RING_FAULT_ENGINE_ID(x)        (((x) >> 12) & 0x7)
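
A hedged sketch (not part of the diff) of what switching RING_FAULT_REG() from hw_id to class indexing implies, assuming the usual class numbering (render 0, video 1, video-enhance 2, copy 3) so that _PICK() walks the list above in that order:

/* Illustrative only; offsets mirror the _RING_FAULT_REG_* defines above. */
static inline unsigned int ring_fault_reg_sketch(unsigned int engine_class)
{
	static const unsigned int by_class[] = { 0x4094, 0x4194, 0x4394, 0x4294 };

	/* e.g. a VECS engine (class 2) resolves to 0x4394, every VCS to 0x4194 */
	return by_class[engine_class];
}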
  #define GEN12_SC_INSTDONE_EXTRA2      _MMIO(0x7108)
  #define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
  #define GEN7_ROW_INSTDONE     _MMIO(0xe164)
+ #define MCFG_MCR_SELECTOR             _MMIO(0xfd0)
+ #define SF_MCR_SELECTOR                       _MMIO(0xfd8)
  #define GEN8_MCR_SELECTOR             _MMIO(0xfdc)
  #define   GEN8_MCR_SLICE(slice)               (((slice) & 3) << 26)
  #define   GEN8_MCR_SLICE_MASK         GEN8_MCR_SLICE(3)
  #define       GEN10_MIRROR_FUSE3              _MMIO(0x9118)
  #define GEN10_L3BANK_PAIR_COUNT     4
  #define GEN10_L3BANK_MASK   0x0F
+ /* on Xe_HP the same fuses indicate mslices instead of L3 banks */
+ #define GEN12_MAX_MSLICES 4
+ #define GEN12_MEML3_EN_MASK 0x0F
  
  #define GEN8_EU_DISABLE0              _MMIO(0x9134)
  #define   GEN8_EU_DIS0_S0_MASK                0xffffff
  
  #define GEN12_GT_DSS_ENABLE _MMIO(0x913C)
  
+ #define XEHP_EU_ENABLE                        _MMIO(0x9134)
+ #define XEHP_EU_ENA_MASK              0xFF
+
  #define GEN6_BSD_SLEEP_PSMI_CONTROL   _MMIO(0x12050)
  #define   GEN6_BSD_SLEEP_MSG_DISABLE  (1 << 0)
  #define   GEN6_BSD_SLEEP_FLUSH_DISABLE        (1 << 2)
  #define BXT_GT_PERF_STATUS      _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7070)
  #define GEN6_RP_STATE_LIMITS  _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994)
  #define GEN6_RP_STATE_CAP     _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
+ #define   RP0_CAP_MASK                REG_GENMASK(7, 0)
+ #define   RP1_CAP_MASK                REG_GENMASK(15, 8)
+ #define   RPN_CAP_MASK                REG_GENMASK(23, 16)
  #define BXT_RP_STATE_CAP        _MMIO(0x138170)
  #define GEN9_RP_STATE_LIMITS  _MMIO(0x138148)
  
@@@ -4142,6 -4167,7 +4167,7 @@@ enum 
        FAULT_AND_CONTINUE /* Unsupported */
  };
  
+ #define CTX_GTT_ADDRESS_MASK GENMASK(31, 12)
  #define GEN8_CTX_VALID (1 << 0)
  #define GEN8_CTX_FORCE_PD_RESTORE (1 << 1)
  #define GEN8_CTX_FORCE_RESTORE (1 << 2)
  #define GEN11_ENGINE_INSTANCE_SHIFT 48
  #define GEN11_ENGINE_INSTANCE_WIDTH 6
  
+ #define XEHP_SW_CTX_ID_SHIFT 39
+ #define XEHP_SW_CTX_ID_WIDTH 16
+ #define XEHP_SW_COUNTER_SHIFT 58
+ #define XEHP_SW_COUNTER_WIDTH 6
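
A hedged packing sketch for the new XeHP software context-ID fields; the helper below is hypothetical, only the shift and width values come from the defines above.

#include <stdint.h>

static inline uint64_t xehp_sw_ctx_desc_sketch(uint64_t ctx_id, uint64_t counter)
{
	uint64_t desc = 0;

	desc |= (ctx_id  & ((1ull << 16) - 1)) << 39;	/* XEHP_SW_CTX_ID, bits 54:39 */
	desc |= (counter & ((1ull << 6)  - 1)) << 58;	/* XEHP_SW_COUNTER, bits 63:58 */
	return desc;
}
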
  #define CHV_CLK_CTL1                  _MMIO(0x101100)
  #define VLV_CLK_CTL2                  _MMIO(0x101104)
  #define   CLK_CTL2_CZCOUNT_30NS_SHIFT 28
  #define _PSR2_CTL_EDP                         0x6f900
  #define EDP_PSR2_CTL(tran)                    _MMIO_TRANS2(tran, _PSR2_CTL_A)
  #define   EDP_PSR2_ENABLE                     (1 << 31)
- #define   EDP_SU_TRACK_ENABLE                 (1 << 30)
+ #define   EDP_SU_TRACK_ENABLE                 (1 << 30) /* up to adl-p */
  #define   TGL_EDP_PSR2_BLOCK_COUNT_NUM_2      (0 << 28)
  #define   TGL_EDP_PSR2_BLOCK_COUNT_NUM_3      (1 << 28)
  #define   EDP_Y_COORDINATE_ENABLE             REG_BIT(25) /* display 10, 11 and 12 */
+ #define   EDP_PSR2_SU_SDP_SCANLINE            REG_BIT(25) /* display 13+ */
  #define   EDP_MAX_SU_DISABLE_TIME(t)          ((t) << 20)
  #define   EDP_MAX_SU_DISABLE_TIME_MASK                (0x1f << 20)
  #define   EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES   8
  #define   EDP_PSR2_IO_BUFFER_WAKE(lines)      ((EDP_PSR2_IO_BUFFER_WAKE_MAX_LINES - (lines)) << 13)
  #define   EDP_PSR2_IO_BUFFER_WAKE_MASK                (3 << 13)
  #define   TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES       5
- #define   TGL_EDP_PSR2_IO_BUFFER_WAKE(lines)  (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << 13)
+ #define   TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT   13
+ #define   TGL_EDP_PSR2_IO_BUFFER_WAKE(lines)  (((lines) - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES) << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT)
  #define   TGL_EDP_PSR2_IO_BUFFER_WAKE_MASK    (7 << 13)
  #define   EDP_PSR2_FAST_WAKE_MAX_LINES                8
  #define   EDP_PSR2_FAST_WAKE(lines)           ((EDP_PSR2_FAST_WAKE_MAX_LINES - (lines)) << 11)
  #define   EDP_PSR2_FAST_WAKE_MASK             (3 << 11)
  #define   TGL_EDP_PSR2_FAST_WAKE_MIN_LINES    5
- #define   TGL_EDP_PSR2_FAST_WAKE(lines)               (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << 10)
+ #define   TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT    10
+ #define   TGL_EDP_PSR2_FAST_WAKE(lines)               (((lines) - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES) << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT)
  #define   TGL_EDP_PSR2_FAST_WAKE_MASK         (7 << 10)
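
A small worked example (not from the diff) of the encoding the new *_SHIFT names spell out: the programmed field is the requested line count minus the TGL minimum, shifted into place.

/* Sketch: 7 IO-buffer wake lines on TGL encodes as (7 - 5) << 13 == 0x4000. */
_Static_assert(((7 - 5) << 13) == 0x4000, "wake-lines field value");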
  #define   EDP_PSR2_TP2_TIME_500us             (0 << 8)
  #define   EDP_PSR2_TP2_TIME_100us             (1 << 8)
  #define PSR2_SU_STATUS_MASK(frame)    (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
  #define PSR2_SU_STATUS_FRAMES         8
  
- #define _PSR2_MAN_TRK_CTL_A                           0x60910
- #define _PSR2_MAN_TRK_CTL_EDP                         0x6f910
- #define PSR2_MAN_TRK_CTL(tran)                                _MMIO_TRANS2(tran, _PSR2_MAN_TRK_CTL_A)
- #define  PSR2_MAN_TRK_CTL_ENABLE                      REG_BIT(31)
- #define  PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK   REG_GENMASK(30, 21)
- #define  PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val)   REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
+ #define _PSR2_MAN_TRK_CTL_A                                   0x60910
+ #define _PSR2_MAN_TRK_CTL_EDP                                 0x6f910
+ #define PSR2_MAN_TRK_CTL(tran)                                        _MMIO_TRANS2(tran, _PSR2_MAN_TRK_CTL_A)
+ #define  PSR2_MAN_TRK_CTL_ENABLE                              REG_BIT(31)
+ #define  PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK           REG_GENMASK(30, 21)
+ #define  PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val)           REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
  #define  PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK             REG_GENMASK(20, 11)
  #define  PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val)             REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val)
- #define  PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME                REG_BIT(3)
- #define  PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME     REG_BIT(2)
- #define  PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE     REG_BIT(1)
+ #define  PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME                        REG_BIT(3)
+ #define  PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME             REG_BIT(2)
+ #define  PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE             REG_BIT(1)
+ #define  ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK      REG_GENMASK(28, 16)
+ #define  ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val)      REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
+ #define  ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK                REG_GENMASK(12, 0)
+ #define  ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val)                REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val)
+ #define  ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME           REG_BIT(14)
+ #define  ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME                REG_BIT(13)
  
  /* Icelake DSC Rate Control Range Parameter Registers */
  #define DSCA_RC_RANGE_PARAMETERS_0            _MMIO(0x6B240)
  #define   PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
  #define   PIPEMISC_OUTPUT_COLORSPACE_YUV  (1 << 11)
  #define   PIPEMISC_PIXEL_ROUNDING_TRUNC       REG_BIT(8) /* tgl+ */
 -#define   PIPEMISC_DITHER_BPC_MASK    (7 << 5)
 -#define   PIPEMISC_DITHER_8_BPC               (0 << 5)
 -#define   PIPEMISC_DITHER_10_BPC      (1 << 5)
 -#define   PIPEMISC_DITHER_6_BPC               (2 << 5)
 -#define   PIPEMISC_DITHER_12_BPC      (3 << 5)
 +/*
 + * For display version < 13, bits 5-7 of PIPEMISC represent DITHER BPC with
 + * valid values of: 6, 8, 10 BPC.
 + * On ADLP+, bits 5-7 represent PORT OUTPUT BPC with valid values of:
 + * 6, 8, 10, 12 BPC.
 + */
 +#define   PIPEMISC_BPC_MASK           (7 << 5)
 +#define   PIPEMISC_8_BPC              (0 << 5)
 +#define   PIPEMISC_10_BPC             (1 << 5)
 +#define   PIPEMISC_6_BPC              (2 << 5)
 +#define   PIPEMISC_12_BPC_ADLP                (4 << 5) /* adlp+ */
  #define   PIPEMISC_DITHER_ENABLE      (1 << 4)
  #define   PIPEMISC_DITHER_TYPE_MASK   (3 << 2)
  #define   PIPEMISC_DITHER_TYPE_SP     (0 << 2)
  #define SKL_PS_ECC_STAT(pipe, id)  _MMIO_PIPE(pipe,     \
                        _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A),   \
                        _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
- #define CNL_PS_COEF_INDEX_SET(pipe, id, set)  _MMIO_PIPE(pipe,    \
+ #define GLK_PS_COEF_INDEX_SET(pipe, id, set)  _MMIO_PIPE(pipe,    \
                        _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \
                        _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8)
  
- #define CNL_PS_COEF_DATA_SET(pipe, id, set)  _MMIO_PIPE(pipe,     \
+ #define GLK_PS_COEF_DATA_SET(pipe, id, set)  _MMIO_PIPE(pipe,     \
                        _ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \
                        _ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8)
  /* legacy palette */
  #define  GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED        (3 << 0) /* icl + */
  
  /* DMC */
- #define DMC_PROGRAM(i)                _MMIO(0x80000 + (i) * 4)
+ #define DMC_PROGRAM(addr, i)  _MMIO((addr) + (i) * 4)
  #define DMC_SSP_BASE_ADDR_GEN9        0x00002FC0
  #define DMC_HTP_ADDR_SKL      0x00500034
  #define DMC_SSP_BASE          _MMIO(0x8F074)
  #define  DSI1_NON_TE                  (1 << 31)
  #define  DSI0_NON_TE                  (1 << 30)
  #define  ICL_AUX_CHANNEL_E            (1 << 29)
- #define  CNL_AUX_CHANNEL_F            (1 << 28)
+ #define  ICL_AUX_CHANNEL_F            (1 << 28)
  #define  GEN9_AUX_CHANNEL_D           (1 << 27)
  #define  GEN9_AUX_CHANNEL_C           (1 << 26)
  #define  GEN9_AUX_CHANNEL_B           (1 << 25)
  #define  GEN11_GT_DW1_IRQ             (1 << 1)
  #define  GEN11_GT_DW0_IRQ             (1 << 0)
  
- #define DG1_MSTR_UNIT_INTR            _MMIO(0x190008)
+ #define DG1_MSTR_TILE_INTR            _MMIO(0x190008)
  #define   DG1_MSTR_IRQ                        REG_BIT(31)
- #define   DG1_MSTR_UNIT(u)            REG_BIT(u)
+ #define   DG1_MSTR_TILE(t)            REG_BIT(t)
  
  #define GEN11_DISPLAY_INT_CTL         _MMIO(0x44200)
  #define  GEN11_DISPLAY_IRQ_ENABLE     (1 << 31)
  #define GEN11_BCS_RSVD_INTR_MASK      _MMIO(0x1900a0)
  #define GEN11_VCS0_VCS1_INTR_MASK     _MMIO(0x1900a8)
  #define GEN11_VCS2_VCS3_INTR_MASK     _MMIO(0x1900ac)
+ #define GEN12_VCS4_VCS5_INTR_MASK     _MMIO(0x1900b0)
+ #define GEN12_VCS6_VCS7_INTR_MASK     _MMIO(0x1900b4)
  #define GEN11_VECS0_VECS1_INTR_MASK   _MMIO(0x1900d0)
+ #define GEN12_VECS2_VECS3_INTR_MASK   _MMIO(0x1900d4)
  #define GEN11_GUC_SG_INTR_MASK                _MMIO(0x1900e8)
  #define GEN11_GPM_WGBOXPERF_INTR_MASK _MMIO(0x1900ec)
  #define GEN11_CRYPTO_RSVD_INTR_MASK   _MMIO(0x1900f0)
  # define CHICKEN3_DGMG_DONE_FIX_DISABLE               (1 << 2)
  
  #define CHICKEN_PAR1_1                        _MMIO(0x42080)
+ #define  IGNORE_KVMR_PIPE_A           REG_BIT(23)
  #define  KBL_ARB_FILL_SPARE_22                REG_BIT(22)
  #define  DIS_RAM_BYPASS_PSR2_MAN_TRACK        (1 << 16)
  #define  SKL_DE_COMPRESSED_HASH_MODE  (1 << 15)
  #define  KVM_CONFIG_CHANGE_NOTIFICATION_SELECT        (1 << 14)
  
  #define CHICKEN_MISC_2                _MMIO(0x42084)
- #define  CNL_COMP_PWR_DOWN    (1 << 23)
  #define  KBL_ARB_FILL_SPARE_14        REG_BIT(14)
  #define  KBL_ARB_FILL_SPARE_13        REG_BIT(13)
  #define  GLK_CL2_PWR_DOWN     (1 << 12)
                                            [TRANSCODER_B] = _CHICKEN_TRANS_B, \
                                            [TRANSCODER_C] = _CHICKEN_TRANS_C, \
                                            [TRANSCODER_D] = _CHICKEN_TRANS_D))
- #define  HSW_FRAME_START_DELAY_MASK   (3 << 27)
- #define  HSW_FRAME_START_DELAY(x)     ((x) << 27) /* 0-3 */
- #define  VSC_DATA_SEL_SOFTWARE_CONTROL        (1 << 25) /* GLK and CNL+ */
- #define  DDI_TRAINING_OVERRIDE_ENABLE (1 << 19)
- #define  DDI_TRAINING_OVERRIDE_VALUE  (1 << 18)
- #define  DDIE_TRAINING_OVERRIDE_ENABLE        (1 << 17) /* CHICKEN_TRANS_A only */
- #define  DDIE_TRAINING_OVERRIDE_VALUE (1 << 16) /* CHICKEN_TRANS_A only */
- #define  PSR2_ADD_VERTICAL_LINE_COUNT   (1 << 15)
- #define  PSR2_VSC_ENABLE_PROG_HEADER    (1 << 12)
+ #define  HSW_FRAME_START_DELAY_MASK   REG_GENMASK(28, 27)
+ #define  HSW_FRAME_START_DELAY(x)     REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
+ #define  VSC_DATA_SEL_SOFTWARE_CONTROL        REG_BIT(25) /* GLK */
+ #define  FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23)
+ #define  DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19)
+ #define  DDI_TRAINING_OVERRIDE_VALUE  REG_BIT(18)
+ #define  DDIE_TRAINING_OVERRIDE_ENABLE        REG_BIT(17) /* CHICKEN_TRANS_A only */
+ #define  DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
+ #define  PSR2_ADD_VERTICAL_LINE_COUNT REG_BIT(15)
+ #define  PSR2_VSC_ENABLE_PROG_HEADER  REG_BIT(12)
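
As a hedged aside, a compile-time sketch of what the REG_GENMASK()/REG_FIELD_PREP() conversion above amounts to, with plain shifts standing in for the i915 helpers:

/* Assumed helper behaviour: GENMASK(28, 27) == 3 << 27, FIELD_PREP shifts to bit 27. */
#define FRAME_START_DELAY_MASK_SKETCH	(3u << 27)
#define FRAME_START_DELAY_SKETCH(x)	((unsigned int)(x) << 27)

_Static_assert((FRAME_START_DELAY_SKETCH(3) & ~FRAME_START_DELAY_MASK_SKETCH) == 0,
	       "any 0-3 frame start delay fits the two-bit field");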
  
  #define DISP_ARB_CTL  _MMIO(0x45000)
  #define  DISP_FBC_MEMORY_WAKE         (1 << 31)
  
  #define GEN8_CHICKEN_DCPR_1           _MMIO(0x46430)
  #define   SKL_SELECT_ALTERNATE_DC_EXIT        (1 << 30)
- #define   CNL_DELAY_PMRSP             (1 << 22)
+ #define   ICL_DELAY_PMRSP             (1 << 22)
  #define   MASK_WAKEMEM                        (1 << 13)
- #define   CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7)
  
  #define GEN11_CHICKEN_DCPR_2                  _MMIO(0x46434)
  #define   DCPR_MASK_MAXLATENCY_MEMUP_CLR      REG_BIT(27)
  #define   SKL_DFSM_PIPE_B_DISABLE     (1 << 21)
  #define   SKL_DFSM_PIPE_C_DISABLE     (1 << 28)
  #define   TGL_DFSM_PIPE_D_DISABLE     (1 << 22)
- #define   CNL_DFSM_DISPLAY_DSC_DISABLE        (1 << 7)
+ #define   GLK_DFSM_DISPLAY_DSC_DISABLE        (1 << 7)
  
  #define SKL_DSSM                              _MMIO(0x51004)
- #define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz               (1 << 31)
  #define ICL_DSSM_CDCLK_PLL_REFCLK_MASK                (7 << 29)
  #define ICL_DSSM_CDCLK_PLL_REFCLK_24MHz               (0 << 29)
  #define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz     (1 << 29)
  
  /* GEN8 chicken */
  #define HDC_CHICKEN0                          _MMIO(0x7300)
- #define CNL_HDC_CHICKEN0                      _MMIO(0xE5F0)
  #define ICL_HDC_MODE                          _MMIO(0xE5F4)
  #define  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE       (1 << 15)
  #define  HDC_FENCE_DEST_SLM_DISABLE           (1 << 14)
  #define _PIPEC_CHICKEN                                0x72038
  #define PIPE_CHICKEN(pipe)                    _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
                                                           _PIPEB_CHICKEN)
- #define   UNDERRUN_RECOVERY_DISABLE           REG_BIT(30)
+ #define   UNDERRUN_RECOVERY_DISABLE_ADLP      REG_BIT(30)
+ #define   UNDERRUN_RECOVERY_ENABLE_DG2                REG_BIT(30)
  #define   PIXEL_ROUNDING_TRUNC_FB_PASSTHRU    (1 << 15)
  #define   PER_PIXEL_ALPHA_BYPASS_EN           (1 << 7)
  
  #define   GEN9_FREQUENCY(x)                   ((x) << 23)
  #define   GEN6_OFFSET(x)                      ((x) << 19)
  #define   GEN6_AGGRESSIVE_TURBO                       (0 << 15)
+ #define   GEN9_SW_REQ_UNSLICE_RATIO_SHIFT     23
  #define GEN6_RC_VIDEO_FREQ                    _MMIO(0xA00C)
  #define GEN6_RC_CONTROL                               _MMIO(0xA090)
  #define   GEN6_RC_CTL_RC6pp_ENABLE            (1 << 16)
  #define   ICL_PCODE_MEM_SUBSYSYSTEM_INFO      0xd
  #define     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO (0x0 << 8)
  #define     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point)       (((point) << 16) | (0x1 << 8))
+ #define     ADL_PCODE_MEM_SS_READ_PSF_GV_INFO ((0) | (0x2 << 8))
  #define   ICL_PCODE_SAGV_DE_MEM_SS_CONFIG     0xe
  #define     ICL_PCODE_POINTS_RESTRICTED               0x0
- #define     ICL_PCODE_POINTS_RESTRICTED_MASK  0x1
+ #define     ICL_PCODE_POINTS_RESTRICTED_MASK  0xf
+ #define   ADLS_PSF_PT_SHIFT                   8
+ #define   ADLS_QGV_PT_MASK                    REG_GENMASK(7, 0)
+ #define   ADLS_PSF_PT_MASK                    REG_GENMASK(10, 8)
  #define   GEN6_PCODE_READ_D_COMP              0x10
  #define   GEN6_PCODE_WRITE_D_COMP             0x11
  #define   ICL_PCODE_EXIT_TCCOLD                       0x12
  #define   HSW_SAMPLE_C_PERFORMANCE    (1 << 9)
  #define   GEN8_CENTROID_PIXEL_OPT_DIS (1 << 8)
  #define   GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC (1 << 5)
- #define   CNL_FAST_ANISO_L1_BANKING_FIX       (1 << 4)
  #define   GEN8_SAMPLER_POWER_BYPASS_DIS       (1 << 1)
  
  #define GEN9_HALF_SLICE_CHICKEN7      _MMIO(0xe194)
  /* HSW/BDW power well */
  #define   HSW_PW_CTL_IDX_GLOBAL                       15
  
- /* SKL/BXT/GLK/CNL power wells */
+ /* SKL/BXT/GLK power wells */
  #define   SKL_PW_CTL_IDX_PW_2                 15
  #define   SKL_PW_CTL_IDX_PW_1                 14
- #define   CNL_PW_CTL_IDX_AUX_F                        12
- #define   CNL_PW_CTL_IDX_AUX_D                        11
  #define   GLK_PW_CTL_IDX_AUX_C                        10
  #define   GLK_PW_CTL_IDX_AUX_B                        9
  #define   GLK_PW_CTL_IDX_AUX_A                        8
- #define   CNL_PW_CTL_IDX_DDI_F                        6
  #define   SKL_PW_CTL_IDX_DDI_D                        4
  #define   SKL_PW_CTL_IDX_DDI_C                        3
  #define   SKL_PW_CTL_IDX_DDI_B                        2
@@@ -9818,19 -9856,6 +9862,6 @@@ enum skl_power_gate 
        ((pw_idx) - ICL_PW_CTL_IDX_PW_1 + SKL_PG1)
  #define  SKL_FUSE_PG_DIST_STATUS(pg)          (1 << (27 - (pg)))
  
- #define _CNL_AUX_REG_IDX(pw_idx)      ((pw_idx) - GLK_PW_CTL_IDX_AUX_B)
- #define _CNL_AUX_ANAOVRD1_B           0x162250
- #define _CNL_AUX_ANAOVRD1_C           0x162210
- #define _CNL_AUX_ANAOVRD1_D           0x1622D0
- #define _CNL_AUX_ANAOVRD1_F           0x162A90
- #define CNL_AUX_ANAOVRD1(pw_idx)      _MMIO(_PICK(_CNL_AUX_REG_IDX(pw_idx), \
-                                                   _CNL_AUX_ANAOVRD1_B, \
-                                                   _CNL_AUX_ANAOVRD1_C, \
-                                                   _CNL_AUX_ANAOVRD1_D, \
-                                                   _CNL_AUX_ANAOVRD1_F))
- #define   CNL_AUX_ANAOVRD1_ENABLE     (1 << 16)
- #define   CNL_AUX_ANAOVRD1_LDO_BYPASS (1 << 23)
  #define _ICL_AUX_REG_IDX(pw_idx)      ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
  #define _ICL_AUX_ANAOVRD1_A           0x162398
  #define _ICL_AUX_ANAOVRD1_B           0x6C398
  #define  TRANS_DDI_BPC_10             (1 << 20)
  #define  TRANS_DDI_BPC_6              (2 << 20)
  #define  TRANS_DDI_BPC_12             (3 << 20)
- #define  TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK       REG_GENMASK(19, 18) /* bdw-cnl */
+ #define  TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK       REG_GENMASK(19, 18)
  #define  TRANS_DDI_PORT_SYNC_MASTER_SELECT(x) REG_FIELD_PREP(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, (x))
  #define  TRANS_DDI_PVSYNC             (1 << 17)
  #define  TRANS_DDI_PHSYNC             (1 << 16)
- #define  TRANS_DDI_PORT_SYNC_ENABLE   REG_BIT(15) /* bdw-cnl */
+ #define  TRANS_DDI_PORT_SYNC_ENABLE   REG_BIT(15)
  #define  TRANS_DDI_EDP_INPUT_MASK     (7 << 12)
  #define  TRANS_DDI_EDP_INPUT_A_ON     (0 << 12)
  #define  TRANS_DDI_EDP_INPUT_A_ONOFF  (4 << 12)
  #define  PORT_SYNC_MODE_MASTER_SELECT_MASK    REG_GENMASK(2, 0)
  #define  PORT_SYNC_MODE_MASTER_SELECT(x)      REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
  
+ #define TRANS_CMTG_CHICKEN            _MMIO(0x6fa90)
+ #define  DISABLE_DPT_CLK_GATING               REG_BIT(1)
+
  /* DisplayPort Transport Control */
  #define _DP_TP_CTL_A                  0x64040
  #define _DP_TP_CTL_B                  0x64140
  #define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC)
  /* See DP_MSA_MISC_* for the bit definitions */
  
+ #define _TRANS_A_SET_CONTEXT_LATENCY          0x6007C
+ #define _TRANS_B_SET_CONTEXT_LATENCY          0x6107C
+ #define _TRANS_C_SET_CONTEXT_LATENCY          0x6207C
+ #define _TRANS_D_SET_CONTEXT_LATENCY          0x6307C
+ #define TRANS_SET_CONTEXT_LATENCY(tran)               _MMIO_TRANS2(tran, _TRANS_A_SET_CONTEXT_LATENCY)
+ #define  TRANS_SET_CONTEXT_LATENCY_MASK               REG_GENMASK(15, 0)
+ #define  TRANS_SET_CONTEXT_LATENCY_VALUE(x)   REG_FIELD_PREP(TRANS_SET_CONTEXT_LATENCY_MASK, (x))
+
  /* LCPLL Control */
  #define LCPLL_CTL                     _MMIO(0x130040)
  #define  LCPLL_PLL_DISABLE            (1 << 31)
  #define DPLL_CFGCR1(id)       _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
  #define DPLL_CFGCR2(id)       _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
  
- /*
-  * CNL Clocks
-  */
- #define DPCLKA_CFGCR0                         _MMIO(0x6C200)
- #define  DPCLKA_CFGCR0_DDI_CLK_OFF(port)      (1 << ((port) ==  PORT_F ? 23 : \
-                                                     (port) + 10))
- #define  DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port)        ((port) == PORT_F ? 21 : \
-                                               (port) * 2)
- #define  DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port) (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
- #define  DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port) ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
  /* ICL Clocks */
  #define ICL_DPCLKA_CFGCR0                     _MMIO(0x164280)
  #define  ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)   (1 << _PICK(phy, 10, 11, 24, 4, 5))
                                                        ADLS_DPCLKA_DDIJ_SEL_MASK, \
                                                        ADLS_DPCLKA_DDIK_SEL_MASK)
  
- /* CNL PLL */
+ /* ICL PLL */
  #define DPLL0_ENABLE          0x46010
  #define DPLL1_ENABLE          0x46014
  #define _ADLS_DPLL2_ENABLE    0x46018
  #define  PLL_LOCK             (1 << 30)
  #define  PLL_POWER_ENABLE     (1 << 27)
  #define  PLL_POWER_STATE      (1 << 26)
- #define CNL_DPLL_ENABLE(pll)  _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
+ #define ICL_DPLL_ENABLE(pll)  _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
                                           _ADLS_DPLL2_ENABLE, _ADLS_DPLL3_ENABLE)
  
+ #define _DG2_PLL3_ENABLE      0x4601C
+ #define DG2_PLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
+                                      _ADLS_DPLL2_ENABLE, _DG2_PLL3_ENABLE)
+
  #define TBT_PLL_ENABLE                _MMIO(0x46020)
  
  #define _MG_PLL1_ENABLE               0x46030
                                                   _MG_PLL_TDC_COLDST_BIAS_PORT1, \
                                                   _MG_PLL_TDC_COLDST_BIAS_PORT2)
  
- #define _CNL_DPLL0_CFGCR0             0x6C000
- #define _CNL_DPLL1_CFGCR0             0x6C080
- #define  DPLL_CFGCR0_HDMI_MODE                (1 << 30)
- #define  DPLL_CFGCR0_SSC_ENABLE               (1 << 29)
- #define  DPLL_CFGCR0_SSC_ENABLE_ICL   (1 << 25)
- #define  DPLL_CFGCR0_LINK_RATE_MASK   (0xf << 25)
- #define  DPLL_CFGCR0_LINK_RATE_2700   (0 << 25)
- #define  DPLL_CFGCR0_LINK_RATE_1350   (1 << 25)
- #define  DPLL_CFGCR0_LINK_RATE_810    (2 << 25)
- #define  DPLL_CFGCR0_LINK_RATE_1620   (3 << 25)
- #define  DPLL_CFGCR0_LINK_RATE_1080   (4 << 25)
- #define  DPLL_CFGCR0_LINK_RATE_2160   (5 << 25)
- #define  DPLL_CFGCR0_LINK_RATE_3240   (6 << 25)
- #define  DPLL_CFGCR0_LINK_RATE_4050   (7 << 25)
- #define  DPLL_CFGCR0_DCO_FRACTION_MASK        (0x7fff << 10)
- #define  DPLL_CFGCR0_DCO_FRACTION_SHIFT       (10)
- #define  DPLL_CFGCR0_DCO_FRACTION(x)  ((x) << 10)
- #define  DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
- #define CNL_DPLL_CFGCR0(pll)          _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0)
- #define _CNL_DPLL0_CFGCR1             0x6C004
- #define _CNL_DPLL1_CFGCR1             0x6C084
- #define  DPLL_CFGCR1_QDIV_RATIO_MASK  (0xff << 10)
- #define  DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
- #define  DPLL_CFGCR1_QDIV_RATIO(x)    ((x) << 10)
- #define  DPLL_CFGCR1_QDIV_MODE_SHIFT  (9)
- #define  DPLL_CFGCR1_QDIV_MODE(x)     ((x) << 9)
- #define  DPLL_CFGCR1_KDIV_MASK                (7 << 6)
- #define  DPLL_CFGCR1_KDIV_SHIFT               (6)
- #define  DPLL_CFGCR1_KDIV(x)          ((x) << 6)
- #define  DPLL_CFGCR1_KDIV_1           (1 << 6)
- #define  DPLL_CFGCR1_KDIV_2           (2 << 6)
- #define  DPLL_CFGCR1_KDIV_3           (4 << 6)
- #define  DPLL_CFGCR1_PDIV_MASK                (0xf << 2)
- #define  DPLL_CFGCR1_PDIV_SHIFT               (2)
- #define  DPLL_CFGCR1_PDIV(x)          ((x) << 2)
- #define  DPLL_CFGCR1_PDIV_2           (1 << 2)
- #define  DPLL_CFGCR1_PDIV_3           (2 << 2)
- #define  DPLL_CFGCR1_PDIV_5           (4 << 2)
- #define  DPLL_CFGCR1_PDIV_7           (8 << 2)
- #define  DPLL_CFGCR1_CENTRAL_FREQ     (3 << 0)
- #define  DPLL_CFGCR1_CENTRAL_FREQ_8400        (3 << 0)
- #define  TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL        (0 << 0)
- #define CNL_DPLL_CFGCR1(pll)          _MMIO_PLL(pll, _CNL_DPLL0_CFGCR1, _CNL_DPLL1_CFGCR1)
  #define _ICL_DPLL0_CFGCR0             0x164000
  #define _ICL_DPLL1_CFGCR0             0x164080
  #define ICL_DPLL_CFGCR0(pll)          _MMIO_PLL(pll, _ICL_DPLL0_CFGCR0, \
                                                  _ICL_DPLL1_CFGCR0)
+ #define   DPLL_CFGCR0_HDMI_MODE               (1 << 30)
+ #define   DPLL_CFGCR0_SSC_ENABLE      (1 << 29)
+ #define   DPLL_CFGCR0_SSC_ENABLE_ICL  (1 << 25)
+ #define   DPLL_CFGCR0_LINK_RATE_MASK  (0xf << 25)
+ #define   DPLL_CFGCR0_LINK_RATE_2700  (0 << 25)
+ #define   DPLL_CFGCR0_LINK_RATE_1350  (1 << 25)
+ #define   DPLL_CFGCR0_LINK_RATE_810   (2 << 25)
+ #define   DPLL_CFGCR0_LINK_RATE_1620  (3 << 25)
+ #define   DPLL_CFGCR0_LINK_RATE_1080  (4 << 25)
+ #define   DPLL_CFGCR0_LINK_RATE_2160  (5 << 25)
+ #define   DPLL_CFGCR0_LINK_RATE_3240  (6 << 25)
+ #define   DPLL_CFGCR0_LINK_RATE_4050  (7 << 25)
+ #define   DPLL_CFGCR0_DCO_FRACTION_MASK       (0x7fff << 10)
+ #define   DPLL_CFGCR0_DCO_FRACTION_SHIFT      (10)
+ #define   DPLL_CFGCR0_DCO_FRACTION(x) ((x) << 10)
+ #define   DPLL_CFGCR0_DCO_INTEGER_MASK        (0x3ff)
  
  #define _ICL_DPLL0_CFGCR1             0x164004
  #define _ICL_DPLL1_CFGCR1             0x164084
  #define ICL_DPLL_CFGCR1(pll)          _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \
                                                  _ICL_DPLL1_CFGCR1)
+ #define   DPLL_CFGCR1_QDIV_RATIO_MASK (0xff << 10)
+ #define   DPLL_CFGCR1_QDIV_RATIO_SHIFT        (10)
+ #define   DPLL_CFGCR1_QDIV_RATIO(x)   ((x) << 10)
+ #define   DPLL_CFGCR1_QDIV_MODE_SHIFT (9)
+ #define   DPLL_CFGCR1_QDIV_MODE(x)    ((x) << 9)
+ #define   DPLL_CFGCR1_KDIV_MASK               (7 << 6)
+ #define   DPLL_CFGCR1_KDIV_SHIFT              (6)
+ #define   DPLL_CFGCR1_KDIV(x)         ((x) << 6)
+ #define   DPLL_CFGCR1_KDIV_1          (1 << 6)
+ #define   DPLL_CFGCR1_KDIV_2          (2 << 6)
+ #define   DPLL_CFGCR1_KDIV_3          (4 << 6)
+ #define   DPLL_CFGCR1_PDIV_MASK               (0xf << 2)
+ #define   DPLL_CFGCR1_PDIV_SHIFT              (2)
+ #define   DPLL_CFGCR1_PDIV(x)         ((x) << 2)
+ #define   DPLL_CFGCR1_PDIV_2          (1 << 2)
+ #define   DPLL_CFGCR1_PDIV_3          (2 << 2)
+ #define   DPLL_CFGCR1_PDIV_5          (4 << 2)
+ #define   DPLL_CFGCR1_PDIV_7          (8 << 2)
+ #define   DPLL_CFGCR1_CENTRAL_FREQ    (3 << 0)
+ #define   DPLL_CFGCR1_CENTRAL_FREQ_8400       (3 << 0)
+ #define   TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL       (0 << 0)
  
  #define _TGL_DPLL0_CFGCR0             0x164284
  #define _TGL_DPLL1_CFGCR0             0x16428C
  #define   BXT_DE_PLL_LOCK             (1 << 30)
  #define   BXT_DE_PLL_FREQ_REQ         (1 << 23)
  #define   BXT_DE_PLL_FREQ_REQ_ACK     (1 << 22)
- #define   CNL_CDCLK_PLL_RATIO(x)      (x)
- #define   CNL_CDCLK_PLL_RATIO_MASK    0xff
+ #define   ICL_CDCLK_PLL_RATIO(x)      (x)
+ #define   ICL_CDCLK_PLL_RATIO_MASK    0xff
  
  /* GEN9 DC */
  #define DC_STATE_EN                   _MMIO(0x45504)
  #define SKL_MEMORY_FREQ_MULTIPLIER_HZ         266666666
  #define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU     _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
  #define  SKL_REQ_DATA_MASK                    (0xF << 0)
+ #define  DG1_GEAR_TYPE                                REG_BIT(16)
  
  #define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
  #define  SKL_DRAM_DDR_TYPE_MASK                       (0x3 << 0)
  #define  SKL_DRAM_RANK_1                      (0x0 << 10)
  #define  SKL_DRAM_RANK_2                      (0x1 << 10)
  #define  SKL_DRAM_RANK_MASK                   (0x1 << 10)
- #define  CNL_DRAM_SIZE_MASK                   0x7F
- #define  CNL_DRAM_WIDTH_MASK                  (0x3 << 7)
- #define  CNL_DRAM_WIDTH_SHIFT                 7
- #define  CNL_DRAM_WIDTH_X8                    (0x0 << 7)
- #define  CNL_DRAM_WIDTH_X16                   (0x1 << 7)
- #define  CNL_DRAM_WIDTH_X32                   (0x2 << 7)
- #define  CNL_DRAM_RANK_MASK                   (0x3 << 9)
- #define  CNL_DRAM_RANK_SHIFT                  9
- #define  CNL_DRAM_RANK_1                      (0x0 << 9)
- #define  CNL_DRAM_RANK_2                      (0x1 << 9)
- #define  CNL_DRAM_RANK_3                      (0x2 << 9)
- #define  CNL_DRAM_RANK_4                      (0x3 << 9)
+ #define  ICL_DRAM_SIZE_MASK                   0x7F
+ #define  ICL_DRAM_WIDTH_MASK                  (0x3 << 7)
+ #define  ICL_DRAM_WIDTH_SHIFT                 7
+ #define  ICL_DRAM_WIDTH_X8                    (0x0 << 7)
+ #define  ICL_DRAM_WIDTH_X16                   (0x1 << 7)
+ #define  ICL_DRAM_WIDTH_X32                   (0x2 << 7)
+ #define  ICL_DRAM_RANK_MASK                   (0x3 << 9)
+ #define  ICL_DRAM_RANK_SHIFT                  9
+ #define  ICL_DRAM_RANK_1                      (0x0 << 9)
+ #define  ICL_DRAM_RANK_2                      (0x1 << 9)
+ #define  ICL_DRAM_RANK_3                      (0x2 << 9)
+ #define  ICL_DRAM_RANK_4                      (0x3 << 9)
+ #define SA_PERF_STATUS_0_0_0_MCHBAR_PC                _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5918)
+ #define  DG1_QCLK_RATIO_MASK                  REG_GENMASK(9, 2)
+ #define  DG1_QCLK_REFERENCE                   REG_BIT(10)
+ #define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR     _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
+ #define   DG1_DRAM_T_RDPRE_MASK                       REG_GENMASK(16, 11)
+ #define   DG1_DRAM_T_RP_MASK                  REG_GENMASK(6, 0)
+ #define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH        _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4004)
+ #define   DG1_DRAM_T_RCD_MASK                 REG_GENMASK(15, 9)
+ #define   DG1_DRAM_T_RAS_MASK                 REG_GENMASK(8, 1)
  
  /*
   * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
  
  /* MOCS (Memory Object Control State) registers */
  #define GEN9_LNCFCMOCS(i)     _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
+ #define GEN9_LNCFCMOCS_REG_COUNT      32
  
  #define __GEN9_RCS0_MOCS0     0xc800
  #define GEN9_GFX_MOCS(i)      _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
                                                 _ICL_PHY_MISC_B)
  #define  ICL_PHY_MISC_MUX_DDID                        (1 << 28)
  #define  ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN     (1 << 23)
+ #define  DG2_PHY_DP_TX_ACK_MASK                       REG_GENMASK(23, 20)
  
  /* Icelake Display Stream Compression Registers */
  #define DSCA_PICTURE_PARAMETER_SET_0          _MMIO(0x6B200)
@@@ -683,7 -683,7 +683,7 @@@ static void ipu_plane_atomic_update(str
                break;
        }
  
 -      ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst));
 +      ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8));
  
        width = ipu_src_rect_width(new_state);
        height = drm_rect_height(&new_state->src) >> 16;
  }
  
  static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
-       .prepare_fb = drm_gem_plane_helper_prepare_fb,
        .atomic_check = ipu_plane_atomic_check,
        .atomic_disable = ipu_plane_atomic_disable,
        .atomic_update = ipu_plane_atomic_update,
@@@ -17,7 -17,6 +17,6 @@@
  #include <drm/drm_drv.h>
  #include <drm/drm_gem_cma_helper.h>
  #include <drm/drm_gem_framebuffer_helper.h>
- #include <drm/drm_irq.h>
  #include <drm/drm_probe_helper.h>
  #include <drm/drm_vblank.h>
  
@@@ -203,7 -202,6 +202,7 @@@ static irqreturn_t handle_lcd_irq(struc
        unsigned long status, val, val1;
        int plane_id, dma0_state, dma1_state;
        struct kmb_drm_private *kmb = to_kmb(dev);
 +      u32 ctrl = 0;
  
        status = kmb_read_lcd(kmb, LCD_INT_STATUS);
  
                                kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
                                                    kmb->plane_status[plane_id].ctrl);
  
 +                              ctrl = kmb_read_lcd(kmb, LCD_CONTROL);
 +                              if (!(ctrl & (LCD_CTRL_VL1_ENABLE |
 +                                  LCD_CTRL_VL2_ENABLE |
 +                                  LCD_CTRL_GL1_ENABLE |
 +                                  LCD_CTRL_GL2_ENABLE))) {
 +                                      /* If no LCD layers are using DMA,
 +                                       * then disable DMA pipelined AXI read
 +                                       * transactions.
 +                                       */
 +                                      kmb_clr_bitmask_lcd(kmb, LCD_CONTROL,
 +                                                          LCD_CTRL_PIPELINE_DMA);
 +                              }
 +
                                kmb->plane_status[plane_id].disable = false;
                        }
                }
@@@ -413,22 -398,37 +412,37 @@@ static void kmb_irq_reset(struct drm_de
        kmb_write_lcd(to_kmb(drm), LCD_INT_ENABLE, 0);
  }
  
+ static int kmb_irq_install(struct drm_device *drm, unsigned int irq)
+ {
+       if (irq == IRQ_NOTCONNECTED)
+               return -ENOTCONN;
+
+       kmb_irq_reset(drm);
+
+       return request_irq(irq, kmb_isr, 0, drm->driver->name, drm);
+ }
+
+ static void kmb_irq_uninstall(struct drm_device *drm)
+ {
+       struct kmb_drm_private *kmb = to_kmb(drm);
+
+       kmb_irq_reset(drm);
+       free_irq(kmb->irq_lcd, drm);
+ }
+
  DEFINE_DRM_GEM_CMA_FOPS(fops);
  
  static const struct drm_driver kmb_driver = {
        .driver_features = DRIVER_GEM |
            DRIVER_MODESET | DRIVER_ATOMIC,
-       .irq_handler = kmb_isr,
-       .irq_preinstall = kmb_irq_reset,
-       .irq_uninstall = kmb_irq_reset,
        /* GEM Operations */
        .fops = &fops,
        DRM_GEM_CMA_DRIVER_OPS_VMAP,
        .name = "kmb-drm",
 -      .desc = "KEEMBAY DISPLAY DRIVER ",
 -      .date = "20201008",
 -      .major = 1,
 -      .minor = 0,
 +      .desc = "KEEMBAY DISPLAY DRIVER",
 +      .date = DRIVER_DATE,
 +      .major = DRIVER_MAJOR,
 +      .minor = DRIVER_MINOR,
  };
  
  static int kmb_remove(struct platform_device *pdev)
        of_node_put(kmb->crtc.port);
        kmb->crtc.port = NULL;
        pm_runtime_get_sync(drm->dev);
-       drm_irq_uninstall(drm);
+       kmb_irq_uninstall(drm);
        pm_runtime_put_sync(drm->dev);
        pm_runtime_disable(drm->dev);
  
@@@ -532,7 -532,7 +546,7 @@@ static int kmb_probe(struct platform_de
        if (ret)
                goto err_free;
  
-       ret = drm_irq_install(&kmb->drm, kmb->irq_lcd);
+       ret = kmb_irq_install(&kmb->drm, kmb->irq_lcd);
        if (ret < 0) {
                drm_err(&kmb->drm, "failed to install IRQ handler\n");
                goto err_irq;
index 0000000,64b4528..f46d4ab
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,167 +1,169 @@@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+  * Copyright (c) 2021 MediaTek Inc.
+  */
+ #include <linux/clk.h>
+ #include <linux/component.h>
+ #include <linux/module.h>
+ #include <linux/of_device.h>
+ #include <linux/of_irq.h>
+ #include <linux/platform_device.h>
+ #include <linux/soc/mediatek/mtk-cmdq.h>
+
+ #include "mtk_disp_drv.h"
+ #include "mtk_drm_crtc.h"
+ #include "mtk_drm_ddp_comp.h"
+
+ #define DISP_AAL_EN                           0x0000
+ #define AAL_EN                                                BIT(0)
+ #define DISP_AAL_SIZE                         0x0030
++#define DISP_AAL_OUTPUT_SIZE                  0x04d8
+
+ struct mtk_disp_aal_data {
+       bool has_gamma;
+ };
+
+ /**
+  * struct mtk_disp_aal - DISP_AAL driver structure
+  * @clk: clock for the DISP_AAL hardware block
+  * @regs: MMIO base of the DISP_AAL hardware block
+  * @cmdq_reg: CMDQ client register for this hardware block
+  * @data: platform-specific driver data
+  */
+ struct mtk_disp_aal {
+       struct clk *clk;
+       void __iomem *regs;
+       struct cmdq_client_reg cmdq_reg;
+       const struct mtk_disp_aal_data *data;
+ };
+
+ int mtk_aal_clk_enable(struct device *dev)
+ {
+       struct mtk_disp_aal *aal = dev_get_drvdata(dev);
+       return clk_prepare_enable(aal->clk);
+ }
+ void mtk_aal_clk_disable(struct device *dev)
+ {
+       struct mtk_disp_aal *aal = dev_get_drvdata(dev);
+       clk_disable_unprepare(aal->clk);
+ }
+ void mtk_aal_config(struct device *dev, unsigned int w,
+                          unsigned int h, unsigned int vrefresh,
+                          unsigned int bpc, struct cmdq_pkt *cmdq_pkt)
+ {
+       struct mtk_disp_aal *aal = dev_get_drvdata(dev);
+       mtk_ddp_write(cmdq_pkt, w << 16 | h, &aal->cmdq_reg, aal->regs, DISP_AAL_SIZE);
++      mtk_ddp_write(cmdq_pkt, w << 16 | h, &aal->cmdq_reg, aal->regs, DISP_AAL_OUTPUT_SIZE);
+ }
+ void mtk_aal_gamma_set(struct device *dev, struct drm_crtc_state *state)
+ {
+       struct mtk_disp_aal *aal = dev_get_drvdata(dev);
+       if (aal->data && aal->data->has_gamma)
+               mtk_gamma_set_common(aal->regs, state);
+ }
+ void mtk_aal_start(struct device *dev)
+ {
+       struct mtk_disp_aal *aal = dev_get_drvdata(dev);
+       writel(AAL_EN, aal->regs + DISP_AAL_EN);
+ }
+ void mtk_aal_stop(struct device *dev)
+ {
+       struct mtk_disp_aal *aal = dev_get_drvdata(dev);
+       writel_relaxed(0x0, aal->regs + DISP_AAL_EN);
+ }
+ static int mtk_disp_aal_bind(struct device *dev, struct device *master,
+                              void *data)
+ {
+       return 0;
+ }
+ static void mtk_disp_aal_unbind(struct device *dev, struct device *master,
+                                 void *data)
+ {
+ }
+ static const struct component_ops mtk_disp_aal_component_ops = {
+       .bind   = mtk_disp_aal_bind,
+       .unbind = mtk_disp_aal_unbind,
+ };
+ static int mtk_disp_aal_probe(struct platform_device *pdev)
+ {
+       struct device *dev = &pdev->dev;
+       struct mtk_disp_aal *priv;
+       struct resource *res;
+       int ret;
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+       priv->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(priv->clk)) {
+               dev_err(dev, "failed to get aal clk\n");
+               return PTR_ERR(priv->clk);
+       }
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->regs = devm_ioremap_resource(dev, res);
+       if (IS_ERR(priv->regs)) {
+               dev_err(dev, "failed to ioremap aal\n");
+               return PTR_ERR(priv->regs);
+       }
+ #if IS_REACHABLE(CONFIG_MTK_CMDQ)
+       ret = cmdq_dev_get_client_reg(dev, &priv->cmdq_reg, 0);
+       if (ret)
+               dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
+ #endif
+       priv->data = of_device_get_match_data(dev);
+       platform_set_drvdata(pdev, priv);
+       ret = component_add(dev, &mtk_disp_aal_component_ops);
+       if (ret)
+               dev_err(dev, "Failed to add component: %d\n", ret);
+       return ret;
+ }
+ static int mtk_disp_aal_remove(struct platform_device *pdev)
+ {
+       component_del(&pdev->dev, &mtk_disp_aal_component_ops);
+       return 0;
+ }
+ static const struct mtk_disp_aal_data mt8173_aal_driver_data = {
+       .has_gamma = true,
+ };
+ static const struct of_device_id mtk_disp_aal_driver_dt_match[] = {
+       { .compatible = "mediatek,mt8173-disp-aal",
+         .data = &mt8173_aal_driver_data},
+       { .compatible = "mediatek,mt8183-disp-aal"},
+       {},
+ };
+ MODULE_DEVICE_TABLE(of, mtk_disp_aal_driver_dt_match);
+ struct platform_driver mtk_disp_aal_driver = {
+       .probe          = mtk_disp_aal_probe,
+       .remove         = mtk_disp_aal_remove,
+       .driver         = {
+               .name   = "mediatek-disp-aal",
+               .owner  = THIS_MODULE,
+               .of_match_table = mtk_disp_aal_driver_dt_match,
+       },
+ };
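
The mtk_aal_config() helper above packs the active width and height into one 32-bit value, width in bits 31:16 and height in bits 15:0, and now writes the same value to DISP_AAL_OUTPUT_SIZE as well. The standalone, userspace sketch below only illustrates that encoding with a hypothetical 1920x1080 mode; it is not part of the driver.

#include <stdint.h>
#include <stdio.h>

/* Pack an active size the way the AAL size registers expect it:
 * width in bits 31:16, height in bits 15:0.
 */
static uint32_t aal_size_reg(uint32_t w, uint32_t h)
{
        return w << 16 | h;
}

int main(void)
{
        /* 1920x1080 packs to 0x07800438 */
        printf("0x%08x\n", (unsigned int)aal_size_reg(1920, 1080));
        return 0;
}
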
@@@ -605,15 -605,11 +605,15 @@@ static int mtk_dpi_bridge_atomic_check(
                                       struct drm_crtc_state *crtc_state,
                                       struct drm_connector_state *conn_state)
  {
 -      struct mtk_dpi *dpi = bridge->driver_private;
 +      struct mtk_dpi *dpi = bridge_to_dpi(bridge);
        unsigned int out_bus_format;
  
        out_bus_format = bridge_state->output_bus_cfg.format;
  
 +      if (out_bus_format == MEDIA_BUS_FMT_FIXED)
 +              if (dpi->conf->num_output_fmts)
 +                      out_bus_format = dpi->conf->output_fmts[0];
 +
        dev_dbg(dpi->dev, "input format 0x%04x, output format 0x%04x\n",
                bridge_state->input_bus_cfg.format,
                bridge_state->output_bus_cfg.format);
@@@ -718,10 -714,8 +718,8 @@@ static int mtk_dpi_bind(struct device *
  
        ret = drm_bridge_attach(&dpi->encoder, &dpi->bridge, NULL,
                                DRM_BRIDGE_ATTACH_NO_CONNECTOR);
-       if (ret) {
-               dev_err(dev, "Failed to attach bridge: %d\n", ret);
+       if (ret)
                goto err_cleanup;
-       }
  
        dpi->connector = drm_bridge_connector_init(drm_dev, &dpi->encoder);
        if (IS_ERR(dpi->connector)) {
@@@ -4,6 -4,8 +4,8 @@@
   */
  
  #include <linux/clk.h>
+ #include <linux/dma-mapping.h>
+ #include <linux/mailbox_controller.h>
  #include <linux/pm_runtime.h>
  #include <linux/soc/mediatek/mtk-cmdq.h>
  #include <linux/soc/mediatek/mtk-mmsys.h>
@@@ -50,8 -52,11 +52,11 @@@ struct mtk_drm_crtc 
        bool                            pending_async_planes;
  
  #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       struct cmdq_client              *cmdq_client;
+       struct mbox_client              cmdq_cl;
+       struct mbox_chan                *cmdq_chan;
+       struct cmdq_pkt                 cmdq_handle;
        u32                             cmdq_event;
+       u32                             cmdq_vblank_cnt;
  #endif
  
        struct device                   *mmsys_dev;
@@@ -222,9 -227,79 +227,79 @@@ struct mtk_ddp_comp *mtk_drm_ddp_comp_f
  }
  
  #if IS_REACHABLE(CONFIG_MTK_CMDQ)
- static void ddp_cmdq_cb(struct cmdq_cb_data data)
+ static int mtk_drm_cmdq_pkt_create(struct mbox_chan *chan, struct cmdq_pkt *pkt,
+                                   size_t size)
  {
-       cmdq_pkt_destroy(data.data);
+       struct device *dev;
+       dma_addr_t dma_addr;
+       pkt->va_base = kzalloc(size, GFP_KERNEL);
+       if (!pkt->va_base) {
+               kfree(pkt);
+               return -ENOMEM;
+       }
+       pkt->buf_size = size;
+       dev = chan->mbox->dev;
+       dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
+                                 DMA_TO_DEVICE);
+       if (dma_mapping_error(dev, dma_addr)) {
+               dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
+               kfree(pkt->va_base);
+               kfree(pkt);
+               return -ENOMEM;
+       }
+       pkt->pa_base = dma_addr;
+       return 0;
+ }
+ static void mtk_drm_cmdq_pkt_destroy(struct mbox_chan *chan, struct cmdq_pkt *pkt)
+ {
+       dma_unmap_single(chan->mbox->dev, pkt->pa_base, pkt->buf_size,
+                        DMA_TO_DEVICE);
+       kfree(pkt->va_base);
+       kfree(pkt);
+ }
+ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
+ {
+       struct mtk_drm_crtc *mtk_crtc = container_of(cl, struct mtk_drm_crtc, cmdq_cl);
+       struct cmdq_cb_data *data = mssg;
+       struct mtk_crtc_state *state;
+       unsigned int i;
+       state = to_mtk_crtc_state(mtk_crtc->base.state);
+       state->pending_config = false;
+       if (mtk_crtc->pending_planes) {
+               for (i = 0; i < mtk_crtc->layer_nr; i++) {
+                       struct drm_plane *plane = &mtk_crtc->planes[i];
+                       struct mtk_plane_state *plane_state;
+                       plane_state = to_mtk_plane_state(plane->state);
+                       plane_state->pending.config = false;
+               }
+               mtk_crtc->pending_planes = false;
+       }
+       if (mtk_crtc->pending_async_planes) {
+               for (i = 0; i < mtk_crtc->layer_nr; i++) {
+                       struct drm_plane *plane = &mtk_crtc->planes[i];
+                       struct mtk_plane_state *plane_state;
+                       plane_state = to_mtk_plane_state(plane->state);
+                       plane_state->pending.async_config = false;
+               }
+               mtk_crtc->pending_async_planes = false;
+       }
+       mtk_crtc->cmdq_vblank_cnt = 0;
+       mtk_drm_cmdq_pkt_destroy(mtk_crtc->cmdq_chan, data->pkt);
  }
  #endif
  
@@@ -378,7 -453,8 +453,8 @@@ static void mtk_crtc_ddp_config(struct 
                                    state->pending_vrefresh, 0,
                                    cmdq_handle);
  
-               state->pending_config = false;
+               if (!cmdq_handle)
+                       state->pending_config = false;
        }
  
        if (mtk_crtc->pending_planes) {
                                mtk_ddp_comp_layer_config(comp, local_layer,
                                                          plane_state,
                                                          cmdq_handle);
-                       plane_state->pending.config = false;
+                       if (!cmdq_handle)
+                               plane_state->pending.config = false;
                }
-               mtk_crtc->pending_planes = false;
+               if (!cmdq_handle)
+                       mtk_crtc->pending_planes = false;
        }
  
        if (mtk_crtc->pending_async_planes) {
                                mtk_ddp_comp_layer_config(comp, local_layer,
                                                          plane_state,
                                                          cmdq_handle);
-                       plane_state->pending.async_config = false;
+                       if (!cmdq_handle)
+                               plane_state->pending.async_config = false;
                }
-               mtk_crtc->pending_async_planes = false;
+               if (!cmdq_handle)
+                       mtk_crtc->pending_async_planes = false;
        }
  }
  
@@@ -430,7 -512,7 +512,7 @@@ static void mtk_drm_crtc_update_config(
                                       bool needs_vblank)
  {
  #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       struct cmdq_pkt *cmdq_handle;
+       struct cmdq_pkt *cmdq_handle = &mtk_crtc->cmdq_handle;
  #endif
        struct drm_crtc *crtc = &mtk_crtc->base;
        struct mtk_drm_private *priv = crtc->dev->dev_private;
                mtk_mutex_release(mtk_crtc->mutex);
        }
  #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       if (mtk_crtc->cmdq_client) {
-               mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
-               cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
+       if (mtk_crtc->cmdq_chan) {
+               mbox_flush(mtk_crtc->cmdq_chan, 2000);
+               cmdq_handle->cmd_buf_size = 0;
                cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
                cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
                mtk_crtc_ddp_config(crtc, cmdq_handle);
                cmdq_pkt_finalize(cmdq_handle);
-               cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
+               dma_sync_single_for_device(mtk_crtc->cmdq_chan->mbox->dev,
+                                           cmdq_handle->pa_base,
+                                           cmdq_handle->cmd_buf_size,
+                                           DMA_TO_DEVICE);
+               /*
+                * CMDQ command should execute in next vblank,
+                * If it fail to execute in next 2 vblank, timeout happen.
+                */
+               mtk_crtc->cmdq_vblank_cnt = 2;
+               mbox_send_message(mtk_crtc->cmdq_chan, cmdq_handle);
+               mbox_client_txdone(mtk_crtc->cmdq_chan, 0);
        }
  #endif
        mtk_crtc->config_updating = false;
@@@ -489,12 -581,15 +581,15 @@@ static void mtk_crtc_ddp_irq(void *data
        struct mtk_drm_private *priv = crtc->dev->dev_private;
  
  #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
+       if (!priv->data->shadow_register && !mtk_crtc->cmdq_chan)
+               mtk_crtc_ddp_config(crtc, NULL);
+       else if (mtk_crtc->cmdq_vblank_cnt > 0 && --mtk_crtc->cmdq_vblank_cnt == 0)
+               DRM_ERROR("mtk_crtc %d CMDQ execute command timeout!\n",
+                         drm_crtc_index(&mtk_crtc->base));
  #else
        if (!priv->data->shadow_register)
- #endif
                mtk_crtc_ddp_config(crtc, NULL);
+ #endif
        mtk_drm_finish_page_flip(mtk_crtc);
  }
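
The cmdq_vblank_cnt handling spread across the hunks above amounts to a small watchdog: the submit path arms a two-vblank budget, the GCE completion callback (ddp_cmdq_cb) clears it, and the vblank interrupt reports a timeout if the budget counts down to zero. The standalone model below only illustrates that bookkeeping; the struct and function names are placeholders, not kernel API.

#include <stdbool.h>
#include <stdio.h>

struct model_crtc {
        unsigned int cmdq_vblank_cnt;
};

static void model_submit(struct model_crtc *c)
{
        c->cmdq_vblank_cnt = 2;         /* command must land within two vblanks */
}

static void model_cmdq_done(struct model_crtc *c)
{
        c->cmdq_vblank_cnt = 0;         /* completion callback disarms the watchdog */
}

static bool model_vblank(struct model_crtc *c)
{
        /* true when the command missed its two-vblank budget */
        return c->cmdq_vblank_cnt > 0 && --c->cmdq_vblank_cnt == 0;
}

int main(void)
{
        struct model_crtc c = { 0 };

        model_submit(&c);
        model_vblank(&c);               /* first vblank: still waiting */
        if (model_vblank(&c))           /* second vblank, no callback yet */
                printf("CMDQ execute command timeout\n");

        model_cmdq_done(&c);            /* normally the callback clears the count */
        return 0;
}
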
  
@@@ -532,10 -627,13 +627,10 @@@ void mtk_drm_crtc_async_update(struct d
                               struct drm_atomic_state *state)
  {
        struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
 -      const struct drm_plane_helper_funcs *plane_helper_funcs =
 -                      plane->helper_private;
  
        if (!mtk_crtc->enabled)
                return;
  
 -      plane_helper_funcs->atomic_update(plane, state);
        mtk_drm_crtc_update_config(mtk_crtc, false);
  }
  
@@@ -752,14 -850,22 +847,22 @@@ int mtk_drm_crtc_create(struct drm_devi
        for (i = 0; i < path_len; i++) {
                enum mtk_ddp_comp_id comp_id = path[i];
                struct device_node *node;
+               struct mtk_ddp_comp *comp;
  
                node = priv->comp_node[comp_id];
+               comp = &priv->ddp_comp[comp_id];
                if (!node) {
                        dev_info(dev,
                                 "Not creating crtc %d because component %d is disabled or missing\n",
                                 pipe, comp_id);
                        return 0;
                }
+               if (!comp->dev) {
+                       dev_err(dev, "Component %pOF not initialized\n", node);
+                       return -ENODEV;
+               }
        }
  
        mtk_crtc = devm_kzalloc(dev, sizeof(*mtk_crtc), GFP_KERNEL);
        for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
                enum mtk_ddp_comp_id comp_id = path[i];
                struct mtk_ddp_comp *comp;
-               struct device_node *node;
  
-               node = priv->comp_node[comp_id];
                comp = &priv->ddp_comp[comp_id];
-               if (!comp) {
-                       dev_err(dev, "Component %pOF not initialized\n", node);
-                       ret = -ENODEV;
-                       return ret;
-               }
                mtk_crtc->ddp_comp[i] = comp;
  
                if (comp->funcs) {
        mutex_init(&mtk_crtc->hw_lock);
  
  #if IS_REACHABLE(CONFIG_MTK_CMDQ)
-       mtk_crtc->cmdq_client =
-                       cmdq_mbox_create(mtk_crtc->mmsys_dev,
-                                        drm_crtc_index(&mtk_crtc->base));
-       if (IS_ERR(mtk_crtc->cmdq_client)) {
+       mtk_crtc->cmdq_cl.dev = mtk_crtc->mmsys_dev;
+       mtk_crtc->cmdq_cl.tx_block = false;
+       mtk_crtc->cmdq_cl.knows_txdone = true;
+       mtk_crtc->cmdq_cl.rx_callback = ddp_cmdq_cb;
+       mtk_crtc->cmdq_chan =
+                       mbox_request_channel(&mtk_crtc->cmdq_cl,
+                                             drm_crtc_index(&mtk_crtc->base));
+       if (IS_ERR(mtk_crtc->cmdq_chan)) {
                dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
                        drm_crtc_index(&mtk_crtc->base));
-               mtk_crtc->cmdq_client = NULL;
+               mtk_crtc->cmdq_chan = NULL;
        }
  
-       if (mtk_crtc->cmdq_client) {
+       if (mtk_crtc->cmdq_chan) {
                ret = of_property_read_u32_index(priv->mutex_node,
                                                 "mediatek,gce-events",
                                                 drm_crtc_index(&mtk_crtc->base),
                if (ret) {
                        dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
                                drm_crtc_index(&mtk_crtc->base));
-                       cmdq_mbox_destroy(mtk_crtc->cmdq_client);
-                       mtk_crtc->cmdq_client = NULL;
+                       mbox_free_channel(mtk_crtc->cmdq_chan);
+                       mtk_crtc->cmdq_chan = NULL;
+               } else {
+                       ret = mtk_drm_cmdq_pkt_create(mtk_crtc->cmdq_chan,
+                                                      &mtk_crtc->cmdq_handle,
+                                                      PAGE_SIZE);
+                       if (ret) {
+                               dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
+                                       drm_crtc_index(&mtk_crtc->base));
+                               mbox_free_channel(mtk_crtc->cmdq_chan);
+                               mtk_crtc->cmdq_chan = NULL;
+                       }
                }
        }
  #endif
@@@ -110,35 -110,6 +110,35 @@@ static int mtk_plane_atomic_async_check
                                                   true, true);
  }
  
 +static void mtk_plane_update_new_state(struct drm_plane_state *new_state,
 +                                     struct mtk_plane_state *mtk_plane_state)
 +{
 +      struct drm_framebuffer *fb = new_state->fb;
 +      struct drm_gem_object *gem;
 +      struct mtk_drm_gem_obj *mtk_gem;
 +      unsigned int pitch, format;
 +      dma_addr_t addr;
 +
 +      gem = fb->obj[0];
 +      mtk_gem = to_mtk_gem_obj(gem);
 +      addr = mtk_gem->dma_addr;
 +      pitch = fb->pitches[0];
 +      format = fb->format->format;
 +
 +      addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
 +      addr += (new_state->src.y1 >> 16) * pitch;
 +
 +      mtk_plane_state->pending.enable = true;
 +      mtk_plane_state->pending.pitch = pitch;
 +      mtk_plane_state->pending.format = format;
 +      mtk_plane_state->pending.addr = addr;
 +      mtk_plane_state->pending.x = new_state->dst.x1;
 +      mtk_plane_state->pending.y = new_state->dst.y1;
 +      mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
 +      mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
 +      mtk_plane_state->pending.rotation = new_state->rotation;
 +}
 +
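
mtk_plane_update_new_state() above derives the scanout address by offsetting the GEM object's DMA address with the plane's 16.16 fixed-point source origin: x contributes (src.x1 >> 16) * cpp bytes and y contributes (src.y1 >> 16) * pitch bytes. The standalone sketch below walks through that arithmetic with hypothetical numbers (4 bytes per pixel, a 4096-byte pitch, source origin at pixel (64, 32)); it is an illustration, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Byte address of the first visible pixel, given a 16.16 fixed-point origin. */
static uint64_t scanout_addr(uint64_t base, uint32_t pitch, uint32_t cpp,
                             int32_t src_x1, int32_t src_y1)
{
        return base + (uint64_t)(src_x1 >> 16) * cpp +
                      (uint64_t)(src_y1 >> 16) * pitch;
}

int main(void)
{
        /* 64 * 4 + 32 * 4096 = 131328 bytes past the framebuffer base */
        printf("offset = %llu\n",
               (unsigned long long)scanout_addr(0, 4096, 4, 64 << 16, 32 << 16));
        return 0;
}
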
  static void mtk_plane_atomic_async_update(struct drm_plane *plane,
                                          struct drm_atomic_state *state)
  {
        plane->state->src_h = new_state->src_h;
        plane->state->src_w = new_state->src_w;
        swap(plane->state->fb, new_state->fb);
 -      new_plane_state->pending.async_dirty = true;
  
 +      mtk_plane_update_new_state(new_state, new_plane_state);
 +      wmb(); /* Make sure the above parameters are set before update */
 +      new_plane_state->pending.async_dirty = true;
        mtk_drm_crtc_async_update(new_state->crtc, plane, state);
  }
  
@@@ -220,8 -189,14 +220,8 @@@ static void mtk_plane_atomic_update(str
        struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
                                                                           plane);
        struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state);
 -      struct drm_crtc *crtc = new_state->crtc;
 -      struct drm_framebuffer *fb = new_state->fb;
 -      struct drm_gem_object *gem;
 -      struct mtk_drm_gem_obj *mtk_gem;
 -      unsigned int pitch, format;
 -      dma_addr_t addr;
  
 -      if (!crtc || WARN_ON(!fb))
 +      if (!new_state->crtc || WARN_ON(!new_state->fb))
                return;
  
        if (!new_state->visible) {
                return;
        }
  
 -      gem = fb->obj[0];
 -      mtk_gem = to_mtk_gem_obj(gem);
 -      addr = mtk_gem->dma_addr;
 -      pitch = fb->pitches[0];
 -      format = fb->format->format;
 -
 -      addr += (new_state->src.x1 >> 16) * fb->format->cpp[0];
 -      addr += (new_state->src.y1 >> 16) * pitch;
 -
 -      mtk_plane_state->pending.enable = true;
 -      mtk_plane_state->pending.pitch = pitch;
 -      mtk_plane_state->pending.format = format;
 -      mtk_plane_state->pending.addr = addr;
 -      mtk_plane_state->pending.x = new_state->dst.x1;
 -      mtk_plane_state->pending.y = new_state->dst.y1;
 -      mtk_plane_state->pending.width = drm_rect_width(&new_state->dst);
 -      mtk_plane_state->pending.height = drm_rect_height(&new_state->dst);
 -      mtk_plane_state->pending.rotation = new_state->rotation;
 +      mtk_plane_update_new_state(new_state, mtk_plane_state);
        wmb(); /* Make sure the above parameters are set before update */
        mtk_plane_state->pending.dirty = true;
  }
  
  static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
-       .prepare_fb = drm_gem_plane_helper_prepare_fb,
        .atomic_check = mtk_plane_atomic_check,
        .atomic_update = mtk_plane_atomic_update,
        .atomic_disable = mtk_plane_atomic_disable,
@@@ -296,7 -296,7 +296,7 @@@ static const struct dpu_mdp_cfg sc7180_
  static const struct dpu_mdp_cfg sm8250_mdp[] = {
        {
        .name = "top_0", .id = MDP_TOP,
 -      .base = 0x0, .len = 0x45C,
 +      .base = 0x0, .len = 0x494,
        .features = 0,
        .highest_bank_bit = 0x3, /* TODO: 2 for LP_DDR4 */
        .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
@@@ -974,6 -974,7 +974,7 @@@ static const struct dpu_perf_cfg sdm845
        .amortizable_threshold = 25,
        .min_prefill_lines = 24,
        .danger_lut_tbl = {0xf, 0xffff, 0x0},
+       .safe_lut_tbl = {0xfff0, 0xf000, 0xffff},
        .qos_lut_tbl = {
                {.nentry = ARRAY_SIZE(sdm845_qos_linear),
                .entries = sdm845_qos_linear
@@@ -1001,6 -1002,7 +1002,7 @@@ static const struct dpu_perf_cfg sc7180
        .min_dram_ib = 1600000,
        .min_prefill_lines = 24,
        .danger_lut_tbl = {0xff, 0xffff, 0x0},
+       .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
        .qos_lut_tbl = {
                {.nentry = ARRAY_SIZE(sc7180_qos_linear),
                .entries = sc7180_qos_linear
@@@ -1028,6 -1030,7 +1030,7 @@@ static const struct dpu_perf_cfg sm8150
        .min_dram_ib = 800000,
        .min_prefill_lines = 24,
        .danger_lut_tbl = {0xf, 0xffff, 0x0},
+       .safe_lut_tbl = {0xfff8, 0xf000, 0xffff},
        .qos_lut_tbl = {
                {.nentry = ARRAY_SIZE(sm8150_qos_linear),
                .entries = sm8150_qos_linear
@@@ -1056,6 -1059,7 +1059,7 @@@ static const struct dpu_perf_cfg sm8250
        .min_dram_ib = 800000,
        .min_prefill_lines = 35,
        .danger_lut_tbl = {0xf, 0xffff, 0x0},
+       .safe_lut_tbl = {0xfff0, 0xff00, 0xffff},
        .qos_lut_tbl = {
                {.nentry = ARRAY_SIZE(sc7180_qos_linear),
                .entries = sc7180_qos_linear
@@@ -1084,6 -1088,7 +1088,7 @@@ static const struct dpu_perf_cfg sc7280
        .min_dram_ib = 1600000,
        .min_prefill_lines = 24,
        .danger_lut_tbl = {0xffff, 0xffff, 0x0},
+       .safe_lut_tbl = {0xff00, 0xff00, 0xffff},
        .qos_lut_tbl = {
                {.nentry = ARRAY_SIZE(sc7180_qos_macrotile),
                .entries = sc7180_qos_macrotile
@@@ -372,6 -372,7 +372,7 @@@ void dp_catalog_ctrl_mainlink_ctrl(stru
        struct dp_catalog_private *catalog = container_of(dp_catalog,
                                struct dp_catalog_private, dp_catalog);
  
+       DRM_DEBUG_DP("enable=%d\n", enable);
        if (enable) {
                /*
                 * To make sure link reg writes happen before other operations,
@@@ -580,6 -581,7 +581,7 @@@ void dp_catalog_hpd_config_intr(struct 
  
        config = (en ? config | intr_mask : config & ~intr_mask);
  
+       DRM_DEBUG_DP("intr_mask=%#x config=%#x\n", intr_mask, config);
        dp_write_aux(catalog, REG_DP_DP_HPD_INT_MASK,
                                config & DP_DP_HPD_INT_MASK);
  }
@@@ -610,6 -612,7 +612,7 @@@ u32 dp_catalog_link_is_connected(struc
        u32 status;
  
        status = dp_read_aux(catalog, REG_DP_DP_HPD_INT_STATUS);
+       DRM_DEBUG_DP("aux status: %#x\n", status);
        status >>= DP_DP_HPD_STATE_STATUS_BITS_SHIFT;
        status &= DP_DP_HPD_STATE_STATUS_BITS_MASK;
  
@@@ -685,6 -688,7 +688,7 @@@ void dp_catalog_ctrl_send_phy_pattern(s
        /* Make sure to clear the current pattern before starting a new one */
        dp_write_link(catalog, REG_DP_STATE_CTRL, 0x0);
  
+       DRM_DEBUG_DP("pattern: %#x\n", pattern);
        switch (pattern) {
        case DP_PHY_TEST_PATTERN_D10_2:
                dp_write_link(catalog, REG_DP_STATE_CTRL,
                                DP_STATE_CTRL_LINK_TRAINING_PATTERN4);
                break;
        default:
-               DRM_DEBUG_DP("No valid test pattern requested:0x%x\n", pattern);
+               DRM_DEBUG_DP("No valid test pattern requested: %#x\n", pattern);
                break;
        }
  }
@@@ -771,7 -775,6 +775,7 @@@ int dp_catalog_panel_timing_cfg(struct 
        dp_write_link(catalog, REG_DP_HSYNC_VSYNC_WIDTH_POLARITY,
                                dp_catalog->width_blanking);
        dp_write_link(catalog, REG_DP_ACTIVE_HOR_VER, dp_catalog->dp_active);
 +      dp_write_p0(catalog, MMSS_DP_INTF_CONFIG, 0);
        return 0;
  }
  
@@@ -929,7 -932,7 +933,7 @@@ void dp_catalog_audio_config_acr(struc
        select = dp_catalog->audio_data;
        acr_ctrl = select << 4 | BIT(31) | BIT(8) | BIT(14);
  
-       DRM_DEBUG_DP("select = 0x%x, acr_ctrl = 0x%x\n", select, acr_ctrl);
+       DRM_DEBUG_DP("select: %#x, acr_ctrl: %#x\n", select, acr_ctrl);
  
        dp_write_link(catalog, MMSS_DP_AUDIO_ACR_CTRL, acr_ctrl);
  }
@@@ -55,7 -55,6 +55,6 @@@ enum 
        EV_HPD_INIT_SETUP,
        EV_HPD_PLUG_INT,
        EV_IRQ_HPD_INT,
-       EV_HPD_REPLUG_INT,
        EV_HPD_UNPLUG_INT,
        EV_USER_NOTIFICATION,
        EV_CONNECT_PENDING_TIMEOUT,
@@@ -102,8 -101,6 +101,6 @@@ struct dp_display_private 
        struct dp_display_mode dp_mode;
        struct msm_dp dp_display;
  
-       bool encoder_mode_set;
        /* wait for audio signaling */
        struct completion audio_comp;
  
@@@ -219,7 -216,6 +216,7 @@@ static int dp_display_bind(struct devic
                goto end;
        }
  
 +      dp->aux->drm_dev = drm;
        rc = dp_aux_register(dp->aux);
        if (rc) {
                DRM_ERROR("DRM DP AUX register failed\n");
@@@ -268,6 -264,8 +265,8 @@@ static bool dp_display_is_ds_bridge(str
  
  static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
  {
+       DRM_DEBUG_DP("present=%#x sink_count=%d\n", dp->panel->dpcd[DP_DOWNSTREAMPORT_PRESENT],
+               dp->link->sink_count);
        return dp_display_is_ds_bridge(dp->panel) &&
                (dp->link->sink_count == 0);
  }
@@@ -284,20 -282,6 +283,6 @@@ static void dp_display_send_hpd_event(s
  }
  
  
- static void dp_display_set_encoder_mode(struct dp_display_private *dp)
- {
-       struct msm_drm_private *priv = dp->dp_display.drm_dev->dev_private;
-       struct msm_kms *kms = priv->kms;
-       if (!dp->encoder_mode_set && dp->dp_display.encoder &&
-                               kms->funcs->set_encoder_mode) {
-               kms->funcs->set_encoder_mode(kms,
-                               dp->dp_display.encoder, false);
-               dp->encoder_mode_set = true;
-       }
- }
  static int dp_display_send_hpd_notification(struct dp_display_private *dp,
                                            bool hpd)
  {
  
        dp->dp_display.is_connected = hpd;
  
+       DRM_DEBUG_DP("hpd=%d\n", hpd);
        dp_display_send_hpd_event(&dp->dp_display);
  
        return 0;
@@@ -362,6 -347,7 +348,7 @@@ static void dp_display_host_init(struc
  {
        bool flip = false;
  
+       DRM_DEBUG_DP("core_initialized=%d\n", dp->core_initialized);
        if (dp->core_initialized) {
                DRM_DEBUG_DP("DP core already initialized\n");
                return;
        if (dp->usbpd->orientation == ORIENTATION_CC2)
                flip = true;
  
-       dp_display_set_encoder_mode(dp);
        dp_power_init(dp->power, flip);
        dp_ctrl_host_init(dp->ctrl, flip, reset);
        dp_aux_init(dp->aux);
@@@ -466,8 -450,10 +451,10 @@@ static int dp_display_handle_irq_hpd(st
  {
        u32 sink_request = dp->link->sink_request;
  
+       DRM_DEBUG_DP("%d\n", sink_request);
        if (dp->hpd_state == ST_DISCONNECTED) {
                if (sink_request & DP_LINK_STATUS_UPDATED) {
+                       DRM_DEBUG_DP("Disconnected sink_request: %d\n", sink_request);
                        DRM_ERROR("Disconnected, no DP_LINK_STATUS_UPDATED\n");
                        return -EINVAL;
                }
@@@ -499,6 -485,7 +486,7 @@@ static int dp_display_usbpd_attention_c
        rc = dp_link_process_request(dp->link);
        if (!rc) {
                sink_request = dp->link->sink_request;
+               DRM_DEBUG_DP("hpd_state=%d sink_request=%d\n", dp->hpd_state, sink_request);
                if (sink_request & DS_PORT_STATUS_CHANGED)
                        rc = dp_display_handle_port_ststus_changed(dp);
                else
@@@ -521,6 -508,7 +509,7 @@@ static int dp_hpd_plug_handle(struct dp
        mutex_lock(&dp->event_mutex);
  
        state =  dp->hpd_state;
+       DRM_DEBUG_DP("hpd_state=%d\n", state);
        if (state == ST_DISPLAY_OFF || state == ST_SUSPENDED) {
                mutex_unlock(&dp->event_mutex);
                return 0;
@@@ -656,6 -644,7 +645,7 @@@ static int dp_hpd_unplug_handle(struct 
        /* start sentinel checking in case of missing uevent */
        dp_add_event(dp, EV_DISCONNECT_PENDING_TIMEOUT, 0, DP_TIMEOUT_5_SECOND);
  
+       DRM_DEBUG_DP("hpd_state=%d\n", state);
        /* signal the disconnect event early to ensure proper teardown */
        dp_display_handle_plugged_change(g_dp_display, false);
  
@@@ -714,6 -703,7 +704,7 @@@ static int dp_irq_hpd_handle(struct dp_
        if (ret == -ECONNRESET) { /* cable unplugged */
                dp->core_initialized = false;
        }
+       DRM_DEBUG_DP("hpd_state=%d\n", state);
  
        mutex_unlock(&dp->event_mutex);
  
@@@ -855,6 -845,7 +846,7 @@@ static int dp_display_enable(struct dp_
  
        dp_display = g_dp_display;
  
+       DRM_DEBUG_DP("sink_count=%d\n", dp->link->sink_count);
        if (dp_display->power_on) {
                DRM_DEBUG_DP("Link already setup, return\n");
                return 0;
@@@ -916,6 -907,7 +908,7 @@@ static int dp_display_disable(struct dp
  
        dp_display->power_on = false;
  
+       DRM_DEBUG_DP("sink count: %d\n", dp->link->sink_count);
        return 0;
  }
  
@@@ -1015,10 -1007,8 +1008,8 @@@ int dp_display_get_test_bpp(struct msm_
  void msm_dp_snapshot(struct msm_disp_state *disp_state, struct msm_dp *dp)
  {
        struct dp_display_private *dp_display;
-       struct drm_device *drm;
  
        dp_display = container_of(dp, struct dp_display_private, dp_display);
-       drm = dp->drm_dev;
  
        /*
         * if we are reading registers we need the link clocks to be on
@@@ -1119,9 -1109,6 +1110,6 @@@ static int hpd_event_thread(void *data
                case EV_IRQ_HPD_INT:
                        dp_irq_hpd_handle(dp_priv, todo->data);
                        break;
-               case EV_HPD_REPLUG_INT:
-                       /* do nothing */
-                       break;
                case EV_USER_NOTIFICATION:
                        dp_display_send_hpd_notification(dp_priv,
                                                todo->data);
@@@ -1163,12 -1150,11 +1151,11 @@@ static irqreturn_t dp_display_irq_handl
  
        hpd_isr_status = dp_catalog_hpd_get_intr_status(dp->catalog);
  
+       DRM_DEBUG_DP("hpd isr status=%#x\n", hpd_isr_status);
        if (hpd_isr_status & 0x0F) {
                /* hpd related interrupts */
-               if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK ||
-                       hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
+               if (hpd_isr_status & DP_DP_HPD_PLUG_INT_MASK)
                        dp_add_event(dp, EV_HPD_PLUG_INT, 0, 0);
-               }
  
                if (hpd_isr_status & DP_DP_IRQ_HPD_INT_MASK) {
                        /* stop sentinel connect pending checking */
                        dp_add_event(dp, EV_IRQ_HPD_INT, 0, 0);
                }
  
-               if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK)
-                       dp_add_event(dp, EV_HPD_REPLUG_INT, 0, 0);
+               if (hpd_isr_status & DP_DP_HPD_REPLUG_INT_MASK) {
+                       dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
+                       dp_add_event(dp, EV_HPD_PLUG_INT, 0, 3);
+               }
  
                if (hpd_isr_status & DP_DP_HPD_UNPLUG_INT_MASK)
                        dp_add_event(dp, EV_HPD_UNPLUG_INT, 0, 0);
@@@ -1286,12 -1274,15 +1275,15 @@@ static int dp_pm_resume(struct device *
        struct platform_device *pdev = to_platform_device(dev);
        struct msm_dp *dp_display = platform_get_drvdata(pdev);
        struct dp_display_private *dp;
-       u32 status;
+       int sink_count = 0;
  
        dp = container_of(dp_display, struct dp_display_private, dp_display);
  
        mutex_lock(&dp->event_mutex);
  
+       DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n",
+                       dp->core_initialized, dp_display->power_on);
        /* start from disconnected state */
        dp->hpd_state = ST_DISCONNECTED;
  
  
        dp_catalog_ctrl_hpd_config(dp->catalog);
  
-       status = dp_catalog_link_is_connected(dp->catalog);
+       /*
+        * set sink to normal operation mode -- D0
+        * before dpcd read
+        */
+       dp_link_psm_config(dp->link, &dp->panel->link_info, false);
+       if (dp_catalog_link_is_connected(dp->catalog)) {
+               sink_count = drm_dp_read_sink_count(dp->aux);
+               if (sink_count < 0)
+                       sink_count = 0;
+       }
  
+       dp->link->sink_count = sink_count;
        /*
         * We cannot declare the display connected unless the HDMI
         * cable is plugged in and the dongle's sink_count becomes 1.
         */
-       if (status && dp->link->sink_count)
+       if (dp->link->sink_count)
                dp->dp_display.is_connected = true;
        else
                dp->dp_display.is_connected = false;
  
 +      dp_display_handle_plugged_change(g_dp_display,
 +                              dp->dp_display.is_connected);
 +
+       DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
+                       dp->link->sink_count, dp->dp_display.is_connected,
+                       dp->core_initialized, dp_display->power_on);
  
        mutex_unlock(&dp->event_mutex);
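
The resume path above now wakes the sink to D0 and re-reads the sink count over AUX before declaring the display connected; the HPD status alone is no longer sufficient. The reduced, standalone model below captures only that decision, with sink_count_read standing in for the value returned by drm_dp_read_sink_count() (negative on AUX error); it is an illustration, not driver code.

#include <stdbool.h>
#include <stdio.h>

/* Connected only if HPD shows a cable and the re-read sink count is non-zero;
 * an AUX error (negative read result) is treated as a sink count of zero.
 */
static bool resume_is_connected(bool hpd_connected, int sink_count_read)
{
        int sink_count = 0;

        if (hpd_connected && sink_count_read >= 0)
                sink_count = sink_count_read;

        return sink_count != 0;
}

int main(void)
{
        printf("%d\n", resume_is_connected(true, 1));   /* 1: dongle reports a sink */
        printf("%d\n", resume_is_connected(true, 0));   /* 0: cable but no sink */
        printf("%d\n", resume_is_connected(false, 1));  /* 0: HPD shows no cable */
        return 0;
}
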
  
@@@ -1331,6 -1333,9 +1337,9 @@@ static int dp_pm_suspend(struct device 
  
        mutex_lock(&dp->event_mutex);
  
+       DRM_DEBUG_DP("Before, core_inited=%d power_on=%d\n",
+                       dp->core_initialized, dp_display->power_on);
        if (dp->core_initialized == true) {
                /* mainlink enabled */
                if (dp_power_clk_status(dp->power, DP_CTRL_PM))
        /* host_init will be called at pm_resume */
        dp->core_initialized = false;
  
+       DRM_DEBUG_DP("After, core_inited=%d power_on=%d\n",
+                       dp->core_initialized, dp_display->power_on);
        mutex_unlock(&dp->event_mutex);
  
        return 0;
@@@ -31,6 -31,7 +31,7 @@@
  #include <linux/dma-mapping.h>
  #include <linux/hdmi.h>
  #include <linux/component.h>
+ #include <linux/iopoll.h>
  
  #include <drm/drm_atomic.h>
  #include <drm/drm_atomic_helper.h>
@@@ -1649,17 -1650,36 +1650,36 @@@ nv50_sor_update(struct nouveau_encoder 
        core->func->sor->ctrl(core, nv_encoder->or, nv_encoder->ctrl, asyh);
  }
  
+ /* TODO: Should we extend this to PWM-only backlights?
+  * As well, should we add a DRM helper for waiting for the backlight to acknowledge
+  * the panel backlight has been shut off? Intel doesn't seem to do this, and uses a
+  * fixed time delay from the vbios…
+  */
  static void
  nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
  {
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
        struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder);
+ #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+       struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
+       struct nouveau_backlight *backlight = nv_connector->backlight;
+ #endif
        struct drm_dp_aux *aux = &nv_connector->aux;
+       int ret;
        u8 pwr;
  
+ #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+       if (backlight && backlight->uses_dpcd) {
+               ret = drm_edp_backlight_disable(aux, &backlight->edp_info);
+               if (ret < 0)
+                       NV_ERROR(drm, "Failed to disable backlight on [CONNECTOR:%d:%s]: %d\n",
+                                nv_connector->base.base.id, nv_connector->base.name, ret);
+       }
+ #endif
        if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
-               int ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
+               ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
  
                if (ret == 0) {
                        pwr &= ~DP_SET_POWER_MASK;
@@@ -1696,6 -1716,9 +1716,9 @@@ nv50_sor_atomic_enable(struct drm_encod
        struct drm_device *dev = encoder->dev;
        struct nouveau_drm *drm = nouveau_drm(dev);
        struct nouveau_connector *nv_connector;
+ #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+       struct nouveau_backlight *backlight;
+ #endif
        struct nvbios *bios = &drm->vbios;
        bool hda = false;
        u8 proto = NV507D_SOR_SET_CONTROL_PROTOCOL_CUSTOM;
                        proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;
  
                nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
+ #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
+               backlight = nv_connector->backlight;
+               if (backlight && backlight->uses_dpcd)
+                       drm_edp_backlight_enable(&nv_connector->aux, &backlight->edp_info,
+                                                (u16)backlight->dev->props.brightness);
+ #endif
                break;
        default:
                BUG();
@@@ -2237,33 -2268,6 +2268,33 @@@ nv50_disp_atomic_commit_tail(struct drm
                interlock[NV50_DISP_INTERLOCK_CORE] = 0;
        }
  
 +      /* Finish updating head(s)...
 +       *
 +       * NVD is rather picky about both where window assignments can change,
 +       * *and* about certain core and window channel states matching.
 +       *
 +       * The EFI GOP driver on newer GPUs configures window channels with a
 +       * different output format to what we do, and the core channel update
 +       * in the assign_windows case above would result in a state mismatch.
 +       *
 +       * Delay some of the head update until after that point to work around
 +       * the issue.  This only affects the initial modeset.
 +       *
 +       * TODO: handle this better when adding flexible window mapping
 +       */
 +      for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 +              struct nv50_head_atom *asyh = nv50_head_atom(new_crtc_state);
 +              struct nv50_head *head = nv50_head(crtc);
 +
 +              NV_ATOMIC(drm, "%s: set %04x (clr %04x)\n", crtc->name,
 +                        asyh->set.mask, asyh->clr.mask);
 +
 +              if (asyh->set.mask) {
 +                      nv50_head_flush_set_wndw(head, asyh);
 +                      interlock[NV50_DISP_INTERLOCK_CORE] = 1;
 +              }
 +      }
 +
        /* Update plane(s). */
        for_each_new_plane_in_state(state, plane, new_plane_state, i) {
                struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
        nv50_crc_atomic_start_reporting(state);
        if (!flushed)
                nv50_crc_atomic_release_notifier_contexts(state);
        drm_atomic_helper_commit_hw_done(state);
        drm_atomic_helper_cleanup_planes(dev, state);
        drm_atomic_helper_commit_cleanup_done(state);
@@@ -244,7 -244,6 +244,7 @@@ nouveau_cli_init(struct nouveau_drm *dr
        ret = nvif_device_ctor(&cli->base.object, "drmDevice", 0, NV_DEVICE,
                               &(struct nv_device_v0) {
                                        .device = ~0,
 +                                      .priv = true,
                               }, sizeof(struct nv_device_v0),
                               &cli->device);
        if (ret) {
@@@ -554,8 -553,6 +554,6 @@@ nouveau_drm_device_init(struct drm_devi
        if (ret)
                goto fail_master;
  
-       dev->irq_enabled = true;
        nvxx_client(&drm->client.base)->debug =
                nvkm_dbgopt(nouveau_debug, "DRM");
  
@@@ -739,7 -736,7 +737,7 @@@ static int nouveau_drm_probe(struct pci
        nvkm_device_del(&device);
  
        /* Remove conflicting drivers (vesafb, efifb etc). */
-       ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "nouveaufb");
+       ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver_pci);
        if (ret)
                return ret;
  
@@@ -796,7 -793,6 +794,6 @@@ nouveau_drm_device_remove(struct drm_de
  
        drm_dev_unregister(dev);
  
-       dev->irq_enabled = false;
        client = nvxx_client(&drm->client.base);
        device = nvkm_device_find(client->device);
  
@@@ -1087,6 -1083,8 +1084,6 @@@ nouveau_drm_open(struct drm_device *dev
        if (ret)
                goto done;
  
 -      cli->base.super = false;
 -
        fpriv->driver_priv = cli;
  
        mutex_lock(&drm->client.mutex);
@@@ -36,6 -36,8 +36,8 @@@
  
  #include <drm/drm_crtc.h>
  #include <drm/drm_device.h>
+ #include <drm/drm_dp_aux_bus.h>
+ #include <drm/drm_dp_helper.h>
  #include <drm/drm_mipi_dsi.h>
  #include <drm/drm_panel.h>
  
@@@ -185,6 -187,7 +187,7 @@@ struct panel_simple 
  
        struct regulator *supply;
        struct i2c_adapter *ddc;
+       struct drm_dp_aux *aux;
  
        struct gpio_desc *enable_gpio;
        struct gpio_desc *hpd_gpio;
@@@ -657,7 -660,8 +660,8 @@@ static void panel_simple_parse_panel_ti
                dev_err(dev, "Reject override mode: No display_timing found\n");
  }
  
- static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
+ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
+                             struct drm_dp_aux *aux)
  {
        struct panel_simple *panel;
        struct display_timing dt;
        panel->enabled = false;
        panel->prepared_time = 0;
        panel->desc = desc;
+       panel->aux = aux;
  
        panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
        if (!panel->no_hpd) {
  
                if (!panel->ddc)
                        return -EPROBE_DEFER;
+       } else if (aux) {
+               panel->ddc = &aux->ddc;
        }
  
        if (desc == &panel_dpi) {
                        desc->bpc != 8);
                break;
        case DRM_MODE_CONNECTOR_eDP:
-               if (desc->bus_format == 0)
-                       dev_warn(dev, "Specify missing bus_format\n");
-               if (desc->bpc != 6 && desc->bpc != 8)
-                       dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
+               if (desc->bpc != 6 && desc->bpc != 8 && desc->bpc != 10)
+                       dev_warn(dev, "Expected bpc in {6,8,10} but got: %u\n", desc->bpc);
                break;
        case DRM_MODE_CONNECTOR_DSI:
                if (desc->bpc != 6 && desc->bpc != 8)
        if (err)
                goto disable_pm_runtime;
  
+       if (!panel->base.backlight && panel->aux) {
+               pm_runtime_get_sync(dev);
+               err = drm_panel_dp_aux_backlight(&panel->base, panel->aux);
+               pm_runtime_mark_last_busy(dev);
+               pm_runtime_put_autosuspend(dev);
+               if (err)
+                       goto disable_pm_runtime;
+       }
        drm_panel_add(&panel->base);
  
        return 0;
@@@ -801,7 -815,7 +815,7 @@@ disable_pm_runtime
        pm_runtime_dont_use_autosuspend(dev);
        pm_runtime_disable(dev);
  free_ddc:
-       if (panel->ddc)
+       if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
                put_device(&panel->ddc->dev);
  
        return err;
@@@ -817,7 -831,7 +831,7 @@@ static int panel_simple_remove(struct d
  
        pm_runtime_dont_use_autosuspend(dev);
        pm_runtime_disable(dev);
-       if (panel->ddc)
+       if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
                put_device(&panel->ddc->dev);
  
        return 0;
@@@ -1080,6 -1094,36 +1094,36 @@@ static const struct panel_desc auo_b133
        },
  };
  
+ static const struct drm_display_mode auo_b133han05_mode = {
+       .clock = 142600,
+       .hdisplay = 1920,
+       .hsync_start = 1920 + 58,
+       .hsync_end = 1920 + 58 + 42,
+       .htotal = 1920 + 58 + 42 + 60,
+       .vdisplay = 1080,
+       .vsync_start = 1080 + 3,
+       .vsync_end = 1080 + 3 + 5,
+       .vtotal = 1080 + 3 + 5 + 54,
+ };
+ static const struct panel_desc auo_b133han05 = {
+       .modes = &auo_b133han05_mode,
+       .num_modes = 1,
+       .bpc = 8,
+       .size = {
+               .width = 293,
+               .height = 165,
+       },
+       .delay = {
+               .prepare = 100,
+               .enable = 20,
+               .unprepare = 50,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+       .connector_type = DRM_MODE_CONNECTOR_eDP,
+ };
  static const struct drm_display_mode auo_b133htn01_mode = {
        .clock = 150660,
        .hdisplay = 1920,
@@@ -1107,6 -1151,36 +1151,36 @@@ static const struct panel_desc auo_b133
        },
  };
  
+ static const struct drm_display_mode auo_b140han06_mode = {
+       .clock = 141000,
+       .hdisplay = 1920,
+       .hsync_start = 1920 + 16,
+       .hsync_end = 1920 + 16 + 16,
+       .htotal = 1920 + 16 + 16 + 152,
+       .vdisplay = 1080,
+       .vsync_start = 1080 + 3,
+       .vsync_end = 1080 + 3 + 14,
+       .vtotal = 1080 + 3 + 14 + 19,
+ };
+ static const struct panel_desc auo_b140han06 = {
+       .modes = &auo_b140han06_mode,
+       .num_modes = 1,
+       .bpc = 8,
+       .size = {
+               .width = 309,
+               .height = 174,
+       },
+       .delay = {
+               .prepare = 100,
+               .enable = 20,
+               .unprepare = 50,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+       .connector_type = DRM_MODE_CONNECTOR_eDP,
+ };
  static const struct display_timing auo_g070vvn01_timings = {
        .pixelclock = { 33300000, 34209000, 45000000 },
        .hactive = { 800, 800, 800 },
@@@ -1179,6 -1253,8 +1253,8 @@@ static const struct panel_desc auo_g104
                .width = 211,
                .height = 158,
        },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
  };
  
  static const struct drm_display_mode auo_g121ean01_mode = {
@@@ -1929,6 -2005,32 +2005,32 @@@ static const struct panel_desc edt_et03
        .bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
  };
  
+ static const struct drm_display_mode edt_etm0350g0dh6_mode = {
+       .clock = 6520,
+       .hdisplay = 320,
+       .hsync_start = 320 + 20,
+       .hsync_end = 320 + 20 + 68,
+       .htotal = 320 + 20 + 68,
+       .vdisplay = 240,
+       .vsync_start = 240 + 4,
+       .vsync_end = 240 + 4 + 18,
+       .vtotal = 240 + 4 + 18,
+       .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ };
+ static const struct panel_desc edt_etm0350g0dh6 = {
+       .modes = &edt_etm0350g0dh6_mode,
+       .num_modes = 1,
+       .bpc = 6,
+       .size = {
+               .width = 70,
+               .height = 53,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
+ };
  static const struct drm_display_mode edt_etm043080dh6gp_mode = {
        .clock = 10870,
        .hdisplay = 480,
@@@ -1980,6 -2082,9 +2082,9 @@@ static const struct panel_desc edt_etm0
                .width = 95,
                .height = 54,
        },
+       .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
  };
  
  static const struct drm_display_mode edt_et057090dhu_mode = {
@@@ -2044,6 -2149,60 +2149,60 @@@ static const struct panel_desc edt_etm0
        },
        .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
        .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
+ };
+ static const struct drm_display_mode edt_etmv570g2dhu_mode = {
+       .clock = 25175,
+       .hdisplay = 640,
+       .hsync_start = 640,
+       .hsync_end = 640 + 16,
+       .htotal = 640 + 16 + 30 + 114,
+       .vdisplay = 480,
+       .vsync_start = 480 + 10,
+       .vsync_end = 480 + 10 + 3,
+       .vtotal = 480 + 10 + 3 + 35,
+       .flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_PHSYNC,
+ };
+ static const struct panel_desc edt_etmv570g2dhu = {
+       .modes = &edt_etmv570g2dhu_mode,
+       .num_modes = 1,
+       .bpc = 6,
+       .size = {
+               .width = 115,
+               .height = 86,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
+ };
+ static const struct display_timing eink_vb3300_kca_timing = {
+       .pixelclock = { 40000000, 40000000, 40000000 },
+       .hactive = { 334, 334, 334 },
+       .hfront_porch = { 1, 1, 1 },
+       .hback_porch = { 1, 1, 1 },
+       .hsync_len = { 1, 1, 1 },
+       .vactive = { 1405, 1405, 1405 },
+       .vfront_porch = { 1, 1, 1 },
+       .vback_porch = { 1, 1, 1 },
+       .vsync_len = { 1, 1, 1 },
+       .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+                DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ };
+ static const struct panel_desc eink_vb3300_kca = {
+       .timings = &eink_vb3300_kca_timing,
+       .num_timings = 1,
+       .bpc = 6,
+       .size = {
+               .width = 157,
+               .height = 209,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
  };
  
  static const struct display_timing evervision_vgg804821_timing = {
@@@ -2967,6 -3126,38 +3126,38 @@@ static const struct panel_desc logictec
        .connector_type = DRM_MODE_CONNECTOR_LVDS,
  };
  
+ static const struct drm_display_mode logictechno_lttd800480070_l6wh_rt_mode = {
+       .clock = 33000,
+       .hdisplay = 800,
+       .hsync_start = 800 + 154,
+       .hsync_end = 800 + 154 + 3,
+       .htotal = 800 + 154 + 3 + 43,
+       .vdisplay = 480,
+       .vsync_start = 480 + 47,
+       .vsync_end = 480 + 47 + 3,
+       .vtotal = 480 + 47 + 3 + 20,
+       .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
 +               /*
 +                * CMDQ commands should execute in the next vblank;
 +                * if execution has not happened within the next two
 +                * vblanks, a timeout has occurred.
 +                */
+       .bpc = 8,
+       .size = {
+               .width = 154,
+               .height = 86,
+       },
+       .delay = {
+               .prepare = 45,
+               .enable = 100,
+               .disable = 100,
+               .unprepare = 45
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
+ };
  static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
        .clock = 30400,
        .hdisplay = 800,
@@@ -3033,6 -3224,37 +3224,37 @@@ static const struct panel_desc mitsubis
        .bus_flags = DRM_BUS_FLAG_DE_HIGH,
  };
  
+ static const struct display_timing multi_inno_mi1010ait_1cp_timing = {
+       .pixelclock = { 68900000, 70000000, 73400000 },
+       .hactive = { 1280, 1280, 1280 },
+       .hfront_porch = { 30, 60, 71 },
+       .hback_porch = { 30, 60, 71 },
+       .hsync_len = { 10, 10, 48 },
+       .vactive = { 800, 800, 800 },
+       .vfront_porch = { 5, 10, 10 },
+       .vback_porch = { 5, 10, 10 },
+       .vsync_len = { 5, 6, 13 },
+       .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+                DISPLAY_FLAGS_DE_HIGH,
+ };
+ static const struct panel_desc multi_inno_mi1010ait_1cp = {
+       .timings = &multi_inno_mi1010ait_1cp_timing,
+       .num_timings = 1,
+       .bpc = 8,
+       .size = {
+               .width = 217,
+               .height = 136,
+       },
+       .delay = {
+               .enable = 50,
+               .disable = 50,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+       .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
  static const struct display_timing nec_nl12880bc20_05_timing = {
        .pixelclock = { 67000000, 71000000, 75000000 },
        .hactive = { 1280, 1280, 1280 },
@@@ -3463,6 -3685,46 +3685,46 @@@ static const struct panel_desc qd43003c
        .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
  };
  
+ static const struct drm_display_mode qishenglong_gopher2b_lcd_modes[] = {
+       { /* 60 Hz */
+               .clock = 10800,
+               .hdisplay = 480,
+               .hsync_start = 480 + 77,
+               .hsync_end = 480 + 77 + 41,
+               .htotal = 480 + 77 + 41 + 2,
+               .vdisplay = 272,
+               .vsync_start = 272 + 16,
+               .vsync_end = 272 + 16 + 10,
+               .vtotal = 272 + 16 + 10 + 2,
+               .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+       },
+       { /* 50 Hz */
+               .clock = 10800,
+               .hdisplay = 480,
+               .hsync_start = 480 + 17,
+               .hsync_end = 480 + 17 + 41,
+               .htotal = 480 + 17 + 41 + 2,
+               .vdisplay = 272,
+               .vsync_start = 272 + 116,
+               .vsync_end = 272 + 116 + 10,
+               .vtotal = 272 + 116 + 10 + 2,
+               .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+       },
+ };
+ static const struct panel_desc qishenglong_gopher2b_lcd = {
+       .modes = qishenglong_gopher2b_lcd_modes,
+       .num_modes = ARRAY_SIZE(qishenglong_gopher2b_lcd_modes),
+       .bpc = 8,
+       .size = {
+               .width = 95,
+               .height = 54,
+       },
+       .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
+       .connector_type = DRM_MODE_CONNECTOR_DPI,
+ };
  static const struct display_timing rocktech_rk070er9427_timing = {
        .pixelclock = { 26400000, 33300000, 46800000 },
        .hactive = { 800, 800, 800 },
@@@ -4166,7 -4428,7 +4428,7 @@@ static const struct drm_display_mode ye
  static const struct panel_desc yes_optoelectronics_ytc700tlag_05_201c = {
        .modes = &yes_optoelectronics_ytc700tlag_05_201c_mode,
        .num_modes = 1,
 -      .bpc = 6,
 +      .bpc = 8,
        .size = {
                .width = 154,
                .height = 90,
@@@ -4233,9 -4495,15 +4495,15 @@@ static const struct of_device_id platfo
        }, {
                .compatible = "auo,b116xw03",
                .data = &auo_b116xw03,
+       }, {
+               .compatible = "auo,b133han05",
+               .data = &auo_b133han05,
        }, {
                .compatible = "auo,b133htn01",
                .data = &auo_b133htn01,
+       }, {
+               .compatible = "auo,b140han06",
+               .data = &auo_b140han06,
        }, {
                .compatible = "auo,b133xtn01",
                .data = &auo_b133xtn01,
        }, {
                .compatible = "edt,et035012dm6",
                .data = &edt_et035012dm6,
+       }, {
+               .compatible = "edt,etm0350g0dh6",
+               .data = &edt_etm0350g0dh6,
        }, {
                .compatible = "edt,etm043080dh6gp",
                .data = &edt_etm043080dh6gp,
        }, {
                .compatible = "edt,etm0700g0edh6",
                .data = &edt_etm0700g0bdh6,
+       }, {
+               .compatible = "edt,etmv570g2dhu",
+               .data = &edt_etmv570g2dhu,
+       }, {
+               .compatible = "eink,vb3300-kca",
+               .data = &eink_vb3300_kca,
        }, {
                .compatible = "evervision,vgg804821",
                .data = &evervision_vgg804821,
        }, {
                .compatible = "logictechno,lt170410-2whc",
                .data = &logictechno_lt170410_2whc,
+       }, {
+               .compatible = "logictechno,lttd800480070-l6wh-rt",
+               .data = &logictechno_lttd800480070_l6wh_rt,
        }, {
                .compatible = "mitsubishi,aa070mc01-ca1",
                .data = &mitsubishi_aa070mc01,
+       }, {
+               .compatible = "multi-inno,mi1010ait-1cp",
+               .data = &multi_inno_mi1010ait_1cp,
        }, {
                .compatible = "nec,nl12880bc20-05",
                .data = &nec_nl12880bc20_05,
        }, {
                .compatible = "qiaodian,qd43003c0-40",
                .data = &qd43003c0_40,
+       }, {
+               .compatible = "qishenglong,gopher2b-lcd",
+               .data = &qishenglong_gopher2b_lcd,
        }, {
                .compatible = "rocktech,rk070er9427",
                .data = &rocktech_rk070er9427,
@@@ -4632,7 -4918,7 +4918,7 @@@ static int panel_simple_platform_probe(
        if (!id)
                return -ENODEV;
  
-       return panel_simple_probe(&pdev->dev, id->data);
+       return panel_simple_probe(&pdev->dev, id->data, NULL);
  }
  
  static int panel_simple_platform_remove(struct platform_device *pdev)
@@@ -4867,7 -5153,7 +5153,7 @@@ static const struct panel_desc_dsi osd1
        },
        .flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
                 MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
-                MIPI_DSI_MODE_EOT_PACKET,
+                MIPI_DSI_MODE_NO_EOT_PACKET,
        .format = MIPI_DSI_FMT_RGB888,
        .lanes = 4,
  };
@@@ -4912,7 -5198,7 +5198,7 @@@ static int panel_simple_dsi_probe(struc
  
        desc = id->data;
  
-       err = panel_simple_probe(&dsi->dev, &desc->desc);
+       err = panel_simple_probe(&dsi->dev, &desc->desc, NULL);
        if (err < 0)
                return err;
  
@@@ -4957,6 -5243,38 +5243,38 @@@ static struct mipi_dsi_driver panel_sim
        .shutdown = panel_simple_dsi_shutdown,
  };
  
+ static int panel_simple_dp_aux_ep_probe(struct dp_aux_ep_device *aux_ep)
+ {
+       const struct of_device_id *id;
+
+       id = of_match_node(platform_of_match, aux_ep->dev.of_node);
+       if (!id)
+               return -ENODEV;
+
+       return panel_simple_probe(&aux_ep->dev, id->data, aux_ep->aux);
+ }
+
+ static void panel_simple_dp_aux_ep_remove(struct dp_aux_ep_device *aux_ep)
+ {
+       panel_simple_remove(&aux_ep->dev);
+ }
+
+ static void panel_simple_dp_aux_ep_shutdown(struct dp_aux_ep_device *aux_ep)
+ {
+       panel_simple_shutdown(&aux_ep->dev);
+ }
+
+ static struct dp_aux_ep_driver panel_simple_dp_aux_ep_driver = {
+       .driver = {
+               .name = "panel-simple-dp-aux",
+               .of_match_table = platform_of_match,    /* Same as platform one! */
+               .pm = &panel_simple_pm_ops,
+       },
+       .probe = panel_simple_dp_aux_ep_probe,
+       .remove = panel_simple_dp_aux_ep_remove,
+       .shutdown = panel_simple_dp_aux_ep_shutdown,
+ };
+
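The new dp-aux-ep probe resolves the panel through the same platform_of_match table as the platform driver (note the "Same as platform one!" comment), so one compatible string maps to one panel_desc regardless of which bus enumerated the device. A minimal user-space model of that lookup, with simplified stand-in types rather than the kernel's struct of_device_id / of_match_node():

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins; not the kernel structures or APIs. */
struct match_entry { const char *compatible; const void *data; };

static const char qishenglong_desc[] = "qishenglong_gopher2b_lcd panel_desc";

static const struct match_entry match_table[] = {
        { "qishenglong,gopher2b-lcd", qishenglong_desc },
        { }     /* sentinel */
};

static const void *match_node(const struct match_entry *t, const char *compatible)
{
        for (; t->compatible; t++)
                if (!strcmp(t->compatible, compatible))
                        return t->data;
        return NULL;    /* the real probe returns -ENODEV in this case */
}

int main(void)
{
        const void *data = match_node(match_table, "qishenglong,gopher2b-lcd");

        printf("%s\n", data ? (const char *)data : "no match");
        return 0;
}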
  static int __init panel_simple_init(void)
  {
        int err;

        err = platform_driver_register(&panel_simple_platform_driver);
        if (err < 0)
                return err;
  
+       err = dp_aux_dp_driver_register(&panel_simple_dp_aux_ep_driver);
+       if (err < 0)
+               goto err_did_platform_register;
+
        if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) {
                err = mipi_dsi_driver_register(&panel_simple_dsi_driver);
-               if (err < 0) {
-                       platform_driver_unregister(&panel_simple_platform_driver);
-                       return err;
-               }
+               if (err < 0)
+                       goto err_did_aux_ep_register;
        }
  
        return 0;
+
+ err_did_aux_ep_register:
+       dp_aux_dp_driver_unregister(&panel_simple_dp_aux_ep_driver);
+ err_did_platform_register:
+       platform_driver_unregister(&panel_simple_platform_driver);
+       return err;
  }
  module_init(panel_simple_init);
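With a third bus to register on, panel_simple_init() above now unwinds failures through goto labels rather than open-coded unregister calls, so each label undoes exactly what succeeded before it, in reverse order. A stand-alone sketch of that pattern follows; register_*/unregister_* are illustrative stand-ins, not kernel APIs:

#include <stdio.h>

/* Illustrative stand-ins for the three driver registrations. */
static int register_platform(void) { return 0; }
static int register_aux_ep(void) { return 0; }
static int register_dsi(void) { return -1; }            /* simulate a failure */
static void unregister_platform(void) { puts("unregister platform"); }
static void unregister_aux_ep(void) { puts("unregister aux-ep"); }

static int init(void)
{
        int err;

        err = register_platform();
        if (err < 0)
                return err;

        err = register_aux_ep();
        if (err < 0)
                goto err_did_platform_register;

        err = register_dsi();
        if (err < 0)
                goto err_did_aux_ep_register;

        return 0;

err_did_aux_ep_register:
        unregister_aux_ep();
err_did_platform_register:
        unregister_platform();
        return err;
}

int main(void)
{
        return init() ? 1 : 0;
}

Falling through from err_did_aux_ep_register into err_did_platform_register keeps the teardown order the mirror image of the setup order.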
  
@@@ -4982,6 -5310,7 +5310,7 @@@ static void __exit panel_simple_exit(vo
        if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
                mipi_dsi_driver_unregister(&panel_simple_dsi_driver);
  
+       dp_aux_dp_driver_unregister(&panel_simple_dp_aux_ep_driver);
        platform_driver_unregister(&panel_simple_platform_driver);
  }
  module_exit(panel_simple_exit);