Merge v5.6-rc5 into drm-next
authorDave Airlie <airlied@redhat.com>
Tue, 10 Mar 2020 21:27:21 +0000 (07:27 +1000)
committerDave Airlie <airlied@redhat.com>
Tue, 10 Mar 2020 21:27:21 +0000 (07:27 +1000)
Requested by mripard for some misc patches that need this as a base.

Signed-off-by: Dave Airlie <airlied@redhat.com>
26 files changed:
1  2 
Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
MAINTAINERS
arch/arm/configs/omap2plus_defconfig
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/bridge/ti-tfp410.c
drivers/gpu/drm/drm_client_modeset.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/panfrost/panfrost_job.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/virtio/virtgpu_object.c
drivers/video/backlight/Kconfig

@@@ -34,12 -34,6 +34,12 @@@ properties
        - const: allwinner,sun9i-a80-tcon-lcd
        - const: allwinner,sun9i-a80-tcon-tv
  
 +      - items:
 +        - enum:
 +          - allwinner,sun7i-a20-tcon0
 +          - allwinner,sun7i-a20-tcon1
 +        - const: allwinner,sun7i-a20-tcon
 +
        - items:
          - enum:
              - allwinner,sun50i-a64-tcon-lcd
          - enum:
            - allwinner,sun8i-h3-tcon-tv
            - allwinner,sun50i-a64-tcon-tv
-           - allwinner,sun50i-h6-tcon-tv
          - const: allwinner,sun8i-a83t-tcon-tv
  
+       - items:
+         - enum:
+           - allwinner,sun50i-h6-tcon-tv
+         - const: allwinner,sun8i-r40-tcon-tv
    reg:
      maxItems: 1
  
diff --combined MAINTAINERS
@@@ -693,7 -693,7 +693,7 @@@ ALLWINNER CPUFREQ DRIVE
  M:    Yangtao Li <tiny.windzz@gmail.com>
  L:    linux-pm@vger.kernel.org
  S:    Maintained
- F:    Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt
+ F:    Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
  F:    drivers/cpufreq/sun50i-cpufreq-nvmem.c
  
  ALLWINNER CRYPTO DRIVERS
@@@ -3649,6 -3649,7 +3649,7 @@@ F:      sound/pci/oxygen
  
  C-SKY ARCHITECTURE
  M:    Guo Ren <guoren@kernel.org>
+ L:    linux-csky@vger.kernel.org
  T:    git https://github.com/c-sky/csky-linux.git
  S:    Supported
  F:    arch/csky/
@@@ -3909,7 -3910,7 +3910,7 @@@ S:      Supporte
  F:    Documentation/filesystems/ceph.txt
  F:    fs/ceph/
  
- CERTIFICATE HANDLING:
+ CERTIFICATE HANDLING
  M:    David Howells <dhowells@redhat.com>
  M:    David Woodhouse <dwmw2@infradead.org>
  L:    keyrings@vger.kernel.org
@@@ -3919,7 -3920,7 +3920,7 @@@ F:      certs
  F:    scripts/sign-file.c
  F:    scripts/extract-cert.c
  
- CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
+ CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM
  L:    devel@driverdev.osuosl.org
  S:    Obsolete
  F:    drivers/staging/wusbcore/
@@@ -4016,7 -4017,7 +4017,7 @@@ M:      Cheng-Yi Chiang <cychiang@chromium.o
  S:    Maintained
  R:    Enric Balletbo i Serra <enric.balletbo@collabora.com>
  R:    Guenter Roeck <groeck@chromium.org>
- F:    Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
+ F:    Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
  F:    sound/soc/codecs/cros_ec_codec.*
  
  CIRRUS LOGIC AUDIO CODEC DRIVERS
@@@ -4474,7 -4475,7 +4475,7 @@@ L:      linux-media@vger.kernel.or
  T:    git git://linuxtv.org/media_tree.git
  S:    Maintained
  F:    drivers/media/platform/sunxi/sun6i-csi/
- F:    Documentation/devicetree/bindings/media/sun6i-csi.txt
+ F:    Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml
  
  CW1200 WLAN driver
  M:    Solomon Peachy <pizza@shaftnet.org>
@@@ -5262,12 -5263,6 +5263,12 @@@ T:    git git://anongit.freedesktop.org/dr
  S:    Maintained
  F:    drivers/gpu/drm/tve200/
  
 +DRM DRIVER FOR FEIXIN K101 IM2BA02 MIPI-DSI LCD PANELS
 +M:    Icenowy Zheng <icenowy@aosc.io>
 +S:    Maintained
 +F:    drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
 +F:    Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml
 +
  DRM DRIVER FOR FEIYANG FY07024DI26A30-D MIPI-DSI LCD PANELS
  M:    Jagan Teki <jagan@amarulasolutions.com>
  S:    Maintained
@@@ -5287,13 -5282,6 +5288,13 @@@ S:    Maintaine
  F:    drivers/gpu/drm/tiny/ili9225.c
  F:    Documentation/devicetree/bindings/display/ilitek,ili9225.txt
  
 +DRM DRIVER FOR ILITEK ILI9486 PANELS
 +M:    Kamlesh Gurudasani <kamlesh.gurudasani@gmail.com>
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +S:    Maintained
 +F:    drivers/gpu/drm/tiny/ili9486.c
 +F:    Documentation/devicetree/bindings/display/ilitek,ili9486.yaml
 +
  DRM DRIVER FOR HX8357D PANELS
  M:    Eric Anholt <eric@anholt.net>
  T:    git git://anongit.freedesktop.org/drm/drm-misc
@@@ -5422,7 -5410,7 +5423,7 @@@ M:      David Lechner <david@lechnology.com
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  S:    Maintained
  F:    drivers/gpu/drm/tiny/st7735r.c
 -F:    Documentation/devicetree/bindings/display/sitronix,st7735r.txt
 +F:    Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
  
  DRM DRIVER FOR SONY ACX424AKP PANELS
  M:    Linus Walleij <linus.walleij@linaro.org>
@@@ -5502,7 -5490,6 +5503,7 @@@ F:      include/linux/vga
  DRM DRIVERS AND MISC GPU PATCHES
  M:    Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
  M:    Maxime Ripard <mripard@kernel.org>
 +M:    Thomas Zimmermann <tzimmermann@suse.de>
  W:    https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
  S:    Maintained
  T:    git git://anongit.freedesktop.org/drm/drm-misc
@@@ -5582,6 -5569,7 +5583,6 @@@ S:      Supporte
  F:    drivers/gpu/drm/fsl-dcu/
  F:    Documentation/devicetree/bindings/display/fsl,dcu.txt
  F:    Documentation/devicetree/bindings/display/fsl,tcon.txt
 -F:    Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  
  DRM DRIVERS FOR FREESCALE IMX
@@@ -5600,13 -5588,12 +5601,13 @@@ S:   Maintaine
  F:    drivers/gpu/drm/gma500/
  
  DRM DRIVERS FOR HISILICON
 -M:    Xinliang Liu <z.liuxinliang@hisilicon.com>
 +M:    Xinliang Liu <xinliang.liu@linaro.org>
  M:    Rongrong Zou <zourongrong@gmail.com>
 +R:    John Stultz <john.stultz@linaro.org>
  R:    Xinwei Kong <kong.kongxinwei@hisilicon.com>
  R:    Chen Feng <puck.chen@hisilicon.com>
  L:    dri-devel@lists.freedesktop.org
 -T:    git git://github.com/xin3liang/linux.git
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
  S:    Maintained
  F:    drivers/gpu/drm/hisilicon/
  F:    Documentation/devicetree/bindings/display/hisilicon/
@@@ -5681,7 -5668,7 +5682,7 @@@ L:      dri-devel@lists.freedesktop.or
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  S:    Maintained
  F:    drivers/gpu/drm/stm
- F:    Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
+ F:    Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
  
  DRM DRIVERS FOR TI LCDC
  M:    Jyri Sarha <jsarha@ti.com>
@@@ -5698,17 -5685,6 +5699,17 @@@ S:    Maintaine
  F:    drivers/gpu/drm/omapdrm/
  F:    Documentation/devicetree/bindings/display/ti/
  
 +DRM DRIVERS FOR TI KEYSTONE
 +M:    Jyri Sarha <jsarha@ti.com>
 +M:    Tomi Valkeinen <tomi.valkeinen@ti.com>
 +L:    dri-devel@lists.freedesktop.org
 +S:    Maintained
 +F:    drivers/gpu/drm/tidss/
 +F:    Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
 +F:    Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
 +F:    Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +
  DRM DRIVERS FOR V3D
  M:    Eric Anholt <eric@anholt.net>
  S:    Supported
@@@ -5957,12 -5933,12 +5958,12 @@@ S:   Maintaine
  F:    drivers/media/dvb-frontends/ec100*
  
  ECRYPT FILE SYSTEM
- M:    Tyler Hicks <tyhicks@canonical.com>
+ M:    Tyler Hicks <code@tyhicks.com>
  L:    ecryptfs@vger.kernel.org
  W:    http://ecryptfs.org
  W:    https://launchpad.net/ecryptfs
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tyhicks/ecryptfs.git
- S:    Supported
+ S:    Odd Fixes
  F:    Documentation/filesystems/ecryptfs.txt
  F:    fs/ecryptfs/
  
@@@ -7072,7 -7048,7 +7073,7 @@@ L:      kvm@vger.kernel.or
  S:    Supported
  F:    drivers/uio/uio_pci_generic.c
  
- GENERIC VDSO LIBRARY:
+ GENERIC VDSO LIBRARY
  M:    Andy Lutomirski <luto@kernel.org>
  M:    Thomas Gleixner <tglx@linutronix.de>
  M:    Vincenzo Frascino <vincenzo.frascino@arm.com>
@@@ -7762,7 -7738,7 +7763,7 @@@ Hyper-V CORE AND DRIVER
  M:    "K. Y. Srinivasan" <kys@microsoft.com>
  M:    Haiyang Zhang <haiyangz@microsoft.com>
  M:    Stephen Hemminger <sthemmin@microsoft.com>
- M:    Sasha Levin <sashal@kernel.org>
+ M:    Wei Liu <wei.liu@kernel.org>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
  L:    linux-hyperv@vger.kernel.org
  S:    Supported
@@@ -9303,7 -9279,7 +9304,7 @@@ F:      include/keys/trusted-type.
  F:    security/keys/trusted.c
  F:    include/keys/trusted.h
  
- KEYS/KEYRINGS:
+ KEYS/KEYRINGS
  M:    David Howells <dhowells@redhat.com>
  M:    Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
  L:    keyrings@vger.kernel.org
@@@ -10188,7 -10164,7 +10189,7 @@@ MAXBOTIX ULTRASONIC RANGER IIO DRIVE
  M:    Andreas Klinger <ak@it-klinger.de>
  L:    linux-iio@vger.kernel.org
  S:    Maintained
- F:    Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt
+ F:    Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml
  F:    drivers/iio/proximity/mb1232.c
  
  MAXIM MAX77650 PMIC MFD DRIVER
@@@ -10491,7 -10467,7 +10492,7 @@@ M:   Hugues Fruchet <hugues.fruchet@st.co
  L:    linux-media@vger.kernel.org
  T:    git git://linuxtv.org/media_tree.git
  S:    Supported
- F:    Documentation/devicetree/bindings/media/st,stm32-dcmi.txt
+ F:    Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
  F:    drivers/media/platform/stm32/stm32-dcmi.c
  
  MEDIA DRIVERS FOR NVIDIA TEGRA - VDE
@@@ -11139,14 -11115,12 +11140,12 @@@ S:        Maintaine
  F:    drivers/usb/image/microtek.*
  
  MIPS
- M:    Ralf Baechle <ralf@linux-mips.org>
- M:    Paul Burton <paulburton@kernel.org>
+ M:    Thomas Bogendoerfer <tsbogend@alpha.franken.de>
  L:    linux-mips@vger.kernel.org
  W:    http://www.linux-mips.org/
- T:    git git://git.linux-mips.org/pub/scm/ralf/linux.git
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
  Q:    http://patchwork.linux-mips.org/project/linux-mips/list/
- S:    Supported
+ S:    Maintained
  F:    Documentation/devicetree/bindings/mips/
  F:    Documentation/mips/
  F:    arch/mips/
@@@ -11509,7 -11483,7 +11508,7 @@@ F:   drivers/scsi/mac_scsi.
  F:    drivers/scsi/sun3_scsi.*
  F:    drivers/scsi/sun3_scsi_vme.c
  
- NCSI LIBRARY:
+ NCSI LIBRARY
  M:    Samuel Mendoza-Jonas <sam@mendozajonas.com>
  S:    Maintained
  F:    net/ncsi/
@@@ -12765,7 -12739,7 +12764,7 @@@ M:   Tom Joseph <tjoseph@cadence.com
  L:    linux-pci@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/pci/cdns,*.txt
- F:    drivers/pci/controller/pcie-cadence*
+ F:    drivers/pci/controller/cadence/
  
  PCI DRIVER FOR FREESCALE LAYERSCAPE
  M:    Minghuan Lian <minghuan.Lian@nxp.com>
@@@ -12978,7 -12952,6 +12977,6 @@@ M:   Robert Richter <rrichter@marvell.com
  L:    linux-pci@vger.kernel.org
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Supported
- F:    Documentation/devicetree/bindings/pci/pci-thunder-*
  F:    drivers/pci/controller/pci-thunder-*
  
  PCIE DRIVER FOR HISILICON
@@@ -13537,7 -13510,7 +13535,7 @@@ L:   linuxppc-dev@lists.ozlabs.or
  S:    Maintained
  F:    drivers/block/ps3vram.c
  
- PSAMPLE PACKET SAMPLING SUPPORT:
+ PSAMPLE PACKET SAMPLING SUPPORT
  M:    Yotam Gigi <yotam.gi@gmail.com>
  S:    Maintained
  F:    net/psample
@@@ -14253,7 -14226,7 +14251,7 @@@ F:   include/dt-bindings/reset
  F:    include/linux/reset.h
  F:    include/linux/reset/
  F:    include/linux/reset-controller.h
- K:      \b(?:devm_|of_)?reset_control(?:ler_[a-z]+|_[a-z_]+)?\b
+ K:    \b(?:devm_|of_)?reset_control(?:ler_[a-z]+|_[a-z_]+)?\b
  
  RESTARTABLE SEQUENCES SUPPORT
  M:    Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
@@@ -14607,10 -14580,10 +14605,10 @@@ F:        drivers/media/pci/saa7146
  F:    include/media/drv-intf/saa7146*
  
  SAFESETID SECURITY MODULE
- M:     Micah Morton <mortonm@chromium.org>
- S:     Supported
- F:     security/safesetid/
- F:     Documentation/admin-guide/LSM/SafeSetID.rst
+ M:    Micah Morton <mortonm@chromium.org>
+ S:    Supported
+ F:    security/safesetid/
+ F:    Documentation/admin-guide/LSM/SafeSetID.rst
  
  SAMSUNG AUDIO (ASoC) DRIVERS
  M:    Krzysztof Kozlowski <krzk@kernel.org>
@@@ -15948,7 -15921,7 +15946,7 @@@ F:   drivers/*/stm32-*timer
  F:    drivers/pwm/pwm-stm32*
  F:    include/linux/*/stm32-*tim*
  F:    Documentation/ABI/testing/*timer-stm32
- F:    Documentation/devicetree/bindings/*/stm32-*timer*
+ F:    Documentation/devicetree/bindings/*/*stm32-*timer*
  F:    Documentation/devicetree/bindings/pwm/pwm-stm32*
  
  STMMAC ETHERNET DRIVER
@@@ -16107,6 -16080,8 +16105,8 @@@ SYNOPSYS DESIGNWARE 8250 UART DRIVE
  R:    Andy Shevchenko <andriy.shevchenko@linux.intel.com>
  S:    Maintained
  F:    drivers/tty/serial/8250/8250_dw.c
+ F:    drivers/tty/serial/8250/8250_dwlib.*
+ F:    drivers/tty/serial/8250/8250_lpss.c
  
  SYNOPSYS DESIGNWARE APB GPIO DRIVER
  M:    Hoan Tran <hoan@os.amperecomputing.com>
@@@ -16577,8 -16552,8 +16577,8 @@@ M:   Michael Jamet <michael.jamet@intel.c
  M:    Mika Westerberg <mika.westerberg@linux.intel.com>
  M:    Yehezkel Bernat <YehezkelShB@gmail.com>
  L:    linux-usb@vger.kernel.org
- T:    git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
  S:    Maintained
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
  F:    Documentation/admin-guide/thunderbolt.rst
  F:    drivers/thunderbolt/
  F:    include/linux/thunderbolt.h
@@@ -17105,7 -17080,7 +17105,7 @@@ S:   Maintaine
  F:    Documentation/admin-guide/ufs.rst
  F:    fs/ufs/
  
- UHID USERSPACE HID IO DRIVER:
+ UHID USERSPACE HID IO DRIVER
  M:    David Herrmann <dh.herrmann@googlemail.com>
  L:    linux-input@vger.kernel.org
  S:    Maintained
@@@ -17119,18 -17094,18 +17119,18 @@@ S:        Maintaine
  F:    drivers/usb/common/ulpi.c
  F:    include/linux/ulpi/
  
- ULTRA-WIDEBAND (UWB) SUBSYSTEM:
+ ULTRA-WIDEBAND (UWB) SUBSYSTEM
  L:    devel@driverdev.osuosl.org
  S:    Obsolete
  F:    drivers/staging/uwb/
  
- UNICODE SUBSYSTEM:
+ UNICODE SUBSYSTEM
  M:    Gabriel Krisman Bertazi <krisman@collabora.com>
  L:    linux-fsdevel@vger.kernel.org
  S:    Supported
  F:    fs/unicode/
  
- UNICORE32 ARCHITECTURE:
+ UNICORE32 ARCHITECTURE
  M:    Guan Xuetao <gxt@pku.edu.cn>
  W:    http://mprc.pku.edu.cn/~guanxuetao/linux
  S:    Maintained
@@@ -17417,11 -17392,14 +17417,14 @@@ F:        drivers/usb
  F:    include/linux/usb.h
  F:    include/linux/usb/
  
- USB TYPEC PI3USB30532 MUX DRIVER
- M:    Hans de Goede <hdegoede@redhat.com>
+ USB TYPEC BUS FOR ALTERNATE MODES
+ M:    Heikki Krogerus <heikki.krogerus@linux.intel.com>
  L:    linux-usb@vger.kernel.org
  S:    Maintained
- F:    drivers/usb/typec/mux/pi3usb30532.c
+ F:    Documentation/ABI/testing/sysfs-bus-typec
+ F:    Documentation/driver-api/usb/typec_bus.rst
+ F:    drivers/usb/typec/altmodes/
+ F:    include/linux/usb/typec_altmode.h
  
  USB TYPEC CLASS
  M:    Heikki Krogerus <heikki.krogerus@linux.intel.com>
@@@ -17432,14 -17410,11 +17435,11 @@@ F:        Documentation/driver-api/usb/typec.r
  F:    drivers/usb/typec/
  F:    include/linux/usb/typec.h
  
- USB TYPEC BUS FOR ALTERNATE MODES
- M:    Heikki Krogerus <heikki.krogerus@linux.intel.com>
+ USB TYPEC PI3USB30532 MUX DRIVER
+ M:    Hans de Goede <hdegoede@redhat.com>
  L:    linux-usb@vger.kernel.org
  S:    Maintained
- F:    Documentation/ABI/testing/sysfs-bus-typec
- F:    Documentation/driver-api/usb/typec_bus.rst
- F:    drivers/usb/typec/altmodes/
- F:    include/linux/usb/typec_altmode.h
+ F:    drivers/usb/typec/mux/pi3usb30532.c
  
  USB TYPEC PORT CONTROLLER DRIVERS
  M:    Guenter Roeck <linux@roeck-us.net>
@@@ -17816,7 -17791,7 +17816,7 @@@ F:   include/linux/vbox_utils.
  F:    include/uapi/linux/vbox*.h
  F:    drivers/virt/vboxguest/
  
- VIRTUAL BOX SHARED FOLDER VFS DRIVER:
+ VIRTUAL BOX SHARED FOLDER VFS DRIVER
  M:    Hans de Goede <hdegoede@redhat.com>
  L:    linux-fsdevel@vger.kernel.org
  S:    Maintained
@@@ -350,13 -350,14 +350,13 @@@ CONFIG_DRM_OMAP=
  CONFIG_OMAP5_DSS_HDMI=y
  CONFIG_OMAP2_DSS_SDI=y
  CONFIG_OMAP2_DSS_DSI=y
 -CONFIG_DRM_OMAP_ENCODER_OPA362=m
 -CONFIG_DRM_OMAP_ENCODER_TPD12S015=m
 -CONFIG_DRM_OMAP_CONNECTOR_HDMI=m
 -CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV=m
  CONFIG_DRM_OMAP_PANEL_DSI_CM=m
  CONFIG_DRM_TILCDC=m
  CONFIG_DRM_PANEL_SIMPLE=m
 +CONFIG_DRM_DISPLAY_CONNECTOR=m
 +CONFIG_DRM_SIMPLE_BRIDGE=m
  CONFIG_DRM_TI_TFP410=m
 +CONFIG_DRM_TI_TPD12S015=m
  CONFIG_DRM_PANEL_LG_LB035Q02=m
  CONFIG_DRM_PANEL_NEC_NL8048HL11=m
  CONFIG_DRM_PANEL_SHARP_LS037V7DW01=m
@@@ -374,6 -375,7 +374,7 @@@ CONFIG_BACKLIGHT_GENERIC=
  CONFIG_BACKLIGHT_PWM=m
  CONFIG_BACKLIGHT_PANDORA=m
  CONFIG_BACKLIGHT_GPIO=m
+ CONFIG_BACKLIGHT_LED=m
  CONFIG_FRAMEBUFFER_CONSOLE=y
  CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
  CONFIG_LOGO=y
@@@ -52,7 -52,7 +52,7 @@@
   * 1. Primary ring
   * 2. Async ring
   */
- #define GFX10_NUM_GFX_RINGS   2
+ #define GFX10_NUM_GFX_RINGS_NV1X      1
  #define GFX10_MEC_HPD_SIZE    2048
  
  #define F32_CE_PROGRAM_RAM_SIZE               65536
@@@ -1304,7 -1304,7 +1304,7 @@@ static int gfx_v10_0_sw_init(void *hand
        case CHIP_NAVI14:
        case CHIP_NAVI12:
                adev->gfx.me.num_me = 1;
-               adev->gfx.me.num_pipe_per_me = 2;
+               adev->gfx.me.num_pipe_per_me = 1;
                adev->gfx.me.num_queue_per_pipe = 1;
                adev->gfx.mec.num_mec = 2;
                adev->gfx.mec.num_pipe_per_mec = 4;
@@@ -2710,18 -2710,20 +2710,20 @@@ static int gfx_v10_0_cp_gfx_start(struc
        amdgpu_ring_commit(ring);
  
        /* submit cs packet to copy state 0 to next available state */
-       ring = &adev->gfx.gfx_ring[1];
-       r = amdgpu_ring_alloc(ring, 2);
-       if (r) {
-               DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
-               return r;
-       }
-       amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
-       amdgpu_ring_write(ring, 0);
+       if (adev->gfx.num_gfx_rings > 1) {
+               /* maximum supported gfx ring is 2 */
+               ring = &adev->gfx.gfx_ring[1];
+               r = amdgpu_ring_alloc(ring, 2);
+               if (r) {
+                       DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+                       return r;
+               }
  
-       amdgpu_ring_commit(ring);
+               amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+               amdgpu_ring_write(ring, 0);
  
+               amdgpu_ring_commit(ring);
+       }
        return 0;
  }
  
@@@ -2818,39 -2820,41 +2820,41 @@@ static int gfx_v10_0_cp_gfx_resume(stru
        mutex_unlock(&adev->srbm_mutex);
  
        /* Init gfx ring 1 for pipe 1 */
-       mutex_lock(&adev->srbm_mutex);
-       gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
-       ring = &adev->gfx.gfx_ring[1];
-       rb_bufsz = order_base_2(ring->ring_size / 8);
-       tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
-       tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
-       WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
-       /* Initialize the ring buffer's write pointers */
-       ring->wptr = 0;
-       WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
-       WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
-       /* Set the wb address wether it's enabled or not */
-       rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
-       WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
-       WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
-               CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
-       wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
-       WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
-               lower_32_bits(wptr_gpu_addr));
-       WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
-               upper_32_bits(wptr_gpu_addr));
-       mdelay(1);
-       WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
-       rb_addr = ring->gpu_addr >> 8;
-       WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
-       WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
-       WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
-       gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
-       mutex_unlock(&adev->srbm_mutex);
+       if (adev->gfx.num_gfx_rings > 1) {
+               mutex_lock(&adev->srbm_mutex);
+               gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
+               /* maximum supported gfx ring is 2 */
+               ring = &adev->gfx.gfx_ring[1];
+               rb_bufsz = order_base_2(ring->ring_size / 8);
+               tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
+               tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
+               WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+               /* Initialize the ring buffer's write pointers */
+               ring->wptr = 0;
+               WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
+               WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
+               /* Set the wb address wether it's enabled or not */
+               rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+               WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+               WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
+                            CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
+               wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+               WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
+                            lower_32_bits(wptr_gpu_addr));
+               WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
+                            upper_32_bits(wptr_gpu_addr));
+               mdelay(1);
+               WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+               rb_addr = ring->gpu_addr >> 8;
+               WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
+               WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
+               WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
+               gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+               mutex_unlock(&adev->srbm_mutex);
+       }
        /* Switch to pipe 0 */
        mutex_lock(&adev->srbm_mutex);
        gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
@@@ -3164,7 -3168,12 +3168,7 @@@ static int gfx_v10_0_kiq_enable_kgq(str
        for (i = 0; i < adev->gfx.num_gfx_rings; i++)
                kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
  
 -      r = amdgpu_ring_test_ring(kiq_ring);
 -      if (r) {
 -              DRM_ERROR("kfq enable failed\n");
 -              kiq_ring->sched.ready = false;
 -      }
 -      return r;
 +      return amdgpu_ring_test_helper(kiq_ring);
  }
  #endif
  
@@@ -3508,6 -3517,7 +3512,7 @@@ static int gfx_v10_0_kcq_init_queue(str
  
                /* reset ring buffer */
                ring->wptr = 0;
+               atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
                amdgpu_ring_clear_ring(ring);
        } else {
                amdgpu_ring_clear_ring(ring);
@@@ -3780,7 -3790,7 +3785,7 @@@ static int gfx_v10_0_kiq_disable_kgq(st
                kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
                                           PREEMPT_QUEUES, 0, 0);
  
 -      return amdgpu_ring_test_ring(kiq_ring);
 +      return amdgpu_ring_test_helper(kiq_ring);
  }
  #endif
  
@@@ -3961,7 -3971,8 +3966,8 @@@ static int gfx_v10_0_early_init(void *h
  {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  
-       adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
+       adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
        adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
  
        gfx_v10_0_set_kiq_pm4_funcs(adev);
@@@ -4408,7 -4419,7 +4414,7 @@@ static void gfx_v10_0_ring_emit_ib_gfx(
  
        control |= ib->length_dw | (vmid << 24);
  
 -      if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
 +      if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
                control |= INDIRECT_BUFFER_PRE_ENB(1);
  
                if (flags & AMDGPU_IB_PREEMPTED)
  
                if (!(ib->flags & AMDGPU_IB_FLAG_CE))
                        gfx_v10_0_ring_emit_de_meta(ring,
 -                                  flags & AMDGPU_IB_PREEMPTED ? true : false);
 +                                  (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
        }
  
        amdgpu_ring_write(ring, header);
@@@ -4563,9 -4574,9 +4569,9 @@@ static void gfx_v10_0_ring_emit_cntxcnt
  {
        uint32_t dw2 = 0;
  
 -      if (amdgpu_mcbp)
 +      if (amdgpu_mcbp || amdgpu_sriov_vf(ring->adev))
                gfx_v10_0_ring_emit_ce_meta(ring,
 -                                  flags & AMDGPU_IB_PREEMPTED ? true : false);
 +                                  (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
  
        gfx_v10_0_ring_emit_tmz(ring, true);
  
@@@ -1106,11 -1106,10 +1106,11 @@@ static void gfx_v9_0_check_fw_write_wai
        adev->gfx.me_fw_write_wait = false;
        adev->gfx.mec_fw_write_wait = false;
  
 -      if ((adev->gfx.mec_fw_version < 0x000001a5) ||
 +      if ((adev->asic_type != CHIP_ARCTURUS) &&
 +          ((adev->gfx.mec_fw_version < 0x000001a5) ||
            (adev->gfx.mec_feature_version < 46) ||
            (adev->gfx.pfp_fw_version < 0x000000b7) ||
 -          (adev->gfx.pfp_feature_version < 46))
 +          (adev->gfx.pfp_feature_version < 46)))
                DRM_WARN_ONCE("CP firmware version too old, please update!");
  
        switch (adev->asic_type) {
@@@ -3664,6 -3663,7 +3664,7 @@@ static int gfx_v9_0_kcq_init_queue(stru
  
                /* reset ring buffer */
                ring->wptr = 0;
+               atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
                amdgpu_ring_clear_ring(ring);
        } else {
                amdgpu_ring_clear_ring(ring);
@@@ -98,9 -98,6 +98,9 @@@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB)
  #define FIRMWARE_RAVEN_DMCU           "amdgpu/raven_dmcu.bin"
  MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
  
 +#define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
 +MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
 +
  /* Number of bytes in PSP header for firmware. */
  #define PSP_HEADER_BYTES 0x100
  
@@@ -386,8 -383,8 +386,8 @@@ static void dm_pflip_high_irq(void *int
         * of pageflip completion, so last_flip_vblank is the forbidden count
         * for queueing new pageflips if vsync + VRR is enabled.
         */
 -      amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
 -                                                      amdgpu_crtc->crtc_id);
 +      amdgpu_crtc->last_flip_vblank =
 +              amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
  
        amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
        spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
@@@ -804,20 -801,10 +804,20 @@@ static int dm_dmub_hw_init(struct amdgp
  
        fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
  
 -      memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
 -             fw_inst_const_size);
 +      /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
 +       * amdgpu_ucode_init_single_fw will load dmub firmware
 +       * fw_inst_const part to cw0; otherwise, the firmware back door load
 +       * will be done by dm_dmub_hw_init
 +       */
 +      if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 +              memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
 +                              fw_inst_const_size);
 +      }
 +
        memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
               fw_bss_data_size);
 +
 +      /* Copy firmware bios info into FB memory. */
        memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
               adev->bios_size);
  
        hw_params.fb_base = adev->gmc.fb_start;
        hw_params.fb_offset = adev->gmc.aper_base;
  
 +      /* backdoor load firmware and trigger dmub running */
 +      if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
 +              hw_params.load_inst_const = true;
 +
        if (dmcu)
                hw_params.psp_version = dmcu->psp_version;
  
@@@ -977,7 -960,7 +977,7 @@@ static int amdgpu_dm_init(struct amdgpu
  
  #ifdef CONFIG_DRM_AMD_DC_HDCP
        if (adev->asic_type >= CHIP_RAVEN) {
 -              adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);
 +              adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
  
                if (!adev->dm.hdcp_workqueue)
                        DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
                goto error;
        }
  
 -#if defined(CONFIG_DEBUG_FS)
 -      if (dtn_debugfs_init(adev))
 -              DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
 -#endif
 -
        DRM_DEBUG_DRIVER("KMS initialized.\n");
  
        return 0;
@@@ -1091,11 -1079,9 +1091,11 @@@ static int load_dmcu_fw(struct amdgpu_d
        case CHIP_VEGA20:
        case CHIP_NAVI10:
        case CHIP_NAVI14:
 -      case CHIP_NAVI12:
        case CHIP_RENOIR:
                return 0;
 +      case CHIP_NAVI12:
 +              fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
 +              break;
        case CHIP_RAVEN:
                if (ASICREV_IS_PICASSO(adev->external_rev_id))
                        fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
@@@ -1206,21 -1192,22 +1206,21 @@@ static int dm_dmub_sw_init(struct amdgp
                return 0;
        }
  
 -      if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
 -              DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
 -              return 0;
 -      }
 -
        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
 -      adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
 -              AMDGPU_UCODE_ID_DMCUB;
 -      adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
 -      adev->firmware.fw_size +=
 -              ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
  
 -      adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 +      if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 +              adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
 +                      AMDGPU_UCODE_ID_DMCUB;
 +              adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
 +                      adev->dm.dmub_fw;
 +              adev->firmware.fw_size +=
 +                      ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
  
 -      DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
 -               adev->dm.dmcub_fw_version);
 +              DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
 +                       adev->dm.dmcub_fw_version);
 +      }
 +
 +      adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
  
        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;
@@@ -1435,6 -1422,73 +1435,73 @@@ static void s3_handle_mst(struct drm_de
                drm_kms_helper_hotplug_event(dev);
  }
  
+ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
+ {
+       struct smu_context *smu = &adev->smu;
+       int ret = 0;
+       if (!is_support_sw_smu(adev))
+               return 0;
+       /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
+        * on window driver dc implementation.
+        * For Navi1x, clock settings of dcn watermarks are fixed. the settings
+        * should be passed to smu during boot up and resume from s3.
+        * boot up: dc calculate dcn watermark clock settings within dc_create,
+        * dcn20_resource_construct
+        * then call pplib functions below to pass the settings to smu:
+        * smu_set_watermarks_for_clock_ranges
+        * smu_set_watermarks_table
+        * navi10_set_watermarks_table
+        * smu_write_watermarks_table
+        *
+        * For Renoir, clock settings of dcn watermark are also fixed values.
+        * dc has implemented different flow for window driver:
+        * dc_hardware_init / dc_set_power_state
+        * dcn10_init_hw
+        * notify_wm_ranges
+        * set_wm_ranges
+        * -- Linux
+        * smu_set_watermarks_for_clock_ranges
+        * renoir_set_watermarks_table
+        * smu_write_watermarks_table
+        *
+        * For Linux,
+        * dc_hardware_init -> amdgpu_dm_init
+        * dc_set_power_state --> dm_resume
+        *
+        * therefore, this function apply to navi10/12/14 but not Renoir
+        * *
+        */
+       switch(adev->asic_type) {
+       case CHIP_NAVI10:
+       case CHIP_NAVI14:
+       case CHIP_NAVI12:
+               break;
+       default:
+               return 0;
+       }
+       mutex_lock(&smu->mutex);
+       /* pass data to smu controller */
+       if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+                       !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+               ret = smu_write_watermarks_table(smu);
+               if (ret) {
+                       mutex_unlock(&smu->mutex);
+                       DRM_ERROR("Failed to update WMTABLE!\n");
+                       return ret;
+               }
+               smu->watermarks_bitmap |= WATERMARKS_LOADED;
+       }
+       mutex_unlock(&smu->mutex);
+       return 0;
+ }
  /**
   * dm_hw_init() - Initialize DC device
   * @handle: The base driver device containing the amdgpu_dm device.
@@@ -1713,6 -1767,8 +1780,8 @@@ static int dm_resume(void *handle
  
        amdgpu_dm_irq_resume_late(adev);
  
+       amdgpu_dm_smu_write_watermarks_table(adev);
        return 0;
  }
  
@@@ -1771,61 -1827,6 +1840,61 @@@ static struct drm_mode_config_helper_fu
        .atomic_commit_tail = amdgpu_dm_atomic_commit_tail
  };
  
 +static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
 +{
 +      u32 max_cll, min_cll, max, min, q, r;
 +      struct amdgpu_dm_backlight_caps *caps;
 +      struct amdgpu_display_manager *dm;
 +      struct drm_connector *conn_base;
 +      struct amdgpu_device *adev;
 +      static const u8 pre_computed_values[] = {
 +              50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
 +              71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
 +
 +      if (!aconnector || !aconnector->dc_link)
 +              return;
 +
 +      conn_base = &aconnector->base;
 +      adev = conn_base->dev->dev_private;
 +      dm = &adev->dm;
 +      caps = &dm->backlight_caps;
 +      caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
 +      caps->aux_support = false;
 +      max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
 +      min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
 +
 +      if (caps->ext_caps->bits.oled == 1 ||
 +          caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
 +          caps->ext_caps->bits.hdr_aux_backlight_control == 1)
 +              caps->aux_support = true;
 +
 +      /* From the specification (CTA-861-G), for calculating the maximum
 +       * luminance we need to use:
 +       *      Luminance = 50*2**(CV/32)
 +       * Where CV is a one-byte value.
 +       * For calculating this expression we may need float point precision;
 +       * to avoid this complexity level, we take advantage that CV is divided
 +       * by a constant. From the Euclids division algorithm, we know that CV
 +       * can be written as: CV = 32*q + r. Next, we replace CV in the
 +       * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
 +       * need to pre-compute the value of r/32. For pre-computing the values
 +       * We just used the following Ruby line:
 +       *      (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
 +       * The results of the above expressions can be verified at
 +       * pre_computed_values.
 +       */
 +      q = max_cll >> 5;
 +      r = max_cll % 32;
 +      max = (1 << q) * pre_computed_values[r];
 +
 +      // min luminance: maxLum * (CV/255)^2 / 100
 +      q = DIV_ROUND_CLOSEST(min_cll, 255);
 +      min = max * DIV_ROUND_CLOSEST((q * q), 100);
 +
 +      caps->aux_max_input_signal = max;
 +      caps->aux_min_input_signal = min;
 +}
 +
  static void
  amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
  {
                                            aconnector->edid);
                }
                amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
 -
 +              update_connector_ext_caps(aconnector);
        } else {
                drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
                amdgpu_dm_update_freesync_caps(connector, NULL);
@@@ -2554,7 -2555,6 +2623,7 @@@ static int amdgpu_dm_mode_config_init(s
  
  #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
  #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
 +#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
  
  #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
        defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
@@@ -2569,11 -2569,9 +2638,11 @@@ static void amdgpu_dm_update_backlight_
  
        amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
        if (caps.caps_valid) {
 +              dm->backlight_caps.caps_valid = true;
 +              if (caps.aux_support)
 +                      return;
                dm->backlight_caps.min_input_signal = caps.min_input_signal;
                dm->backlight_caps.max_input_signal = caps.max_input_signal;
 -              dm->backlight_caps.caps_valid = true;
        } else {
                dm->backlight_caps.min_input_signal =
                                AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
                                AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
        }
  #else
 +      if (dm->backlight_caps.aux_support)
 +              return;
 +
        dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
        dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
  #endif
  }
  
 +static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
 +{
 +      bool rc;
 +
 +      if (!link)
 +              return 1;
 +
 +      rc = dc_link_set_backlight_level_nits(link, true, brightness,
 +                                            AUX_BL_DEFAULT_TRANSITION_TIME_MS);
 +
 +      return rc ? 0 : 1;
 +}
 +
 +static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
 +                            const uint32_t user_brightness)
 +{
 +      u32 min, max, conversion_pace;
 +      u32 brightness = user_brightness;
 +
 +      if (!caps)
 +              goto out;
 +
 +      if (!caps->aux_support) {
 +              max = caps->max_input_signal;
 +              min = caps->min_input_signal;
 +              /*
 +               * The brightness input is in the range 0-255
 +               * It needs to be rescaled to be between the
 +               * requested min and max input signal
 +               * It also needs to be scaled up by 0x101 to
 +               * match the DC interface which has a range of
 +               * 0 to 0xffff
 +               */
 +              conversion_pace = 0x101;
 +              brightness =
 +                      user_brightness
 +                      * conversion_pace
 +                      * (max - min)
 +                      / AMDGPU_MAX_BL_LEVEL
 +                      + min * conversion_pace;
 +      } else {
 +              /* TODO
 +               * We are doing a linear interpolation here, which is OK but
 +               * does not provide the optimal result. We probably want
 +               * something close to the Perceptual Quantizer (PQ) curve.
 +               */
 +              max = caps->aux_max_input_signal;
 +              min = caps->aux_min_input_signal;
 +
 +              brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
 +                             + user_brightness * max;
 +              // Multiple the value by 1000 since we use millinits
 +              brightness *= 1000;
 +              brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
 +      }
 +
 +out:
 +      return brightness;
 +}
 +
  static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
  {
        struct amdgpu_display_manager *dm = bl_get_data(bd);
        struct amdgpu_dm_backlight_caps caps;
 -      uint32_t brightness = bd->props.brightness;
 +      struct dc_link *link = NULL;
 +      u32 brightness;
 +      bool rc;
  
        amdgpu_dm_update_backlight_caps(dm);
        caps = dm->backlight_caps;
 -      /*
 -       * The brightness input is in the range 0-255
 -       * It needs to be rescaled to be between the
 -       * requested min and max input signal
 -       *
 -       * It also needs to be scaled up by 0x101 to
 -       * match the DC interface which has a range of
 -       * 0 to 0xffff
 -       */
 -      brightness =
 -              brightness
 -              * 0x101
 -              * (caps.max_input_signal - caps.min_input_signal)
 -              / AMDGPU_MAX_BL_LEVEL
 -              + caps.min_input_signal * 0x101;
 -
 -      if (dc_link_set_backlight_level(dm->backlight_link,
 -                      brightness, 0))
 -              return 0;
 -      else
 -              return 1;
 +
 +      link = (struct dc_link *)dm->backlight_link;
 +
 +      brightness = convert_brightness(&caps, bd->props.brightness);
 +      // Change brightness based on AUX property
 +      if (caps.aux_support)
 +              return set_backlight_via_aux(link, brightness);
 +
 +      rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
 +
 +      return rc ? 0 : 1;
  }
  
  static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
@@@ -4397,10 -4340,8 +4466,10 @@@ static const struct drm_crtc_funcs amdg
        .set_crc_source = amdgpu_dm_crtc_set_crc_source,
        .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
        .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
 +      .get_vblank_counter = amdgpu_get_vblank_counter_kms,
        .enable_vblank = dm_enable_vblank,
        .disable_vblank = dm_disable_vblank,
 +      .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
  };
  
  static enum drm_connector_status
@@@ -4621,19 -4562,6 +4690,19 @@@ amdgpu_dm_connector_atomic_duplicate_st
        return &new_state->base;
  }
  
 +static int
 +amdgpu_dm_connector_late_register(struct drm_connector *connector)
 +{
 +      struct amdgpu_dm_connector *amdgpu_dm_connector =
 +              to_amdgpu_dm_connector(connector);
 +
 +#if defined(CONFIG_DEBUG_FS)
 +      connector_debugfs_init(amdgpu_dm_connector);
 +#endif
 +
 +      return 0;
 +}
 +
  static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
        .reset = amdgpu_dm_connector_funcs_reset,
        .detect = amdgpu_dm_connector_detect,
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
        .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
        .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
 +      .late_register = amdgpu_dm_connector_late_register,
        .early_unregister = amdgpu_dm_connector_unregister
  };
  
@@@ -5020,8 -4947,7 +5089,8 @@@ static bool dm_crtc_helper_mode_fixup(s
  static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
        .disable = dm_crtc_helper_disable,
        .atomic_check = dm_crtc_helper_atomic_check,
 -      .mode_fixup = dm_crtc_helper_mode_fixup
 +      .mode_fixup = dm_crtc_helper_mode_fixup,
 +      .get_scanout_position = amdgpu_crtc_get_scanout_position,
  };
  
  static void dm_encoder_helper_disable(struct drm_encoder *encoder)
@@@ -5984,6 -5910,13 +6053,6 @@@ static int amdgpu_dm_connector_init(str
        drm_connector_attach_encoder(
                &aconnector->base, &aencoder->base);
  
 -      drm_connector_register(&aconnector->base);
 -#if defined(CONFIG_DEBUG_FS)
 -      connector_debugfs_init(aconnector);
 -      aconnector->debugfs_dpcd_address = 0;
 -      aconnector->debugfs_dpcd_size = 0;
 -#endif
 -
        if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
                || connector_type == DRM_MODE_CONNECTOR_eDP)
                amdgpu_dm_initialize_dp_connector(dm, aconnector);
@@@ -6618,7 -6551,7 +6687,7 @@@ static void amdgpu_dm_commit_planes(str
                         * clients using the GLX_OML_sync_control extension or
                         * DRI3/Present extension with defined target_msc.
                         */
 -                      last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
 +                      last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
                }
                else {
                        /* For variable refresh rate mode only:
                         & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
                        (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
                        (int)(target_vblank -
 -                        amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
 +                        amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
                        usleep_range(1000, 1100);
                }
  
@@@ -154,18 -154,15 +154,18 @@@ amdgpu_dm_mst_connector_late_register(s
  {
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
 -      struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
 +      int r;
 +
 +      amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
 +      r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
 +      if (r)
 +              return r;
  
  #if defined(CONFIG_DEBUG_FS)
        connector_debugfs_init(amdgpu_dm_connector);
 -      amdgpu_dm_connector->debugfs_dpcd_address = 0;
 -      amdgpu_dm_connector->debugfs_dpcd_size = 0;
  #endif
  
 -      return drm_dp_mst_connector_late_register(connector, port);
 +      return r;
  }
  
  static void
@@@ -454,6 -451,7 +454,7 @@@ static void dm_dp_destroy_mst_connector
                                           aconnector->dc_sink);
                dc_sink_release(aconnector->dc_sink);
                aconnector->dc_sink = NULL;
+               aconnector->dc_link->cur_link_settings.lane_count = 0;
        }
  
        drm_connector_unregister(connector);
@@@ -485,10 -483,11 +486,10 @@@ void amdgpu_dm_initialize_dp_connector(
                                       struct amdgpu_dm_connector *aconnector)
  {
        aconnector->dm_dp_aux.aux.name = "dmdc";
 -      aconnector->dm_dp_aux.aux.dev = aconnector->base.kdev;
        aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
        aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
  
 -      drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
 +      drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
        drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
                                      &aconnector->base);
  
@@@ -128,8 -128,8 +128,8 @@@ bool hubbub1_verify_allow_pstate_change
         * pstate takes around ~100us on linux. Unknown currently as to
         * why it takes that long on linux
         */
 -      static unsigned int pstate_wait_timeout_us = 200;
 -      static unsigned int pstate_wait_expected_timeout_us = 40;
 +      const unsigned int pstate_wait_timeout_us = 200;
 +      const unsigned int pstate_wait_expected_timeout_us = 40;
        static unsigned int max_sampled_pstate_wait_us; /* data collection */
        static bool forced_pstate_allow; /* help with revert wa */
  
                forced_pstate_allow = false;
        }
  
 -      /* RV2:
 -       * dchubbubdebugind, at: 0xB
 +      /* The following table only applies to DCN1 and DCN2,
 +       * for newer DCNs, need to consult with HW IP folks to read RTL
 +       * HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
         * description
         * 0:     Pipe0 Plane0 Allow Pstate Change
         * 1:     Pipe0 Plane1 Allow Pstate Change
         * 28:    WB0 Allow Pstate Change
         * 29:    WB1 Allow Pstate Change
         * 30:    Arbiter's allow_pstate_change
 -       * 31:    SOC pstate change request"
 -       */
 -      /*DCN2.x:
 -      HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
 -      0: Pipe0 Plane0 Allow P-state Change
 -      1: Pipe0 Plane1 Allow P-state Change
 -      2: Pipe0 Cursor0 Allow P-state Change
 -      3: Pipe0 Cursor1 Allow P-state Change
 -      4: Pipe1 Plane0 Allow P-state Change
 -      5: Pipe1 Plane1 Allow P-state Change
 -      6: Pipe1 Cursor0 Allow P-state Change
 -      7: Pipe1 Cursor1 Allow P-state Change
 -      8: Pipe2 Plane0 Allow P-state Change
 -      9: Pipe2 Plane1 Allow P-state Change
 -      10: Pipe2 Cursor0 Allow P-state Change
 -      11: Pipe2 Cursor1 Allow P-state Change
 -      12: Pipe3 Plane0 Allow P-state Change
 -      13: Pipe3 Plane1 Allow P-state Change
 -      14: Pipe3 Cursor0 Allow P-state Change
 -      15: Pipe3 Cursor1 Allow P-state Change
 -      16: Pipe4 Plane0 Allow P-state Change
 -      17: Pipe4 Plane1 Allow P-state Change
 -      18: Pipe4 Cursor0 Allow P-state Change
 -      19: Pipe4 Cursor1 Allow P-state Change
 -      20: Pipe5 Plane0 Allow P-state Change
 -      21: Pipe5 Plane1 Allow P-state Change
 -      22: Pipe5 Cursor0 Allow P-state Change
 -      23: Pipe5 Cursor1 Allow P-state Change
 -      24: Pipe6 Plane0 Allow P-state Change
 -      25: Pipe6 Plane1 Allow P-state Change
 -      26: Pipe6 Cursor0 Allow P-state Change
 -      27: Pipe6 Cursor1 Allow P-state Change
 -      28: WB0 Allow P-state Change
 -      29: WB1 Allow P-state Change
 -      30: Arbiter`s Allow P-state Change
 -      31: SOC P-state Change request
 -      */
 -      /* RV1:
 -       * dchubbubdebugind, at: 0x7
 -       * description "3-0:   Pipe0 cursor0 QOS
 -       * 7-4:   Pipe1 cursor0 QOS
 -       * 11-8:  Pipe2 cursor0 QOS
 -       * 15-12: Pipe3 cursor0 QOS
 -       * 16:    Pipe0 Plane0 Allow Pstate Change
 -       * 17:    Pipe1 Plane0 Allow Pstate Change
 -       * 18:    Pipe2 Plane0 Allow Pstate Change
 -       * 19:    Pipe3 Plane0 Allow Pstate Change
 -       * 20:    Pipe0 Plane1 Allow Pstate Change
 -       * 21:    Pipe1 Plane1 Allow Pstate Change
 -       * 22:    Pipe2 Plane1 Allow Pstate Change
 -       * 23:    Pipe3 Plane1 Allow Pstate Change
 -       * 24:    Pipe0 cursor0 Allow Pstate Change
 -       * 25:    Pipe1 cursor0 Allow Pstate Change
 -       * 26:    Pipe2 cursor0 Allow Pstate Change
 -       * 27:    Pipe3 cursor0 Allow Pstate Change
 -       * 28:    WB0 Allow Pstate Change
 -       * 29:    WB1 Allow Pstate Change
 -       * 30:    Arbiter's allow_pstate_change
         * 31:    SOC pstate change request
         */
  
@@@ -243,7 -300,7 +243,7 @@@ void hubbub1_wm_change_req_wa(struct hu
                        DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
  }
  
 -void hubbub1_program_urgent_watermarks(
 +bool hubbub1_program_urgent_watermarks(
                struct hubbub *hubbub,
                struct dcn_watermark_set *watermarks,
                unsigned int refclk_mhz,
  {
        struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
        uint32_t prog_wm_value;
 +      bool wm_pending = false;
  
        /* Repeat for water mark set A, B, C and D. */
        /* clock state A */
                DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->a.urgent_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
 +              wm_pending = true;
  
        if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
                hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
                DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->a.pte_meta_urgent_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns)
 +              wm_pending = true;
  
        /* clock state B */
        if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
                DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->b.urgent_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
 +              wm_pending = true;
  
        if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
                hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
                DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->b.pte_meta_urgent_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns)
 +              wm_pending = true;
  
        /* clock state C */
        if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
                DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->c.urgent_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
 +              wm_pending = true;
  
        if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
                hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
                DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->c.pte_meta_urgent_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns)
 +              wm_pending = true;
  
        /* clock state D */
        if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
                DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->d.urgent_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
 +              wm_pending = true;
  
        if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
                hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
                DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->d.pte_meta_urgent_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns)
 +              wm_pending = true;
 +
 +      return wm_pending;
  }
  
 -void hubbub1_program_stutter_watermarks(
 +bool hubbub1_program_stutter_watermarks(
                struct hubbub *hubbub,
                struct dcn_watermark_set *watermarks,
                unsigned int refclk_mhz,
  {
        struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
        uint32_t prog_wm_value;
 +      bool wm_pending = false;
  
        /* clock state A */
        if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
 +                      < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
 +              wm_pending = true;
  
        if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
                        > hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->a.cstate_pstate.cstate_exit_ns
 +                      < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
 +              wm_pending = true;
  
        /* clock state B */
        if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
 +                      < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
 +              wm_pending = true;
  
        if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
                        > hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->b.cstate_pstate.cstate_exit_ns
 +                      < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
 +              wm_pending = true;
  
        /* clock state C */
        if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
 +                      < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
 +              wm_pending = true;
  
        if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
                        > hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->c.cstate_pstate.cstate_exit_ns
 +                      < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
 +              wm_pending = true;
  
        /* clock state D */
        if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
                DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
 +                      < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
 +              wm_pending = true;
  
        if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
                        > hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
                DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
                        "HW register value = 0x%x\n",
                        watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->d.cstate_pstate.cstate_exit_ns
 +                      < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
 +              wm_pending = true;
  
 +      return wm_pending;
  }
  
 -void hubbub1_program_pstate_watermarks(
 +bool hubbub1_program_pstate_watermarks(
                struct hubbub *hubbub,
                struct dcn_watermark_set *watermarks,
                unsigned int refclk_mhz,
  {
        struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
        uint32_t prog_wm_value;
 +      bool wm_pending = false;
  
        /* clock state A */
        if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
                        "HW register value = 0x%x\n\n",
                        watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->a.cstate_pstate.pstate_change_ns
 +                      < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
 +              wm_pending = true;
  
        /* clock state B */
        if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
                        "HW register value = 0x%x\n\n",
                        watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->b.cstate_pstate.pstate_change_ns
 +                      < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
 +              wm_pending = true;
  
        /* clock state C */
        if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
                        "HW register value = 0x%x\n\n",
                        watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->c.cstate_pstate.pstate_change_ns
 +                      < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
 +              wm_pending = true;
  
        /* clock state D */
        if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
                DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
                        "HW register value = 0x%x\n\n",
                        watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
 -      }
 +      } else if (watermarks->d.cstate_pstate.pstate_change_ns
 +                      < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
 +              wm_pending = true;
 +
 +      return wm_pending;
  }
  
 -void hubbub1_program_watermarks(
 +bool hubbub1_program_watermarks(
                struct hubbub *hubbub,
                struct dcn_watermark_set *watermarks,
                unsigned int refclk_mhz,
                bool safe_to_lower)
  {
        struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
 +      bool wm_pending = false;
        /*
         * Need to clamp to max of the register values (i.e. no wrap)
         * for dcn1, all wm registers are 21-bit wide
         */
 -      hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
 -      hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
 -      hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
 +      if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
 +              wm_pending = true;
 +
 +      if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
 +              wm_pending = true;
 +
 +      if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
 +              wm_pending = true;
  
        REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
                        DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
                        DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
                        DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
  #endif
 +      return wm_pending;
  }
  
  void hubbub1_update_dchub(
@@@ -830,8 -840,8 +830,8 @@@ static void hubbub1_det_request_size
  
        hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
  
-       swath_bytes_horz_wc = height * blk256_height * bpe;
-       swath_bytes_vert_wc = width * blk256_width * bpe;
+       swath_bytes_horz_wc = width * blk256_height * bpe;
+       swath_bytes_vert_wc = height * blk256_width * bpe;
  
        *req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
                        false : /* full 256B request */
@@@ -222,7 -222,7 +222,7 @@@ int smu_set_soft_freq_range(struct smu_
  {
        int ret = 0;
  
-       if (min <= 0 && max <= 0)
+       if (min < 0 && max < 0)
                return -EINVAL;
  
        if (!smu_clk_dpm_is_enabled(smu, clk_type))
@@@ -1113,12 -1113,12 +1113,12 @@@ static int smu_smc_table_hw_init(struc
                        return ret;
        }
  
 +      ret = smu_set_driver_table_location(smu);
 +      if (ret)
 +              return ret;
 +
        /* smu_dump_pptable(smu); */
        if (!amdgpu_sriov_vf(adev)) {
 -              ret = smu_set_driver_table_location(smu);
 -              if (ret)
 -                      return ret;
 -
                /*
                 * Copy pptable bo in the vram to smc with SMU MSGs such as
                 * SetDriverDramAddr and TransferTableDram2Smu.
@@@ -1454,79 -1454,29 +1454,79 @@@ int smu_reset(struct smu_context *smu
        return ret;
  }
  
 +static int smu_disable_dpm(struct smu_context *smu)
 +{
 +      struct amdgpu_device *adev = smu->adev;
 +      uint32_t smu_version;
 +      int ret = 0;
 +      bool use_baco = !smu->is_apu &&
 +              ((adev->in_gpu_reset &&
 +                (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
 +               (adev->in_runpm && amdgpu_asic_supports_baco(adev)));
 +
 +      ret = smu_get_smc_version(smu, NULL, &smu_version);
 +      if (ret) {
 +              pr_err("Failed to get smu version.\n");
 +              return ret;
 +      }
 +
 +      /*
 +       * For baco on Arcturus, this operation
 +       * (disable all smu feature) will be handled by SMU FW.
 +       */
 +      if (adev->asic_type == CHIP_ARCTURUS) {
 +              if (use_baco && (smu_version > 0x360e00))
 +                      return 0;
 +      }
 +
 +      /* Disable all enabled SMU features */
 +      ret = smu_system_features_control(smu, false);
 +      if (ret) {
 +              pr_err("Failed to disable smu features.\n");
 +              return ret;
 +      }
 +
 +      /* For baco, need to leave BACO feature enabled */
 +      if (use_baco) {
 +              /*
 +               * Correct the way for checking whether SMU_FEATURE_BACO_BIT
 +               * is supported.
 +               *
 +               * Since 'smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)' will
 +               * always return false as the 'smu_system_features_control(smu, false)'
 +               * was just issued above which disabled all SMU features.
 +               *
 +               * Thus 'smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT)' is used
 +               * now for the checking.
 +               */
 +              if (smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT) >= 0) {
 +                      ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
 +                      if (ret) {
 +                              pr_warn("set BACO feature enabled failed, return %d\n", ret);
 +                              return ret;
 +                      }
 +              }
 +      }
 +
 +      return ret;
 +}
 +
  static int smu_suspend(void *handle)
  {
 -      int ret;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct smu_context *smu = &adev->smu;
 -      bool baco_feature_is_enabled = false;
 +      int ret;
  
 -      if (!smu->pm_enabled)
 +      if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev))
                return 0;
  
 -      if(!smu->is_apu)
 -              baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
 -
 -      ret = smu_system_features_control(smu, false);
 -      if (ret)
 -              return ret;
 +      if (!smu->pm_enabled)
 +              return 0;
  
 -      if (baco_feature_is_enabled) {
 -              ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
 -              if (ret) {
 -                      pr_warn("set BACO feature enabled failed, return %d\n", ret);
 +      if(!amdgpu_sriov_vf(adev)) {
 +              ret = smu_disable_dpm(smu);
 +              if (ret)
                        return ret;
 -              }
        }
  
        smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
@@@ -111,8 -111,8 +111,8 @@@ static struct smu_12_0_cmn2aisc_mappin
        CLK_MAP(GFXCLK, CLOCK_GFXCLK),
        CLK_MAP(SCLK,   CLOCK_GFXCLK),
        CLK_MAP(SOCCLK, CLOCK_SOCCLK),
-       CLK_MAP(UCLK, CLOCK_UMCCLK),
-       CLK_MAP(MCLK, CLOCK_UMCCLK),
+       CLK_MAP(UCLK, CLOCK_FCLK),
+       CLK_MAP(MCLK, CLOCK_FCLK),
  };
  
  static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
@@@ -280,7 -280,7 +280,7 @@@ static int renoir_print_clk_levels(stru
                break;
        case SMU_MCLK:
                count = NUM_MEMCLK_DPM_LEVELS;
-               cur_value = metrics.ClockFrequency[CLOCK_UMCCLK];
+               cur_value = metrics.ClockFrequency[CLOCK_FCLK];
                break;
        case SMU_DCEFCLK:
                count = NUM_DCFCLK_DPM_LEVELS;
@@@ -672,18 -672,14 +672,18 @@@ static int renoir_set_power_profile_mod
        /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
        workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
        if (workload_type < 0) {
 -              pr_err("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode);
 +              /*
 +               * TODO: If some case need switch to powersave/default power mode
 +               * then can consider enter WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving.
 +               */
 +              pr_err_once("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode);
                return -EINVAL;
        }
  
        ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
                                    1 << workload_type);
        if (ret) {
 -              pr_err("Fail to set workload type %d\n", workload_type);
 +              pr_err_once("Fail to set workload type %d\n", workload_type);
                return ret;
        }
  
@@@ -210,8 -210,7 +210,7 @@@ static int anx6345_dp_link_training(str
        if (err)
                return err;
  
-       dpcd[0] = drm_dp_max_link_rate(anx6345->dpcd);
-       dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
+       dpcd[0] = dp_bw;
        err = regmap_write(anx6345->map[I2C_IDX_DPTX],
                           SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
        if (err)
@@@ -520,17 -519,11 +519,17 @@@ static const struct drm_connector_func
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
  };
  
 -static int anx6345_bridge_attach(struct drm_bridge *bridge)
 +static int anx6345_bridge_attach(struct drm_bridge *bridge,
 +                               enum drm_bridge_attach_flags flags)
  {
        struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
        int err;
  
 +      if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
 +              DRM_ERROR("Fix bridge driver to make connector optional!");
 +              return -EINVAL;
 +      }
 +
        if (!bridge->encoder) {
                DRM_ERROR("Parent encoder object not found");
                return -ENODEV;
@@@ -718,14 -711,14 +717,14 @@@ static int anx6345_i2c_probe(struct i2c
                DRM_DEBUG("No panel found\n");
  
        /* 1.2V digital core power regulator  */
 -      anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12-supply");
 +      anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12");
        if (IS_ERR(anx6345->dvdd12)) {
                DRM_ERROR("dvdd12-supply not found\n");
                return PTR_ERR(anx6345->dvdd12);
        }
  
        /* 2.5V digital core power regulator  */
 -      anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25-supply");
 +      anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25");
        if (IS_ERR(anx6345->dvdd25)) {
                DRM_ERROR("dvdd25-supply not found\n");
                return PTR_ERR(anx6345->dvdd25);
@@@ -31,7 -31,6 +31,7 @@@
  #include <drm/drm_edid.h>
  #include <drm/drm_of.h>
  #include <drm/drm_panel.h>
 +#include <drm/drm_print.h>
  #include <drm/drm_probe_helper.h>
  
  /* Registers */
@@@ -298,7 -297,7 +298,7 @@@ static inline int tc_poll_timeout(struc
  
  static int tc_aux_wait_busy(struct tc_data *tc)
  {
-       return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 1000, 100000);
+       return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 100, 100000);
  }
  
  static int tc_aux_write_data(struct tc_data *tc, const void *data,
@@@ -641,7 -640,7 +641,7 @@@ static int tc_aux_link_setup(struct tc_
        if (ret)
                goto err;
  
-       ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
+       ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 100, 100000);
        if (ret == -ETIMEDOUT) {
                dev_err(tc->dev, "Timeout waiting for PHY to become ready");
                return ret;
@@@ -877,7 -876,7 +877,7 @@@ static int tc_wait_link_training(struc
        int ret;
  
        ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE,
-                             LT_LOOPDONE, 1, 1000);
+                             LT_LOOPDONE, 500, 100000);
        if (ret) {
                dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n");
                return ret;
@@@ -950,7 -949,7 +950,7 @@@ static int tc_main_link_enable(struct t
        dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
        ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
  
-       ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
+       ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 500, 100000);
        if (ret) {
                dev_err(dev, "timeout waiting for phy become ready");
                return ret;
@@@ -1404,19 -1403,13 +1404,19 @@@ static const struct drm_connector_func
        .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
  };
  
 -static int tc_bridge_attach(struct drm_bridge *bridge)
 +static int tc_bridge_attach(struct drm_bridge *bridge,
 +                          enum drm_bridge_attach_flags flags)
  {
        u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
        struct tc_data *tc = bridge_to_tc(bridge);
        struct drm_device *drm = bridge->dev;
        int ret;
  
 +      if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
 +              DRM_ERROR("Fix bridge driver to make connector optional!");
 +              return -EINVAL;
 +      }
 +
        /* Create DP/eDP connector */
        drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
        ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
@@@ -4,12 -4,14 +4,12 @@@
   * Author: Jyri Sarha <jsarha@ti.com>
   */
  
 -#include <linux/delay.h>
 -#include <linux/fwnode.h>
  #include <linux/gpio/consumer.h>
  #include <linux/i2c.h>
 -#include <linux/irq.h>
  #include <linux/module.h>
  #include <linux/of_graph.h>
  #include <linux/platform_device.h>
 +#include <linux/workqueue.h>
  
  #include <drm/drm_atomic_helper.h>
  #include <drm/drm_bridge.h>
  struct tfp410 {
        struct drm_bridge       bridge;
        struct drm_connector    connector;
 -      unsigned int            connector_type;
  
        u32                     bus_format;
 -      struct i2c_adapter      *ddc;
 -      struct gpio_desc        *hpd;
 -      int                     hpd_irq;
        struct delayed_work     hpd_work;
        struct gpio_desc        *powerdown;
  
        struct drm_bridge_timings timings;
 +      struct drm_bridge       *next_bridge;
  
        struct device *dev;
  };
@@@ -51,18 -56,13 +51,18 @@@ static int tfp410_get_modes(struct drm_
        struct edid *edid;
        int ret;
  
 -      if (!dvi->ddc)
 -              goto fallback;
 +      edid = drm_bridge_get_edid(dvi->next_bridge, connector);
 +      if (IS_ERR_OR_NULL(edid)) {
 +              if (edid != ERR_PTR(-ENOTSUPP))
 +                      DRM_INFO("EDID read failed. Fallback to standard modes\n");
  
 -      edid = drm_get_edid(connector, dvi->ddc);
 -      if (!edid) {
 -              DRM_INFO("EDID read failed. Fallback to standard modes\n");
 -              goto fallback;
 +              /*
 +               * No EDID, fallback on the XGA standard modes and prefer a mode
 +               * pretty much anything can handle.
 +               */
 +              ret = drm_add_modes_noedid(connector, 1920, 1200);
 +              drm_set_preferred_mode(connector, 1024, 768);
 +              return ret;
        }
  
        drm_connector_update_edid_property(connector, edid);
  
        kfree(edid);
  
 -      return ret;
 -
 -fallback:
 -      /* No EDID, fallback on the XGA standard modes */
 -      ret = drm_add_modes_noedid(connector, 1920, 1200);
 -
 -      /* And prefer a mode pretty much anything can handle */
 -      drm_set_preferred_mode(connector, 1024, 768);
 -
        return ret;
  }
  
@@@ -83,7 -92,21 +83,7 @@@ tfp410_connector_detect(struct drm_conn
  {
        struct tfp410 *dvi = drm_connector_to_tfp410(connector);
  
 -      if (dvi->hpd) {
 -              if (gpiod_get_value_cansleep(dvi->hpd))
 -                      return connector_status_connected;
 -              else
 -                      return connector_status_disconnected;
 -      }
 -
 -      if (dvi->ddc) {
 -              if (drm_probe_ddc(dvi->ddc))
 -                      return connector_status_connected;
 -              else
 -                      return connector_status_disconnected;
 -      }
 -
 -      return connector_status_unknown;
 +      return drm_bridge_detect(dvi->next_bridge);
  }
  
  static const struct drm_connector_funcs tfp410_con_funcs = {
        .atomic_destroy_state   = drm_atomic_helper_connector_destroy_state,
  };
  
 -static int tfp410_attach(struct drm_bridge *bridge)
 +static void tfp410_hpd_work_func(struct work_struct *work)
 +{
 +      struct tfp410 *dvi;
 +
 +      dvi = container_of(work, struct tfp410, hpd_work.work);
 +
 +      if (dvi->bridge.dev)
 +              drm_helper_hpd_irq_event(dvi->bridge.dev);
 +}
 +
 +static void tfp410_hpd_callback(void *arg, enum drm_connector_status status)
 +{
 +      struct tfp410 *dvi = arg;
 +
 +      mod_delayed_work(system_wq, &dvi->hpd_work,
 +                       msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
 +}
 +
 +static int tfp410_attach(struct drm_bridge *bridge,
 +                       enum drm_bridge_attach_flags flags)
  {
        struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
        int ret;
  
 +      ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge,
 +                              DRM_BRIDGE_ATTACH_NO_CONNECTOR);
 +      if (ret < 0)
 +              return ret;
 +
 +      if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
 +              return 0;
 +
        if (!bridge->encoder) {
                dev_err(dvi->dev, "Missing encoder\n");
                return -ENODEV;
        }
  
 -      if (dvi->hpd_irq >= 0)
 +      if (dvi->next_bridge->ops & DRM_BRIDGE_OP_DETECT)
                dvi->connector.polled = DRM_CONNECTOR_POLL_HPD;
        else
                dvi->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
  
 +      if (dvi->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
 +              INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
 +              drm_bridge_hpd_enable(dvi->next_bridge, tfp410_hpd_callback,
 +                                    dvi);
 +      }
 +
        drm_connector_helper_add(&dvi->connector,
                                 &tfp410_con_helper_funcs);
        ret = drm_connector_init_with_ddc(bridge->dev, &dvi->connector,
                                          &tfp410_con_funcs,
 -                                        dvi->connector_type,
 -                                        dvi->ddc);
 +                                        dvi->next_bridge->type,
 +                                        dvi->next_bridge->ddc);
        if (ret) {
-               dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret);
+               dev_err(dvi->dev, "drm_connector_init_with_ddc() failed: %d\n",
+                       ret);
                return ret;
        }
  
        drm_display_info_set_bus_formats(&dvi->connector.display_info,
                                         &dvi->bus_format, 1);
  
 -      drm_connector_attach_encoder(&dvi->connector,
 -                                        bridge->encoder);
 +      drm_connector_attach_encoder(&dvi->connector, bridge->encoder);
  
        return 0;
  }
  
 +static void tfp410_detach(struct drm_bridge *bridge)
 +{
 +      struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
 +
 +      if (dvi->connector.dev && dvi->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
 +              drm_bridge_hpd_disable(dvi->next_bridge);
 +              cancel_delayed_work_sync(&dvi->hpd_work);
 +      }
 +}
 +
  static void tfp410_enable(struct drm_bridge *bridge)
  {
        struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
@@@ -186,25 -168,31 +187,25 @@@ static void tfp410_disable(struct drm_b
        gpiod_set_value_cansleep(dvi->powerdown, 1);
  }
  
 -static const struct drm_bridge_funcs tfp410_bridge_funcs = {
 -      .attach         = tfp410_attach,
 -      .enable         = tfp410_enable,
 -      .disable        = tfp410_disable,
 -};
 -
 -static void tfp410_hpd_work_func(struct work_struct *work)
 +static enum drm_mode_status tfp410_mode_valid(struct drm_bridge *bridge,
 +                                            const struct drm_display_mode *mode)
  {
 -      struct tfp410 *dvi;
 +      if (mode->clock < 25000)
 +              return MODE_CLOCK_LOW;
  
 -      dvi = container_of(work, struct tfp410, hpd_work.work);
 +      if (mode->clock > 165000)
 +              return MODE_CLOCK_HIGH;
  
 -      if (dvi->bridge.dev)
 -              drm_helper_hpd_irq_event(dvi->bridge.dev);
 +      return MODE_OK;
  }
  
 -static irqreturn_t tfp410_hpd_irq_thread(int irq, void *arg)
 -{
 -      struct tfp410 *dvi = arg;
 -
 -      mod_delayed_work(system_wq, &dvi->hpd_work,
 -                      msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
 -
 -      return IRQ_HANDLED;
 -}
 +static const struct drm_bridge_funcs tfp410_bridge_funcs = {
 +      .attach         = tfp410_attach,
 +      .detach         = tfp410_detach,
 +      .enable         = tfp410_enable,
 +      .disable        = tfp410_disable,
 +      .mode_valid     = tfp410_mode_valid,
 +};
  
  static const struct drm_bridge_timings tfp410_default_timings = {
        .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
@@@ -283,9 -271,51 +284,9 @@@ static int tfp410_parse_timings(struct 
        return 0;
  }
  
 -static int tfp410_get_connector_properties(struct tfp410 *dvi)
 -{
 -      struct device_node *connector_node, *ddc_phandle;
 -      int ret = 0;
 -
 -      /* port@1 is the connector node */
 -      connector_node = of_graph_get_remote_node(dvi->dev->of_node, 1, -1);
 -      if (!connector_node)
 -              return -ENODEV;
 -
 -      if (of_device_is_compatible(connector_node, "hdmi-connector"))
 -              dvi->connector_type = DRM_MODE_CONNECTOR_HDMIA;
 -      else
 -              dvi->connector_type = DRM_MODE_CONNECTOR_DVID;
 -
 -      dvi->hpd = fwnode_gpiod_get_index(&connector_node->fwnode,
 -                                        "hpd", 0, GPIOD_IN, "hpd");
 -      if (IS_ERR(dvi->hpd)) {
 -              ret = PTR_ERR(dvi->hpd);
 -              dvi->hpd = NULL;
 -              if (ret == -ENOENT)
 -                      ret = 0;
 -              else
 -                      goto fail;
 -      }
 -
 -      ddc_phandle = of_parse_phandle(connector_node, "ddc-i2c-bus", 0);
 -      if (!ddc_phandle)
 -              goto fail;
 -
 -      dvi->ddc = of_get_i2c_adapter_by_node(ddc_phandle);
 -      if (dvi->ddc)
 -              dev_info(dvi->dev, "Connector's ddc i2c bus found\n");
 -      else
 -              ret = -EPROBE_DEFER;
 -
 -      of_node_put(ddc_phandle);
 -
 -fail:
 -      of_node_put(connector_node);
 -      return ret;
 -}
 -
  static int tfp410_init(struct device *dev, bool i2c)
  {
 +      struct device_node *node;
        struct tfp410 *dvi;
        int ret;
  
        dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
        if (!dvi)
                return -ENOMEM;
 +
 +      dvi->dev = dev;
        dev_set_drvdata(dev, dvi);
  
        dvi->bridge.funcs = &tfp410_bridge_funcs;
        dvi->bridge.of_node = dev->of_node;
        dvi->bridge.timings = &dvi->timings;
 -      dvi->dev = dev;
 +      dvi->bridge.type = DRM_MODE_CONNECTOR_DVID;
  
        ret = tfp410_parse_timings(dvi, i2c);
        if (ret)
 -              goto fail;
 +              return ret;
  
 -      ret = tfp410_get_connector_properties(dvi);
 -      if (ret)
 -              goto fail;
 +      /* Get the next bridge, connected to port@1. */
 +      node = of_graph_get_remote_node(dev->of_node, 1, -1);
 +      if (!node)
 +              return -ENODEV;
 +
 +      dvi->next_bridge = of_drm_find_bridge(node);
 +      of_node_put(node);
  
 +      if (!dvi->next_bridge)
 +              return -EPROBE_DEFER;
 +
 +      /* Get the powerdown GPIO. */
        dvi->powerdown = devm_gpiod_get_optional(dev, "powerdown",
                                                 GPIOD_OUT_HIGH);
        if (IS_ERR(dvi->powerdown)) {
                return PTR_ERR(dvi->powerdown);
        }
  
 -      if (dvi->hpd)
 -              dvi->hpd_irq = gpiod_to_irq(dvi->hpd);
 -      else
 -              dvi->hpd_irq = -ENXIO;
 -
 -      if (dvi->hpd_irq >= 0) {
 -              INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
 -
 -              ret = devm_request_threaded_irq(dev, dvi->hpd_irq,
 -                      NULL, tfp410_hpd_irq_thread, IRQF_TRIGGER_RISING |
 -                      IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
 -                      "hdmi-hpd", dvi);
 -              if (ret) {
 -                      DRM_ERROR("failed to register hpd interrupt\n");
 -                      goto fail;
 -              }
 -      }
 -
 +      /*  Register the DRM bridge. */
        drm_bridge_add(&dvi->bridge);
  
        return 0;
 -fail:
 -      i2c_put_adapter(dvi->ddc);
 -      if (dvi->hpd)
 -              gpiod_put(dvi->hpd);
 -      return ret;
  }
  
  static int tfp410_fini(struct device *dev)
  {
        struct tfp410 *dvi = dev_get_drvdata(dev);
  
 -      if (dvi->hpd_irq >= 0)
 -              cancel_delayed_work_sync(&dvi->hpd_work);
 -
        drm_bridge_remove(&dvi->bridge);
  
 -      if (dvi->ddc)
 -              i2c_put_adapter(dvi->ddc);
 -      if (dvi->hpd)
 -              gpiod_put(dvi->hpd);
 -
        return 0;
  }
  
@@@ -951,7 -951,8 +951,8 @@@ bool drm_client_rotation(struct drm_mod
         * depending on the hardware this may require the framebuffer
         * to be in a specific tiling format.
         */
-       if ((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180 ||
+       if (((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0 &&
+            (*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180) ||
            !plane->rotation_property)
                return false;
  
  }
  
  /**
 - * drm_client_modeset_commit_force() - Force commit CRTC configuration
 + * drm_client_modeset_commit_locked() - Force commit CRTC configuration
   * @client: DRM client
   *
 - * Commit modeset configuration to crtcs without checking if there is a DRM master.
 + * Commit modeset configuration to crtcs without checking if there is a DRM
 + * master. The assumption is that the caller already holds an internal DRM
 + * master reference acquired with drm_master_internal_acquire().
   *
   * Returns:
   * Zero on success or negative error code on failure.
   */
 -int drm_client_modeset_commit_force(struct drm_client_dev *client)
 +int drm_client_modeset_commit_locked(struct drm_client_dev *client)
  {
        struct drm_device *dev = client->dev;
        int ret;
  
        return ret;
  }
 -EXPORT_SYMBOL(drm_client_modeset_commit_force);
 +EXPORT_SYMBOL(drm_client_modeset_commit_locked);
  
  /**
   * drm_client_modeset_commit() - Commit CRTC configuration
@@@ -1137,7 -1136,7 +1138,7 @@@ int drm_client_modeset_commit(struct dr
        if (!drm_master_internal_acquire(dev))
                return -EBUSY;
  
 -      ret = drm_client_modeset_commit_force(client);
 +      ret = drm_client_modeset_commit_locked(client);
  
        drm_master_internal_release(dev);
  
@@@ -1540,7 -1540,7 +1540,7 @@@ static int exynos_dsi_host_attach(struc
  
        out_bridge  = of_drm_find_bridge(device->dev.of_node);
        if (out_bridge) {
 -              drm_bridge_attach(encoder, out_bridge, NULL);
 +              drm_bridge_attach(encoder, out_bridge, NULL, 0);
                dsi->out_bridge = out_bridge;
                list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
        } else {
@@@ -1717,7 -1717,7 +1717,7 @@@ static int exynos_dsi_bind(struct devic
        if (dsi->in_bridge_node) {
                in_bridge = of_drm_find_bridge(dsi->in_bridge_node);
                if (in_bridge)
 -                      drm_bridge_attach(encoder, in_bridge, NULL);
 +                      drm_bridge_attach(encoder, in_bridge, NULL, 0);
        }
  
        return mipi_dsi_host_register(&dsi->dsi_host);
@@@ -1773,8 -1773,9 +1773,9 @@@ static int exynos_dsi_probe(struct plat
        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
                                      dsi->supplies);
        if (ret) {
-               dev_info(dev, "failed to get regulators: %d\n", ret);
-               return -EPROBE_DEFER;
+               if (ret != -EPROBE_DEFER)
+                       dev_info(dev, "failed to get regulators: %d\n", ret);
+               return ret;
        }
  
        dsi->clks = devm_kcalloc(dev,
                dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
                if (IS_ERR(dsi->clks[i])) {
                        if (strcmp(clk_names[i], "sclk_mipi") == 0) {
-                               strcpy(clk_names[i], OLD_SCLK_MIPI_CLK_NAME);
-                               i--;
-                               continue;
+                               dsi->clks[i] = devm_clk_get(dev,
+                                                       OLD_SCLK_MIPI_CLK_NAME);
+                               if (!IS_ERR(dsi->clks[i]))
+                                       continue;
                        }
  
                        dev_info(dev, "failed to get the clock: %s\n",
@@@ -960,7 -960,7 +960,7 @@@ static int hdmi_create_connector(struc
        drm_connector_attach_encoder(connector, encoder);
  
        if (hdata->bridge) {
 -              ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
 +              ret = drm_bridge_attach(encoder, hdata->bridge, NULL, 0);
                if (ret)
                        DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n");
        }
@@@ -1805,18 -1805,10 +1805,10 @@@ static int hdmi_resources_init(struct h
  
        hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en");
  
-       if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) {
+       if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV)
                if (IS_ERR(hdata->reg_hdmi_en))
                        return PTR_ERR(hdata->reg_hdmi_en);
  
-               ret = regulator_enable(hdata->reg_hdmi_en);
-               if (ret) {
-                       DRM_DEV_ERROR(dev,
-                                     "failed to enable hdmi-en regulator\n");
-                       return ret;
-               }
-       }
        return hdmi_bridge_init(hdata);
  }
  
@@@ -2023,6 -2015,15 +2015,15 @@@ static int hdmi_probe(struct platform_d
                }
        }
  
+       if (!IS_ERR(hdata->reg_hdmi_en)) {
+               ret = regulator_enable(hdata->reg_hdmi_en);
+               if (ret) {
+                       DRM_DEV_ERROR(dev,
+                             "failed to enable hdmi-en regulator\n");
+                       goto err_hdmiphy;
+               }
+       }
        pm_runtime_enable(dev);
  
        audio_infoframe = &hdata->audio.infoframe;
@@@ -2047,7 -2048,8 +2048,8 @@@ err_unregister_audio
  
  err_rpm_disable:
        pm_runtime_disable(dev);
+       if (!IS_ERR(hdata->reg_hdmi_en))
+               regulator_disable(hdata->reg_hdmi_en);
  err_hdmiphy:
        if (hdata->hdmiphy_port)
                put_device(&hdata->hdmiphy_port->dev);
@@@ -116,17 -116,17 +116,17 @@@ static void _wa_add(struct i915_wa_lis
                } else {
                        wa_ = &wal->list[mid];
  
 -                      if ((wa->mask & ~wa_->mask) == 0) {
 -                              DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
 +                      if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
 +                              DRM_ERROR("Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
                                          i915_mmio_reg_offset(wa_->reg),
 -                                        wa_->mask, wa_->val);
 +                                        wa_->clr, wa_->set);
  
 -                              wa_->val &= ~wa->mask;
 +                              wa_->set &= ~wa->clr;
                        }
  
                        wal->wa_count++;
 -                      wa_->val |= wa->val;
 -                      wa_->mask |= wa->mask;
 +                      wa_->set |= wa->set;
 +                      wa_->clr |= wa->clr;
                        wa_->read |= wa->read;
                        return;
                }
        }
  }
  
 -static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
 -                 u32 val, u32 read_mask)
 +static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
 +                 u32 clear, u32 set, u32 read_mask)
  {
        struct i915_wa wa = {
                .reg  = reg,
 -              .mask = mask,
 -              .val  = val,
 +              .clr  = clear,
 +              .set  = set,
                .read = read_mask,
        };
  
  }
  
  static void
 -wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
 -                 u32 val)
 +wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
  {
 -      wa_add(wal, reg, mask, val, mask);
 +      wa_add(wal, reg, clear, set, clear);
  }
  
  static void
 -wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 +wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
 +{
 +      wa_write_masked_or(wal, reg, ~0, set);
 +}
 +
 +static void
 +wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
  {
 -      wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
 +      wa_write_masked_or(wal, reg, set, set);
  }
  
  static void
 -wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 +wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
  {
 -      wa_write_masked_or(wal, reg, ~0, val);
 +      wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val);
  }
  
  static void
 -wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 +wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
  {
 -      wa_write_masked_or(wal, reg, val, val);
 +      wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val);
  }
  
  #define WA_SET_BIT_MASKED(addr, mask) \
 -      wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
 +      wa_masked_en(wal, (addr), (mask))
  
  #define WA_CLR_BIT_MASKED(addr, mask) \
 -      wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask))
 +      wa_masked_dis(wal, (addr), (mask))
  
  #define WA_SET_FIELD_MASKED(addr, mask, value) \
 -      wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
 +      wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))
  
  static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
                                      struct i915_wa_list *wal)
@@@ -580,24 -575,19 +580,19 @@@ static void icl_ctx_workarounds_init(st
  static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
  {
-       u32 val;
        /* Wa_1409142259:tgl */
        WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
                          GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
  
-       /* Wa_1604555607:tgl */
-       val = intel_uncore_read(engine->uncore, FF_MODE2);
-       val &= ~FF_MODE2_TDS_TIMER_MASK;
-       val |= FF_MODE2_TDS_TIMER_128;
        /*
-        * FIXME: FF_MODE2 register is not readable till TGL B0. We can
-        * enable verification of WA from the later steppings, which enables
-        * the read of FF_MODE2.
+        * Wa_1604555607:gen12 and Wa_1608008084:gen12
+        * FF_MODE2 register will return the wrong value when read. The default
+        * value for this register is zero for all fields and there are no bit
+        * masks. So instead of doing a RMW we should just write the TDS timer
+        * value for Wa_1604555607.
         */
-       wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, val,
-              IS_TGL_REVID(engine->i915, TGL_REVID_A0, TGL_REVID_A0) ? 0 :
-                           FF_MODE2_TDS_TIMER_MASK);
+       wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
+              FF_MODE2_TDS_TIMER_128, 0);
  }
  
  static void
@@@ -667,7 -657,7 +662,7 @@@ int intel_engine_emit_ctx_wa(struct i91
        *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
        for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
                *cs++ = i915_mmio_reg_offset(wa->reg);
 -              *cs++ = wa->val;
 +              *cs++ = wa->set;
        }
        *cs++ = MI_NOOP;
  
@@@ -832,7 -822,7 +827,7 @@@ wa_init_mcr(struct drm_i915_private *i9
                DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
                         intel_sseu_get_subslices(sseu, slice), l3_en);
                subslice = fls(l3_en);
 -              WARN_ON(!subslice);
 +              drm_WARN_ON(&i915->drm, !subslice);
        }
        subslice--;
  
@@@ -996,10 -986,11 +991,10 @@@ wal_get_fw_for_rmw(struct intel_uncore 
  static bool
  wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
  {
 -      if ((cur ^ wa->val) & wa->read) {
 -              DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
 +      if ((cur ^ wa->set) & wa->read) {
 +              DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x)\n",
                          name, from, i915_mmio_reg_offset(wa->reg),
 -                        cur, cur & wa->read,
 -                        wa->val, wa->mask);
 +                        cur, cur & wa->read, wa->set);
  
                return false;
        }
@@@ -1024,10 -1015,7 +1019,10 @@@ wa_list_apply(struct intel_uncore *unco
        intel_uncore_forcewake_get__locked(uncore, fw);
  
        for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
 -              intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
 +              if (wa->clr)
 +                      intel_uncore_rmw_fw(uncore, wa->reg, wa->clr, wa->set);
 +              else
 +                      intel_uncore_write_fw(uncore, wa->reg, wa->set);
                if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                        wa_verify(wa,
                                  intel_uncore_read_fw(uncore, wa->reg),
@@@ -1261,9 -1249,6 +1256,9 @@@ static void tgl_whitelist_build(struct 
                whitelist_reg_ext(w, PS_INVOCATION_COUNT,
                                  RING_FORCE_TO_NONPRIV_ACCESS_RD |
                                  RING_FORCE_TO_NONPRIV_RANGE_4);
 +
 +              /* Wa_1808121037:tgl */
 +              whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
                break;
        default:
                break;
@@@ -1355,16 -1340,6 +1350,16 @@@ rcs_engine_wa_init(struct intel_engine_
                wa_write_or(wal,
                            GEN7_SARCHKMD,
                            GEN7_DISABLE_SAMPLER_PREFETCH);
 +
 +              /* Wa_1407928979:tgl */
 +              wa_write_or(wal,
 +                          GEN7_FF_THREAD_MODE,
 +                          GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
 +
 +              /* Wa_1606931601:tgl */
 +              wa_masked_en(wal,
 +                           GEN7_ROW_CHICKEN2,
 +                           GEN12_DISABLE_EARLY_READ);
        }
  
        if (IS_GEN(i915, 11)) {
                            GEN8_L3SQCREG4,
                            GEN8_LQSC_FLUSH_COHERENT_LINES);
        }
 +
 +      if (IS_GEN(i915, 7))
 +              /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
 +              wa_masked_en(wal,
 +                           GFX_MODE_GEN7,
 +                           GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
 +
 +      if (IS_GEN_RANGE(i915, 6, 7))
 +              /*
 +               * We need to disable the AsyncFlip performance optimisations in
 +               * order to use MI_WAIT_FOR_EVENT within the CS. It should
 +               * already be programmed to '1' on all products.
 +               *
 +               * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
 +               */
 +              wa_masked_en(wal,
 +                           MI_MODE,
 +                           ASYNC_FLIP_PERF_DISABLE);
 +
 +      if (IS_GEN(i915, 6)) {
 +              /*
 +               * Required for the hardware to program scanline values for
 +               * waiting
 +               * WaEnableFlushTlbInvalidationMode:snb
 +               */
 +              wa_masked_en(wal,
 +                           GFX_MODE,
 +                           GFX_TLB_INVALIDATE_EXPLICIT);
 +
 +              /*
 +               * From the Sandybridge PRM, volume 1 part 3, page 24:
 +               * "If this bit is set, STCunit will have LRA as replacement
 +               *  policy. [...] This bit must be reset. LRA replacement
 +               *  policy is not supported."
 +               */
 +              wa_masked_dis(wal,
 +                            CACHE_MODE_0,
 +                            CM0_STC_EVICT_DISABLE_LRA_SNB);
 +      }
 +
 +      if (IS_GEN_RANGE(i915, 4, 6))
 +              /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
 +              wa_add(wal, MI_MODE,
 +                     0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
 +                     /* XXX bit doesn't stick on Broadwater */
 +                     IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
  }
  
  static void
@@@ -1541,7 -1470,7 +1536,7 @@@ xcs_engine_wa_init(struct intel_engine_
  static void
  engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
  {
 -      if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
 +      if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 4))
                return;
  
        if (engine->class == RENDER_CLASS)
@@@ -1554,7 -1483,7 +1549,7 @@@ void intel_engine_init_workarounds(stru
  {
        struct i915_wa_list *wal = &engine->wa_list;
  
 -      if (INTEL_GEN(engine->i915) < 8)
 +      if (INTEL_GEN(engine->i915) < 4)
                return;
  
        wa_init_start(wal, "engine", engine->name);
@@@ -1678,16 -1607,6 +1673,16 @@@ static int engine_wa_list_verify(struc
                goto err_vma;
        }
  
 +      i915_vma_lock(vma);
 +      err = i915_request_await_object(rq, vma->obj, true);
 +      if (err == 0)
 +              err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 +      i915_vma_unlock(vma);
 +      if (err) {
 +              i915_request_add(rq);
 +              goto err_vma;
 +      }
 +
        err = wa_list_srm(rq, wal, vma);
        if (err)
                goto err_vma;
@@@ -555,9 -555,8 +555,9 @@@ static bool oa_buffer_check_unlocked(st
                                aging_tail = hw_tail;
                        stream->oa_buffer.aging_timestamp = now;
                } else {
 -                      DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n",
 -                                hw_tail);
 +                      drm_err(&stream->perf->i915->drm,
 +                              "Ignoring spurious out of range OA buffer tail pointer = %x\n",
 +                              hw_tail);
                }
        }
  
@@@ -687,7 -686,7 +687,7 @@@ static int gen8_append_oa_reports(struc
        u32 taken;
        int ret = 0;
  
 -      if (WARN_ON(!stream->enabled))
 +      if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
                return -EIO;
  
        spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
         * only be incremented by multiples of the report size (notably also
         * all a power of two).
         */
 -      if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
 -                    tail > OA_BUFFER_SIZE || tail % report_size,
 -                    "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
 -                    head, tail))
 +      if (drm_WARN_ONCE(&uncore->i915->drm,
 +                        head > OA_BUFFER_SIZE || head % report_size ||
 +                        tail > OA_BUFFER_SIZE || tail % report_size,
 +                        "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
 +                        head, tail))
                return -EIO;
  
  
                 * here would imply a driver bug that would result
                 * in an overrun.
                 */
 -              if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
 -                      DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
 +              if (drm_WARN_ON(&uncore->i915->drm,
 +                              (OA_BUFFER_SIZE - head) < report_size)) {
 +                      drm_err(&uncore->i915->drm,
 +                              "Spurious OA head ptr: non-integral report offset\n");
                        break;
                }
  
@@@ -900,7 -896,7 +900,7 @@@ static int gen8_oa_read(struct i915_per
        i915_reg_t oastatus_reg;
        int ret;
  
 -      if (WARN_ON(!stream->oa_buffer.vaddr))
 +      if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
                return -EIO;
  
        oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
@@@ -990,7 -986,7 +990,7 @@@ static int gen7_append_oa_reports(struc
        u32 taken;
        int ret = 0;
  
 -      if (WARN_ON(!stream->enabled))
 +      if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
                return -EIO;
  
        spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
         * only be incremented by multiples of the report size (notably also
         * all a power of two).
         */
 -      if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
 -                    tail > OA_BUFFER_SIZE || tail % report_size,
 -                    "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
 -                    head, tail))
 +      if (drm_WARN_ONCE(&uncore->i915->drm,
 +                        head > OA_BUFFER_SIZE || head % report_size ||
 +                        tail > OA_BUFFER_SIZE || tail % report_size,
 +                        "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
 +                        head, tail))
                return -EIO;
  
  
                 * here would imply a driver bug that would result
                 * in an overrun.
                 */
 -              if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
 -                      DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
 +              if (drm_WARN_ON(&uncore->i915->drm,
 +                              (OA_BUFFER_SIZE - head) < report_size)) {
 +                      drm_err(&uncore->i915->drm,
 +                              "Spurious OA head ptr: non-integral report offset\n");
                        break;
                }
  
@@@ -1117,7 -1110,7 +1117,7 @@@ static int gen7_oa_read(struct i915_per
        u32 oastatus1;
        int ret;
  
 -      if (WARN_ON(!stream->oa_buffer.vaddr))
 +      if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
                return -EIO;
  
        oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
@@@ -1326,13 -1319,7 +1326,13 @@@ static int oa_get_render_ctx_id(struct 
        case 12: {
                stream->specific_ctx_id_mask =
                        ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
 -              stream->specific_ctx_id = stream->specific_ctx_id_mask;
 +              /*
 +               * Pick an unused context id
 +               * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts
 +               * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
 +               */
 +              stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
 +              BUILD_BUG_ON((GEN12_MAX_CONTEXT_HW_ID - 1) < NUM_CONTEXT_TAG);
                break;
        }
  
                MISSING_CASE(INTEL_GEN(ce->engine->i915));
        }
  
 -      ce->tag = stream->specific_ctx_id_mask;
 +      ce->tag = stream->specific_ctx_id;
  
 -      DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
 -                       stream->specific_ctx_id,
 -                       stream->specific_ctx_id_mask);
 +      drm_dbg(&stream->perf->i915->drm,
 +              "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
 +              stream->specific_ctx_id,
 +              stream->specific_ctx_id_mask);
  
        return 0;
  }
@@@ -1589,12 -1575,11 +1589,12 @@@ static void gen12_init_oa_buffer(struc
  
  static int alloc_oa_buffer(struct i915_perf_stream *stream)
  {
 +      struct drm_i915_private *i915 = stream->perf->i915;
        struct drm_i915_gem_object *bo;
        struct i915_vma *vma;
        int ret;
  
 -      if (WARN_ON(stream->oa_buffer.vma))
 +      if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
                return -ENODEV;
  
        BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
  
        bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
        if (IS_ERR(bo)) {
 -              DRM_ERROR("Failed to allocate OA buffer\n");
 +              drm_err(&i915->drm, "Failed to allocate OA buffer\n");
                return PTR_ERR(bo);
        }
  
@@@ -1684,8 -1669,7 +1684,8 @@@ static int alloc_noa_wait(struct i915_p
  
        bo = i915_gem_object_create_internal(i915, 4096);
        if (IS_ERR(bo)) {
 -              DRM_ERROR("Failed to allocate NOA wait batchbuffer\n");
 +              drm_err(&i915->drm,
 +                      "Failed to allocate NOA wait batchbuffer\n");
                return PTR_ERR(bo);
        }
  
        return i915_vma_get(oa_bo->vma);
  }
  
- static int emit_oa_config(struct i915_perf_stream *stream,
-                         struct i915_oa_config *oa_config,
-                         struct intel_context *ce)
+ static struct i915_request *
+ emit_oa_config(struct i915_perf_stream *stream,
+              struct i915_oa_config *oa_config,
+              struct intel_context *ce)
  {
        struct i915_request *rq;
        struct i915_vma *vma;
  
        vma = get_oa_vma(stream, oa_config);
        if (IS_ERR(vma))
-               return PTR_ERR(vma);
+               return ERR_CAST(vma);
  
        err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
        if (err)
        err = rq->engine->emit_bb_start(rq,
                                        vma->node.start, 0,
                                        I915_DISPATCH_SECURE);
+       if (err)
+               goto err_add_request;
+       i915_request_get(rq);
  err_add_request:
        i915_request_add(rq);
  err_vma_unpin:
        i915_vma_unpin(vma);
  err_vma_put:
        i915_vma_put(vma);
-       return err;
+       return err ? ERR_PTR(err) : rq;
  }
  
  static struct intel_context *oa_context(struct i915_perf_stream *stream)
        return stream->pinned_ctx ?: stream->engine->kernel_context;
  }
  
- static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+ static struct i915_request *
+ hsw_enable_metric_set(struct i915_perf_stream *stream)
  {
        struct intel_uncore *uncore = stream->uncore;
  
@@@ -2422,7 -2412,8 +2428,8 @@@ static int lrc_configure_all_contexts(s
        return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
  }
  
- static int gen8_enable_metric_set(struct i915_perf_stream *stream)
+ static struct i915_request *
+ gen8_enable_metric_set(struct i915_perf_stream *stream)
  {
        struct intel_uncore *uncore = stream->uncore;
        struct i915_oa_config *oa_config = stream->oa_config;
         */
        ret = lrc_configure_all_contexts(stream, oa_config);
        if (ret)
-               return ret;
+               return ERR_PTR(ret);
  
        return emit_oa_config(stream, oa_config, oa_context(stream));
  }
@@@ -2476,7 -2467,8 +2483,8 @@@ static u32 oag_report_ctx_switches(cons
                             0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
  }
  
- static int gen12_enable_metric_set(struct i915_perf_stream *stream)
+ static struct i915_request *
+ gen12_enable_metric_set(struct i915_perf_stream *stream)
  {
        struct intel_uncore *uncore = stream->uncore;
        struct i915_oa_config *oa_config = stream->oa_config;
         */
        ret = gen12_configure_all_contexts(stream, oa_config);
        if (ret)
-               return ret;
+               return ERR_PTR(ret);
  
        /*
         * For Gen12, performance counters are context
        if (stream->ctx) {
                ret = gen12_configure_oar_context(stream, true);
                if (ret)
-                       return ret;
+                       return ERR_PTR(ret);
        }
  
        return emit_oa_config(stream, oa_config, oa_context(stream));
@@@ -2661,8 -2653,7 +2669,8 @@@ static void gen7_oa_disable(struct i915
        if (intel_wait_for_register(uncore,
                                    GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
                                    50))
 -              DRM_ERROR("wait for OA to be disabled timed out\n");
 +              drm_err(&stream->perf->i915->drm,
 +                      "wait for OA to be disabled timed out\n");
  }
  
  static void gen8_oa_disable(struct i915_perf_stream *stream)
        if (intel_wait_for_register(uncore,
                                    GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
                                    50))
 -              DRM_ERROR("wait for OA to be disabled timed out\n");
 +              drm_err(&stream->perf->i915->drm,
 +                      "wait for OA to be disabled timed out\n");
  }
  
  static void gen12_oa_disable(struct i915_perf_stream *stream)
                                    GEN12_OAG_OACONTROL,
                                    GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
                                    50))
 -              DRM_ERROR("wait for OA to be disabled timed out\n");
 +              drm_err(&stream->perf->i915->drm,
 +                      "wait for OA to be disabled timed out\n");
  }
  
  /**
@@@ -2715,6 -2704,20 +2723,20 @@@ static const struct i915_perf_stream_op
        .read = i915_oa_read,
  };
  
+ static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
+ {
+       struct i915_request *rq;
+       rq = stream->perf->ops.enable_metric_set(stream);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+       i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+       i915_request_put(rq);
+       return 0;
+ }
  /**
   * i915_oa_stream_init - validate combined props for OA stream and init
   * @stream: An i915 perf stream
@@@ -2737,7 -2740,6 +2759,7 @@@ static int i915_oa_stream_init(struct i
                               struct drm_i915_perf_open_param *param,
                               struct perf_open_properties *props)
  {
 +      struct drm_i915_private *i915 = stream->perf->i915;
        struct i915_perf *perf = stream->perf;
        int format_size;
        int ret;
        stream->sample_size += format_size;
  
        stream->oa_buffer.format_size = format_size;
 -      if (WARN_ON(stream->oa_buffer.format_size == 0))
 +      if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
                return -EINVAL;
  
        stream->hold_preemption = props->hold_preemption;
        stream->ops = &i915_oa_stream_ops;
        perf->exclusive_stream = stream;
  
-       ret = perf->ops.enable_metric_set(stream);
+       ret = i915_perf_stream_enable_sync(stream);
        if (ret) {
                DRM_DEBUG("Unable to enable metric set\n");
                goto err_enable;
@@@ -3167,7 -3169,7 +3189,7 @@@ static long i915_perf_config_locked(str
                return -EINVAL;
  
        if (config != stream->oa_config) {
-               int err;
+               struct i915_request *rq;
  
                /*
                 * If OA is bound to a specific context, emit the
                 * When set globally, we use a low priority kernel context,
                 * so it will effectively take effect when idle.
                 */
-               err = emit_oa_config(stream, config, oa_context(stream));
-               if (err == 0)
+               rq = emit_oa_config(stream, config, oa_context(stream));
+               if (!IS_ERR(rq)) {
                        config = xchg(&stream->oa_config, config);
-               else
-                       ret = err;
+                       i915_request_put(rq);
+               } else {
+                       ret = PTR_ERR(rq);
+               }
        }
  
        i915_oa_config_put(config);
@@@ -51,6 -51,7 +51,6 @@@ struct execute_cb 
  static struct i915_global_request {
        struct i915_global base;
        struct kmem_cache *slab_requests;
 -      struct kmem_cache *slab_dependencies;
        struct kmem_cache *slab_execute_cbs;
  } global;
  
@@@ -202,19 -203,6 +202,19 @@@ static void free_capture_list(struct i9
        }
  }
  
 +static void __i915_request_fill(struct i915_request *rq, u8 val)
 +{
 +      void *vaddr = rq->ring->vaddr;
 +      u32 head;
 +
 +      head = rq->infix;
 +      if (rq->postfix < head) {
 +              memset(vaddr + head, val, rq->ring->size - head);
 +              head = 0;
 +      }
 +      memset(vaddr + head, val, rq->postfix - head);
 +}
 +
  static void remove_from_engine(struct i915_request *rq)
  {
        struct intel_engine_cs *engine, *locked;
@@@ -259,9 -247,6 +259,9 @@@ bool i915_request_retire(struct i915_re
         */
        GEM_BUG_ON(!list_is_first(&rq->link,
                                  &i915_request_timeline(rq)->requests));
 +      if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
 +              /* Poison before we release our space in the ring */
 +              __i915_request_fill(rq, POISON_FREE);
        rq->ring->head = rq->postfix;
  
        /*
        spin_unlock_irq(&rq->lock);
  
        remove_from_client(rq);
-       list_del(&rq->link);
+       list_del_rcu(&rq->link);
  
        intel_context_exit(rq->context);
        intel_context_unpin(rq->context);
@@@ -736,6 -721,8 +736,8 @@@ __i915_request_create(struct intel_cont
        rq->infix = rq->ring->emit; /* end of header; start of user payload */
  
        intel_context_mark_active(ce);
+       list_add_tail_rcu(&rq->link, &tl->requests);
        return rq;
  
  err_unwind:
@@@ -792,13 -779,23 +794,23 @@@ i915_request_await_start(struct i915_re
        GEM_BUG_ON(i915_request_timeline(rq) ==
                   rcu_access_pointer(signal->timeline));
  
+       if (i915_request_started(signal))
+               return 0;
        fence = NULL;
        rcu_read_lock();
        spin_lock_irq(&signal->lock);
-       if (!i915_request_started(signal) &&
-           !list_is_first(&signal->link,
-                          &rcu_dereference(signal->timeline)->requests)) {
-               struct i915_request *prev = list_prev_entry(signal, link);
+       do {
+               struct list_head *pos = READ_ONCE(signal->link.prev);
+               struct i915_request *prev;
+               /* Confirm signal has not been retired, the link is valid */
+               if (unlikely(i915_request_started(signal)))
+                       break;
+               /* Is signal the earliest request on its timeline? */
+               if (pos == &rcu_dereference(signal->timeline)->requests)
+                       break;
  
                /*
                 * Peek at the request before us in the timeline. That
                 * after acquiring a reference to it, confirm that it is
                 * still part of the signaler's timeline.
                 */
-               if (i915_request_get_rcu(prev)) {
-                       if (list_next_entry(prev, link) == signal)
-                               fence = &prev->fence;
-                       else
-                               i915_request_put(prev);
+               prev = list_entry(pos, typeof(*prev), link);
+               if (!i915_request_get_rcu(prev))
+                       break;
+               /* After the strong barrier, confirm prev is still attached */
+               if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
+                       i915_request_put(prev);
+                       break;
                }
-       }
+               fence = &prev->fence;
+       } while (0);
        spin_unlock_irq(&signal->lock);
        rcu_read_unlock();
        if (!fence)
@@@ -901,12 -903,6 +918,12 @@@ emit_semaphore_wait(struct i915_reques
                    struct i915_request *from,
                    gfp_t gfp)
  {
 +      if (!intel_context_use_semaphores(to->context))
 +              goto await_fence;
 +
 +      if (!rcu_access_pointer(from->hwsp_cacheline))
 +              goto await_fence;
 +
        /* Just emit the first semaphore we see as request space is limited. */
        if (already_busywaiting(to) & from->engine->mask)
                goto await_fence;
@@@ -952,8 -948,12 +969,8 @@@ i915_request_await_request(struct i915_
                ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
                                                       &from->submit,
                                                       I915_FENCE_GFP);
 -      else if (intel_context_use_semaphores(to->context))
 -              ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
        else
 -              ret = i915_sw_fence_await_dma_fence(&to->submit,
 -                                                  &from->fence, 0,
 -                                                  I915_FENCE_GFP);
 +              ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
        if (ret < 0)
                return ret;
  
@@@ -1052,8 -1052,6 +1069,8 @@@ __i915_request_await_execution(struct i
  {
        int err;
  
 +      GEM_BUG_ON(intel_context_is_barrier(from->context));
 +
        /* Submit both requests at the same time */
        err = __await_execution(to, from, hook, I915_FENCE_GFP);
        if (err)
@@@ -1194,6 -1192,9 +1211,6 @@@ i915_request_await_object(struct i915_r
  
  void i915_request_skip(struct i915_request *rq, int error)
  {
 -      void *vaddr = rq->ring->vaddr;
 -      u32 head;
 -
        GEM_BUG_ON(!IS_ERR_VALUE((long)error));
        dma_fence_set_error(&rq->fence, error);
  
         * context, clear out all the user operations leaving the
         * breadcrumb at the end (so we get the fence notifications).
         */
 -      head = rq->infix;
 -      if (rq->postfix < head) {
 -              memset(vaddr + head, 0, rq->ring->size - head);
 -              head = 0;
 -      }
 -      memset(vaddr + head, 0, rq->postfix - head);
 +      __i915_request_fill(rq, 0);
        rq->infix = rq->postfix;
  }
  
@@@ -1253,8 -1259,6 +1270,6 @@@ __i915_request_add_to_timeline(struct i
                                                         0);
        }
  
-       list_add_tail(&rq->link, &timeline->requests);
        /*
         * Make sure that no request gazumped us - if it was allocated after
         * our i915_request_alloc() and called __i915_request_add() before
@@@ -1582,8 -1586,6 +1597,8 @@@ long i915_request_wait(struct i915_requ
                        break;
                }
  
 +              intel_engine_flush_submission(rq->engine);
 +
                if (signal_pending_state(state, current)) {
                        timeout = -ERESTARTSYS;
                        break;
                        break;
                }
  
 -              intel_engine_flush_submission(rq->engine);
                timeout = io_schedule_timeout(timeout);
        }
        __set_current_state(TASK_RUNNING);
  
  static void i915_global_request_shrink(void)
  {
 -      kmem_cache_shrink(global.slab_dependencies);
        kmem_cache_shrink(global.slab_execute_cbs);
        kmem_cache_shrink(global.slab_requests);
  }
  
  static void i915_global_request_exit(void)
  {
 -      kmem_cache_destroy(global.slab_dependencies);
        kmem_cache_destroy(global.slab_execute_cbs);
        kmem_cache_destroy(global.slab_requests);
  }
@@@ -1648,9 -1653,17 +1663,9 @@@ int __init i915_global_request_init(voi
        if (!global.slab_execute_cbs)
                goto err_requests;
  
 -      global.slab_dependencies = KMEM_CACHE(i915_dependency,
 -                                            SLAB_HWCACHE_ALIGN |
 -                                            SLAB_RECLAIM_ACCOUNT);
 -      if (!global.slab_dependencies)
 -              goto err_execute_cbs;
 -
        i915_global_register(&global.base);
        return 0;
  
 -err_execute_cbs:
 -      kmem_cache_destroy(global.slab_execute_cbs);
  err_requests:
        kmem_cache_destroy(global.slab_requests);
        return -ENOMEM;
@@@ -405,83 -405,6 +405,83 @@@ static void mdp5_crtc_mode_set_nofb(str
        spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
  }
  
 +static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
 +{
 +      struct drm_device *dev = crtc->dev;
 +      struct drm_encoder *encoder;
 +
 +      drm_for_each_encoder(encoder, dev)
 +              if (encoder->crtc == crtc)
 +                      return encoder;
 +
 +      return NULL;
 +}
 +
 +static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc,
 +                                         bool in_vblank_irq,
 +                                         int *vpos, int *hpos,
 +                                         ktime_t *stime, ktime_t *etime,
 +                                         const struct drm_display_mode *mode)
 +{
 +      unsigned int pipe = crtc->index;
 +      struct drm_encoder *encoder;
 +      int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
 +
 +
 +      encoder = get_encoder_from_crtc(crtc);
 +      if (!encoder) {
 +              DRM_ERROR("no encoder found for crtc %d\n", pipe);
 +              return false;
 +      }
 +
 +      vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
 +      vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
 +
 +      /*
 +       * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
 +       * the end of VFP. Translate the porch values relative to the line
 +       * counter positions.
 +       */
 +
 +      vactive_start = vsw + vbp + 1;
 +
 +      vactive_end = vactive_start + mode->crtc_vdisplay;
 +
 +      /* last scan line before VSYNC */
 +      vfp_end = mode->crtc_vtotal;
 +
 +      if (stime)
 +              *stime = ktime_get();
 +
 +      line = mdp5_encoder_get_linecount(encoder);
 +
 +      if (line < vactive_start)
 +              line -= vactive_start;
 +      else if (line > vactive_end)
 +              line = line - vfp_end - vactive_start;
 +      else
 +              line -= vactive_start;
 +
 +      *vpos = line;
 +      *hpos = 0;
 +
 +      if (etime)
 +              *etime = ktime_get();
 +
 +      return true;
 +}
 +
 +static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
 +{
 +      struct drm_encoder *encoder;
 +
 +      encoder = get_encoder_from_crtc(crtc);
 +      if (!encoder)
 +              return 0;
 +
 +      return mdp5_encoder_get_framecount(encoder);
 +}
 +
  static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
                                     struct drm_crtc_state *old_state)
  {
@@@ -1131,10 -1054,6 +1131,10 @@@ static const struct drm_crtc_funcs mdp5
        .cursor_set = mdp5_crtc_cursor_set,
        .cursor_move = mdp5_crtc_cursor_move,
        .atomic_print_state = mdp5_crtc_atomic_print_state,
 +      .get_vblank_counter = mdp5_crtc_get_vblank_counter,
 +      .enable_vblank  = msm_crtc_enable_vblank,
 +      .disable_vblank = msm_crtc_disable_vblank,
 +      .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
  };
  
  static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
        .atomic_flush = mdp5_crtc_atomic_flush,
        .atomic_enable = mdp5_crtc_atomic_enable,
        .atomic_disable = mdp5_crtc_atomic_disable,
 +      .get_scanout_position = mdp5_crtc_get_scanout_position,
  };
  
  static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
@@@ -1191,8 -1109,8 +1191,8 @@@ static void mdp5_crtc_wait_for_pp_done(
        ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
                                                msecs_to_jiffies(50));
        if (ret == 0)
-               dev_warn(dev->dev, "pp done time out, lm=%d\n",
-                        mdp5_cstate->pipeline.mixer->lm);
+               dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
+                                    mdp5_cstate->pipeline.mixer->lm);
  }
  
  static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
@@@ -336,7 -336,7 +336,7 @@@ static int dsi_mgr_connector_get_modes(
        return num;
  }
  
- static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+ static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
                                struct drm_display_mode *mode)
  {
        int id = dsi_mgr_connector_get_id(connector);
@@@ -506,6 -506,7 +506,7 @@@ static void dsi_mgr_bridge_post_disable
        struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
        struct mipi_dsi_host *host = msm_dsi->host;
        struct drm_panel *panel = msm_dsi->panel;
+       struct msm_dsi_pll *src_pll;
        bool is_dual_dsi = IS_DUAL_DSI();
        int ret;
  
                                                                id, ret);
        }
  
+       /* Save PLL status if it is a clock source */
+       src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+       msm_dsi_pll_save_state(src_pll);
        ret = msm_dsi_host_power_off(host);
        if (ret)
                pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
@@@ -684,7 -689,7 +689,7 @@@ struct drm_bridge *msm_dsi_manager_brid
        bridge = &dsi_bridge->base;
        bridge->funcs = &dsi_mgr_bridge_funcs;
  
 -      ret = drm_bridge_attach(encoder, bridge, NULL);
 +      ret = drm_bridge_attach(encoder, bridge, NULL, 0);
        if (ret)
                goto fail;
  
@@@ -713,7 -718,7 +718,7 @@@ struct drm_connector *msm_dsi_manager_e
        encoder = msm_dsi->encoder;
  
        /* link the internal dsi bridge to the external bridge */
 -      drm_bridge_attach(encoder, ext_bridge, int_bridge);
 +      drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
  
        /*
         * we need the drm_connector created by the external bridge
@@@ -280,12 -280,8 +280,8 @@@ static void panfrost_job_cleanup(struc
        }
  
        if (job->bos) {
-               struct panfrost_gem_object *bo;
-               for (i = 0; i < job->bo_count; i++) {
-                       bo = to_panfrost_bo(job->bos[i]);
+               for (i = 0; i < job->bo_count; i++)
                        drm_gem_object_put_unlocked(job->bos[i]);
-               }
  
                kvfree(job->bos);
        }
@@@ -512,7 -508,7 +508,7 @@@ int panfrost_job_init(struct panfrost_d
                return -ENODEV;
  
        ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
 -                             IRQF_SHARED, "job", pfdev);
 +                             IRQF_SHARED, KBUILD_MODNAME "-job", pfdev);
        if (ret) {
                dev_err(pfdev->dev, "failed to request job irq");
                return ret;
@@@ -151,7 -151,12 +151,12 @@@ u32 panfrost_mmu_as_get(struct panfrost
        as = mmu->as;
        if (as >= 0) {
                int en = atomic_inc_return(&mmu->as_count);
-               WARN_ON(en >= NUM_JOB_SLOTS);
+               /*
+                * AS can be retained by active jobs or a perfcnt context,
+                * hence the '+ 1' here.
+                */
+               WARN_ON(en >= (NUM_JOB_SLOTS + 1));
  
                list_move(&mmu->list, &pfdev->as_lru_list);
                goto out;
@@@ -596,33 -601,27 +601,27 @@@ static irqreturn_t panfrost_mmu_irq_han
                source_id = (fault_status >> 16);
  
                /* Page fault only */
-               if ((status & mask) == BIT(i)) {
-                       WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
+               ret = -1;
+               if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
                        ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
-                       if (!ret) {
-                               mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
-                               status &= ~mask;
-                               continue;
-                       }
-               }
  
-               /* terminal fault, print info about the fault */
-               dev_err(pfdev->dev,
-                       "Unhandled Page fault in AS%d at VA 0x%016llX\n"
-                       "Reason: %s\n"
-                       "raw fault status: 0x%X\n"
-                       "decoded fault status: %s\n"
-                       "exception type 0x%X: %s\n"
-                       "access type 0x%X: %s\n"
-                       "source id 0x%X\n",
-                       i, addr,
-                       "TODO",
-                       fault_status,
-                       (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
-                       exception_type, panfrost_exception_name(pfdev, exception_type),
-                       access_type, access_type_name(pfdev, fault_status),
-                       source_id);
+               if (ret)
+                       /* terminal fault, print info about the fault */
+                       dev_err(pfdev->dev,
+                               "Unhandled Page fault in AS%d at VA 0x%016llX\n"
+                               "Reason: %s\n"
+                               "raw fault status: 0x%X\n"
+                               "decoded fault status: %s\n"
+                               "exception type 0x%X: %s\n"
+                               "access type 0x%X: %s\n"
+                               "source id 0x%X\n",
+                               i, addr,
+                               "TODO",
+                               fault_status,
+                               (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+                               exception_type, panfrost_exception_name(pfdev, exception_type),
+                               access_type, access_type_name(pfdev, fault_status),
+                               source_id);
  
                mmu_write(pfdev, MMU_INT_CLEAR, mask);
  
@@@ -641,11 -640,9 +640,11 @@@ int panfrost_mmu_init(struct panfrost_d
        if (irq <= 0)
                return -ENODEV;
  
 -      err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
 +      err = devm_request_threaded_irq(pfdev->dev, irq,
 +                                      panfrost_mmu_irq_handler,
                                        panfrost_mmu_irq_handler_thread,
 -                                      IRQF_SHARED, "mmu", pfdev);
 +                                      IRQF_SHARED, KBUILD_MODNAME "-mmu",
 +                                      pfdev);
  
        if (err) {
                dev_err(pfdev->dev, "failed to request mmu irq");
@@@ -507,13 -507,15 +507,14 @@@ static int ttm_buffer_object_transfer(s
        fbo->base.moving = NULL;
        drm_vma_node_reset(&fbo->base.base.vma_node);
  
 -      kref_init(&fbo->base.list_kref);
        kref_init(&fbo->base.kref);
        fbo->base.destroy = &ttm_transfered_destroy;
        fbo->base.acc_size = 0;
 -      if (bo->base.resv == &bo->base._resv)
 +      if (bo->type != ttm_bo_type_sg)
                fbo->base.base.resv = &fbo->base.base._resv;
  
        dma_resv_init(&fbo->base.base._resv);
+       fbo->base.base.dev = NULL;
        ret = dma_resv_trylock(&fbo->base.base._resv);
        WARN_ON(!ret);
  
@@@ -23,7 -23,6 +23,7 @@@
   * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
   */
  
 +#include <linux/dma-mapping.h>
  #include <linux/moduleparam.h>
  
  #include "virtgpu_drv.h"
@@@ -43,8 -42,8 +43,8 @@@ static int virtio_gpu_resource_id_get(s
                 * "f91a9dd35715 Fix unlinking resources from hash
                 * table." (Feb 2019) fixes the bug.
                 */
-               static int handle;
-               handle++;
+               static atomic_t seqno = ATOMIC_INIT(0);
+               int handle = atomic_inc_return(&seqno);
                *resid = handle + 1;
        } else {
                int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
@@@ -62,40 -61,21 +62,40 @@@ static void virtio_gpu_resource_id_put(
        }
  }
  
 -static void virtio_gpu_free_object(struct drm_gem_object *obj)
 +void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
  {
 -      struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
  
 -      if (bo->pages)
 -              virtio_gpu_object_detach(vgdev, bo);
 -      if (bo->created)
 -              virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
 +      if (bo->pages) {
 +              if (bo->mapped) {
 +                      dma_unmap_sg(vgdev->vdev->dev.parent,
 +                                   bo->pages->sgl, bo->mapped,
 +                                   DMA_TO_DEVICE);
 +                      bo->mapped = 0;
 +              }
 +              sg_free_table(bo->pages);
 +              bo->pages = NULL;
 +              drm_gem_shmem_unpin(&bo->base.base);
 +      }
        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
 +      drm_gem_shmem_free_object(&bo->base.base);
 +}
 +
 +static void virtio_gpu_free_object(struct drm_gem_object *obj)
 +{
 +      struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
 +      struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
  
 -      drm_gem_shmem_free_object(obj);
 +      if (bo->created) {
 +              virtio_gpu_cmd_unref_resource(vgdev, bo);
 +              virtio_gpu_notify(vgdev);
 +              /* completion handler calls virtio_gpu_cleanup_object() */
 +              return;
 +      }
 +      virtio_gpu_cleanup_object(bo);
  }
  
 -static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
 +static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
        .free = virtio_gpu_free_object,
        .open = virtio_gpu_gem_object_open,
        .close = virtio_gpu_gem_object_close,
        .get_sg_table = drm_gem_shmem_get_sg_table,
        .vmap = drm_gem_shmem_vmap,
        .vunmap = drm_gem_shmem_vunmap,
 -      .mmap = &drm_gem_shmem_mmap,
 +      .mmap = drm_gem_shmem_mmap,
  };
  
 +bool virtio_gpu_is_shmem(struct drm_gem_object *obj)
 +{
 +      return obj->funcs == &virtio_gpu_shmem_funcs;
 +}
 +
  struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
                                                size_t size)
  {
        if (!bo)
                return NULL;
  
 -      bo->base.base.funcs = &virtio_gpu_gem_funcs;
 +      bo->base.base.funcs = &virtio_gpu_shmem_funcs;
+       bo->base.map_cached = true;
        return &bo->base.base;
  }
  
 +static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
 +                                      struct virtio_gpu_object *bo,
 +                                      struct virtio_gpu_mem_entry **ents,
 +                                      unsigned int *nents)
 +{
 +      bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
 +      struct scatterlist *sg;
 +      int si, ret;
 +
 +      ret = drm_gem_shmem_pin(&bo->base.base);
 +      if (ret < 0)
 +              return -EINVAL;
 +
 +      bo->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
 +      if (!bo->pages) {
 +              drm_gem_shmem_unpin(&bo->base.base);
 +              return -EINVAL;
 +      }
 +
 +      if (use_dma_api) {
 +              bo->mapped = dma_map_sg(vgdev->vdev->dev.parent,
 +                                      bo->pages->sgl, bo->pages->nents,
 +                                      DMA_TO_DEVICE);
 +              *nents = bo->mapped;
 +      } else {
 +              *nents = bo->pages->nents;
 +      }
 +
 +      *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
 +                            GFP_KERNEL);
 +      if (!(*ents)) {
 +              DRM_ERROR("failed to allocate ent list\n");
 +              return -ENOMEM;
 +      }
 +
 +      for_each_sg(bo->pages->sgl, sg, *nents, si) {
 +              (*ents)[si].addr = cpu_to_le64(use_dma_api
 +                                             ? sg_dma_address(sg)
 +                                             : sg_phys(sg));
 +              (*ents)[si].length = cpu_to_le32(sg->length);
 +              (*ents)[si].padding = 0;
 +      }
 +      return 0;
 +}
 +
  int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object_params *params,
                             struct virtio_gpu_object **bo_ptr,
        struct virtio_gpu_object_array *objs = NULL;
        struct drm_gem_shmem_object *shmem_obj;
        struct virtio_gpu_object *bo;
 +      struct virtio_gpu_mem_entry *ents;
 +      unsigned int nents;
        int ret;
  
        *bo_ptr = NULL;
                                               objs, fence);
        }
  
 -      ret = virtio_gpu_object_attach(vgdev, bo, NULL);
 +      ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
 +      if (ret != 0) {
 +              virtio_gpu_free_object(&shmem_obj->base);
 +              return ret;
 +      }
 +
 +      ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
        if (ret != 0) {
                virtio_gpu_free_object(&shmem_obj->base);
                return ret;
        }
  
 +      virtio_gpu_notify(vgdev);
        *bo_ptr = bo;
        return 0;
  
@@@ -9,7 -9,7 +9,7 @@@ menu "Backlight & LCD device support
  # LCD
  #
  config LCD_CLASS_DEVICE
 -        tristate "Lowlevel LCD controls"
 +      tristate "Lowlevel LCD controls"
        help
          This framework adds support for low-level control of LCD.
          Some framebuffer devices connect to platform-specific LCD modules
@@@ -141,10 -141,10 +141,10 @@@ endif # LCD_CLASS_DEVIC
  # Backlight
  #
  config BACKLIGHT_CLASS_DEVICE
 -        tristate "Lowlevel Backlight controls"
 +      tristate "Lowlevel Backlight controls"
        help
          This framework adds support for low-level control of the LCD
 -          backlight. This includes support for brightness and power.
 +        backlight. This includes support for brightness and power.
  
          To have support for your specific LCD panel you will have to
          select the proper drivers which depend on this option.
@@@ -272,7 -272,7 +272,7 @@@ config BACKLIGHT_APPL
         tristate "Apple Backlight Driver"
         depends on X86 && ACPI
         help
 -         If you have an Intel-based Apple say Y to enable a driver for its
 +       If you have an Intel-based Apple say Y to enable a driver for its
         backlight.
  
  config BACKLIGHT_TOSA
@@@ -456,6 -456,13 +456,13 @@@ config BACKLIGHT_RAVE_S
        help
          Support for backlight control on RAVE SP device.
  
+ config BACKLIGHT_LED
+       tristate "Generic LED based Backlight Driver"
+       depends on LEDS_CLASS && OF
+       help
+         If you have a LCD backlight adjustable by LED class driver, say Y
+         to enable this driver.
  endif # BACKLIGHT_CLASS_DEVICE
  
  endmenu