Merge tag 'drm-misc-next-2019-08-08' of git://anongit.freedesktop.org/drm/drm-misc...
author    Dave Airlie <airlied@redhat.com>
Fri, 9 Aug 2019 06:04:15 +0000 (16:04 +1000)
committer Dave Airlie <airlied@redhat.com>
Fri, 9 Aug 2019 06:04:31 +0000 (16:04 +1000)
drm-misc-next for 5.4:

UAPI Changes:
 - HDCP: Add a Content protection type property (see the userspace sketch
   just below)
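
   A minimal userspace sketch (not part of this tag) of how a compositor
   might request Type 1 protection through the new property. It assumes
   libdrm and the property names used by the HDCP series ("Content
   Protection", "HDCP Content Type"); treat the names and values as
   assumptions to be checked against the merged code.

    /*
     * Hedged sketch: look up a connector property by name and set it.
     * The property names and the Type 1 / "Desired" values below are
     * assumptions based on the HDCP work referenced above.
     */
    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    static int set_connector_prop(int fd, uint32_t connector_id,
                                  const char *name, uint64_t value)
    {
            drmModeObjectProperties *props;
            uint32_t i;
            int ret = -1;

            props = drmModeObjectGetProperties(fd, connector_id,
                                               DRM_MODE_OBJECT_CONNECTOR);
            if (!props)
                    return -1;

            for (i = 0; i < props->count_props; i++) {
                    drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

                    if (prop && !strcmp(prop->name, name))
                            ret = drmModeObjectSetProperty(fd, connector_id,
                                                           DRM_MODE_OBJECT_CONNECTOR,
                                                           prop->prop_id, value);
                    drmModeFreeProperty(prop);
            }
            drmModeObjectFreeProperties(props);
            return ret;
    }

    /*
     * Illustrative usage, e.g. from a compositor:
     *   set_connector_prop(fd, connector_id, "HDCP Content Type", 1);   // Type 1
     *   set_connector_prop(fd, connector_id, "Content Protection", 1);  // Desired
     */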

Cross-subsystem Changes:

Core Changes:
 - Continue to rework the include dependencies
 - fb: Remove the unused drm_gem_fbdev_fb_create function
 - drm-dp-helper: Make the link rate calculation more tolerant of rates
                  that are supported but not explicitly listed
 - fb-helper: Map the DRM client buffer only when required, and instantiate
              a shadow buffer when the device has a dirty function or
              requests one
 - connector: Add a helper to link the DDC adapter used by a connector to
              userspace
 - vblank: Switch from DRM_WAIT_ON to wait_event_interruptible_timeout
 - dma-buf: Fix a stack corruption
 - ttm: Embed a drm_gem_object struct to make ttm_buffer_object a
        superclass of GEM, and convert drivers to use it (see the sketch
        after this list).
 - hdcp: Improve reporting of the content protection type to userspace
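
   A minimal sketch of the ttm/GEM relationship mentioned in the list
   above; the struct layout is illustrative only (TTM-specific members
   elided) and the helper is hypothetical, but the field access matches
   the amdgpu hunks further down, where bo->tbo.resv becomes
   bo->tbo.base.resv.

    /* Illustrative only: ttm_buffer_object embeds drm_gem_object ("base"),
     * so the GEM object acts as the superclass and shared state such as
     * the reservation object is reached through it. */
    struct ttm_buffer_object {
            struct drm_gem_object base;     /* GEM superclass */
            /* ... TTM-specific members elided ... */
    };

    /* Hypothetical helper, not from the tree: */
    static inline struct reservation_object *
    example_bo_resv(struct ttm_buffer_object *bo)
    {
            return bo->base.resv;           /* was bo->resv before this rework */
    }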

Driver Changes:
 - Stop defining drm_gem_prime_import/export in individual drivers
 - Drop DRM_AUTH usage from drivers
 - Continue to drop drmP.h
 - Convert drivers to the connector ddc helper

 - ingenic: Add support for more panel-related cases
 - komeda: Support for dual-link
 - lima: Reduce logging
 - mgag200: Fix the cursor support
 - panfrost: Export GPU features register to userspace through an ioctl
 - pl111: Remove the CLD pads wiring support from the DT
 - rockchip: Rework to use DRM PSR helpers, fix a bug in the VOP_WIN_GET
             macro
 - sun4i: Improve support for color encoding and range
 - tinydrm: Rework SPI support, improve MIPI-DBI support, move to drm/tiny
 - vkms: Rework of the CRC tracking

 - bridges:
   - sii902x: Add support for audio graph card
   - tc358767: Rework AUX data handling code
   - ti-sn65dsi86: Add debugfs support and proper DSI mode flags handling

 - panels:
   - Support for GiantPlus GPM940B0, Sharp LQ070Y3DG3B, Ortustech
     COM37H3M, Novatek NT39016, Sharp LS020B1DD01D, Raydium RM67191,
     Boe Himax8279d, Sharp LD-D5116Z01B
   - Conversion of the device tree bindings to the YAML description
   - jh057n00900: Rework the enable / disable path

 - fbdev:
   - ssd1307fb: Support more devices based on that controller

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime.ripard@bootlin.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190808121423.xzpedzkpyecvsiy4@flea
13 files changed:
MAINTAINERS
drivers/gpu/drm/Kconfig
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/bridge/Kconfig
drivers/gpu/drm/drm_modes.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_hdcp.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem.c

diff --combined MAINTAINERS
@@@ -899,7 -899,7 +899,7 @@@ L: linux-iio@vger.kernel.or
  W:    http://ez.analog.com/community/linux-device-drivers
  S:    Supported
  F:    drivers/iio/adc/ad7124.c
 -F:    Documentation/devicetree/bindings/iio/adc/adi,ad7124.txt
 +F:    Documentation/devicetree/bindings/iio/adc/adi,ad7124.yaml
  
  ANALOG DEVICES INC AD7606 DRIVER
  M:    Stefan Popa <stefan.popa@analog.com>
@@@ -1194,7 -1194,7 +1194,7 @@@ F:      include/uapi/linux/if_arcnet.
  
  ARM ARCHITECTED TIMER DRIVER
  M:    Mark Rutland <mark.rutland@arm.com>
 -M:    Marc Zyngier <marc.zyngier@arm.com>
 +M:    Marc Zyngier <maz@kernel.org>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    arch/arm/include/asm/arch_timer.h
@@@ -2155,12 -2155,10 +2155,12 @@@ F:   Documentation/devicetree/bindings/ar
  
  ARM/RENESAS ARM64 ARCHITECTURE
  M:    Simon Horman <horms@verge.net.au>
 +M:    Geert Uytterhoeven <geert+renesas@glider.be>
  M:    Magnus Damm <magnus.damm@gmail.com>
  L:    linux-renesas-soc@vger.kernel.org
  Q:    http://patchwork.kernel.org/project/linux-renesas-soc/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next
  S:    Supported
  F:    arch/arm64/boot/dts/renesas/
  F:    Documentation/devicetree/bindings/arm/renesas.yaml
@@@ -2271,12 -2269,10 +2271,12 @@@ F:   drivers/media/platform/s5p-mfc
  
  ARM/SHMOBILE ARM ARCHITECTURE
  M:    Simon Horman <horms@verge.net.au>
 +M:    Geert Uytterhoeven <geert+renesas@glider.be>
  M:    Magnus Damm <magnus.damm@gmail.com>
  L:    linux-renesas-soc@vger.kernel.org
  Q:    http://patchwork.kernel.org/project/linux-renesas-soc/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next
  S:    Supported
  F:    arch/arm/boot/dts/emev2*
  F:    arch/arm/boot/dts/gr-peach*
@@@ -4194,7 -4190,7 +4194,7 @@@ M:      Jens Axboe <axboe@kernel.dk
  L:    cgroups@vger.kernel.org
  L:    linux-block@vger.kernel.org
  T:    git git://git.kernel.dk/linux-block
 -F:    Documentation/cgroup-v1/blkio-controller.rst
 +F:    Documentation/admin-guide/cgroup-v1/blkio-controller.rst
  F:    block/blk-cgroup.c
  F:    include/linux/blk-cgroup.h
  F:    block/blk-throttle.c
@@@ -4473,7 -4469,7 +4473,7 @@@ F:      arch/powerpc/platforms/powernv/pci-c
  F:    drivers/misc/cxl/
  F:    include/misc/cxl*
  F:    include/uapi/misc/cxl.h
 -F:    Documentation/powerpc/cxl.txt
 +F:    Documentation/powerpc/cxl.rst
  F:    Documentation/ABI/testing/sysfs-class-cxl
  
  CXLFLASH (IBM Coherent Accelerator Processor Interface CAPI Flash) SCSI DRIVER
@@@ -4484,7 -4480,7 +4484,7 @@@ L:      linux-scsi@vger.kernel.or
  S:    Supported
  F:    drivers/scsi/cxlflash/
  F:    include/uapi/scsi/cxlflash_ioctl.h
 -F:    Documentation/powerpc/cxlflash.txt
 +F:    Documentation/powerpc/cxlflash.rst
  
  CYBERPRO FB DRIVER
  M:    Russell King <linux@armlinux.org.uk>
@@@ -5099,17 -5095,24 +5099,24 @@@ S:   Maintaine
  F:    drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
  F:    Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt
  
+ DRM DRIVER FOR GRAIN MEDIA GM12U320 PROJECTORS
+ M:    Hans de Goede <hdegoede@redhat.com>
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
+ S:    Maintained
+ F:    drivers/gpu/drm/tiny/gm12u320.c
  DRM DRIVER FOR ILITEK ILI9225 PANELS
  M:    David Lechner <david@lechnology.com>
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
  S:    Maintained
- F:    drivers/gpu/drm/tinydrm/ili9225.c
+ F:    drivers/gpu/drm/tiny/ili9225.c
  F:    Documentation/devicetree/bindings/display/ilitek,ili9225.txt
  
  DRM DRIVER FOR HX8357D PANELS
  M:    Eric Anholt <eric@anholt.net>
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  S:    Maintained
- F:    drivers/gpu/drm/tinydrm/hx8357d.c
+ F:    drivers/gpu/drm/tiny/hx8357d.c
  F:    Documentation/devicetree/bindings/display/himax,hx8357d.txt
  
  DRM DRIVER FOR INTEL I810 VIDEO CARDS
@@@ -5129,8 -5132,9 +5136,9 @@@ F:      drivers/gpu/drm/mgag200
  
  DRM DRIVER FOR MI0283QT
  M:    Noralf Trønnes <noralf@tronnes.org>
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
  S:    Maintained
- F:    drivers/gpu/drm/tinydrm/mi0283qt.c
+ F:    drivers/gpu/drm/tiny/mi0283qt.c
  F:    Documentation/devicetree/bindings/display/multi-inno,mi0283qt.txt
  
  DRM DRIVER FOR MSM ADRENO GPU
@@@ -5162,8 -5166,9 +5170,9 @@@ F:      Documentation/devicetree/bindings/di
  
  DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
  M:    Noralf Trønnes <noralf@tronnes.org>
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
  S:    Maintained
- F:    drivers/gpu/drm/tinydrm/repaper.c
+ F:    drivers/gpu/drm/tiny/repaper.c
  F:    Documentation/devicetree/bindings/display/repaper.txt
  
  DRM DRIVER FOR QEMU'S CIRRUS DEVICE
@@@ -5185,6 -5190,12 +5194,12 @@@ S:    Maintaine
  F:    drivers/gpu/drm/qxl/
  F:    include/uapi/drm/qxl_drm.h
  
+ DRM DRIVER FOR RAYDIUM RM67191 PANELS
+ M:    Robert Chiras <robert.chiras@nxp.com>
+ S:    Maintained
+ F:    drivers/gpu/drm/panel/panel-raydium-rm67191.c
+ F:    Documentation/devicetree/bindings/display/panel/raydium,rm67191.txt
  DRM DRIVER FOR RAGE 128 VIDEO CARDS
  S:    Orphan / Obsolete
  F:    drivers/gpu/drm/r128/
@@@ -5192,6 -5203,7 +5207,7 @@@ F:      include/uapi/drm/r128_drm.
  
  DRM DRIVER FOR ROCKTECH JH057N00900 PANELS
  M:    Guido Günther <agx@sigxcpu.org>
+ R:    Purism Kernel Team <kernel@puri.sm>
  S:    Maintained
  F:    drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
  F:    Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.txt
@@@ -5214,14 -5226,16 +5230,16 @@@ F:   Documentation/devicetree/bindings/di
  
  DRM DRIVER FOR SITRONIX ST7586 PANELS
  M:    David Lechner <david@lechnology.com>
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
  S:    Maintained
- F:    drivers/gpu/drm/tinydrm/st7586.c
+ F:    drivers/gpu/drm/tiny/st7586.c
  F:    Documentation/devicetree/bindings/display/sitronix,st7586.txt
  
  DRM DRIVER FOR SITRONIX ST7735R PANELS
  M:    David Lechner <david@lechnology.com>
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
  S:    Maintained
- F:    drivers/gpu/drm/tinydrm/st7735r.c
+ F:    drivers/gpu/drm/tiny/st7735r.c
  F:    Documentation/devicetree/bindings/display/sitronix,st7735r.txt
  
  DRM DRIVER FOR ST-ERICSSON MCDE
@@@ -5240,7 -5254,7 +5258,7 @@@ M:      Linus Walleij <linus.walleij@linaro.
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  S:    Maintained
  F:    drivers/gpu/drm/panel/panel-tpo-tpg110.c
- F:    Documentation/devicetree/bindings/display/panel/tpo,tpg110.txt
+ F:    Documentation/devicetree/bindings/display/panel/tpo,tpg110.yaml
  
  DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS
  M:    Dave Airlie <airlied@redhat.com>
@@@ -5328,6 -5342,7 +5346,7 @@@ F:      Documentation/gpu/meson.rs
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  
  DRM DRIVERS FOR ATMEL HLCDC
+ M:    Sam Ravnborg <sam@ravnborg.org>
  M:    Boris Brezillon <bbrezillon@kernel.org>
  L:    dri-devel@lists.freedesktop.org
  S:    Supported
@@@ -5337,7 -5352,10 +5356,10 @@@ T:    git git://anongit.freedesktop.org/dr
  
  DRM DRIVERS FOR BRIDGE CHIPS
  M:    Andrzej Hajda <a.hajda@samsung.com>
+ M:    Neil Armstrong <narmstrong@baylibre.com>
  R:    Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
+ R:    Jonas Karlman <jonas@kwiboo.se>
+ R:    Jernej Skrabec <jernej.skrabec@siol.net>
  S:    Maintained
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  F:    drivers/gpu/drm/bridge/
@@@ -5525,14 -5543,6 +5547,6 @@@ F:     drivers/gpu/drm/panel
  F:    include/drm/drm_panel.h
  F:    Documentation/devicetree/bindings/display/panel/
  
- DRM TINYDRM DRIVERS
- M:    Noralf Trønnes <noralf@tronnes.org>
- W:    https://github.com/notro/tinydrm/wiki/Development
- T:    git git://anongit.freedesktop.org/drm/drm-misc
- S:    Maintained
- F:    drivers/gpu/drm/tinydrm/
- F:    include/drm/tinydrm/
  DRM DRIVERS FOR XEN
  M:    Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
  T:    git git://anongit.freedesktop.org/drm/drm-misc
@@@ -6326,8 -6336,7 +6340,8 @@@ F:      Documentation/devicetree/bindings/co
  F:    drivers/counter/ftm-quaddec.c
  
  FLOPPY DRIVER
 -S:    Orphan
 +M:    Denis Efremov <efremov@linux.com>
 +S:    Odd Fixes
  L:    linux-block@vger.kernel.org
  F:    drivers/block/floppy.c
  
@@@ -6861,7 -6870,7 +6875,7 @@@ R:      Sagi Shahar <sagis@google.com
  R:    Jon Olson <jonolson@google.com>
  L:    netdev@vger.kernel.org
  S:    Supported
 -F:    Documentation/networking/device_drivers/google/gve.txt
 +F:    Documentation/networking/device_drivers/google/gve.rst
  F:    drivers/net/ethernet/google
  
  GPD POCKET FAN DRIVER
@@@ -8495,7 -8504,7 +8509,7 @@@ S:      Obsolet
  F:    include/uapi/linux/ipx.h
  
  IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
 -M:    Marc Zyngier <marc.zyngier@arm.com>
 +M:    Marc Zyngier <maz@kernel.org>
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
  F:    Documentation/IRQ-domain.txt
@@@ -8513,7 -8522,7 +8527,7 @@@ F:      kernel/irq
  IRQCHIP DRIVERS
  M:    Thomas Gleixner <tglx@linutronix.de>
  M:    Jason Cooper <jason@lakedaemon.net>
 -M:    Marc Zyngier <marc.zyngier@arm.com>
 +M:    Marc Zyngier <maz@kernel.org>
  L:    linux-kernel@vger.kernel.org
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@@ -8813,7 -8822,7 +8827,7 @@@ L:      kvm@vger.kernel.or
  W:    http://www.linux-kvm.org
  T:    git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
  S:    Supported
 -F:    Documentation/virtual/kvm/
 +F:    Documentation/virt/kvm/
  F:    include/trace/events/kvm.h
  F:    include/uapi/asm-generic/kvm*
  F:    include/uapi/linux/kvm*
@@@ -8833,10 -8842,10 +8847,10 @@@ F:   arch/x86/include/asm/svm.
  F:    arch/x86/kvm/svm.c
  
  KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
 -M:    Marc Zyngier <marc.zyngier@arm.com>
 +M:    Marc Zyngier <maz@kernel.org>
  R:    James Morse <james.morse@arm.com>
 -R:    Julien Thierry <julien.thierry@arm.com>
 -R:    Suzuki K Pouloze <suzuki.poulose@arm.com>
 +R:    Julien Thierry <julien.thierry.kdev@gmail.com>
 +R:    Suzuki K Poulose <suzuki.poulose@arm.com>
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  L:    kvmarm@lists.cs.columbia.edu
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
@@@ -12142,7 -12151,7 +12156,7 @@@ M:   Thomas Hellstrom <thellstrom@vmware.
  M:    "VMware, Inc." <pv-drivers@vmware.com>
  L:    virtualization@lists.linux-foundation.org
  S:    Supported
 -F:    Documentation/virtual/paravirt_ops.txt
 +F:    Documentation/virt/paravirt_ops.rst
  F:    arch/*/kernel/paravirt*
  F:    arch/*/include/asm/paravirt*.h
  F:    include/linux/hypervisor.h
@@@ -12399,7 -12408,7 +12413,7 @@@ F:   Documentation/PCI/pci-error-recovery
  F:    drivers/pci/pcie/aer.c
  F:    drivers/pci/pcie/dpc.c
  F:    drivers/pci/pcie/err.c
 -F:    Documentation/powerpc/eeh-pci-error-recovery.txt
 +F:    Documentation/powerpc/eeh-pci-error-recovery.rst
  F:    arch/powerpc/kernel/eeh*.c
  F:    arch/powerpc/platforms/*/eeh*.c
  F:    arch/powerpc/include/*/eeh*.h
@@@ -13730,7 -13739,6 +13744,7 @@@ F:   drivers/mtd/nand/raw/r852.
  F:    drivers/mtd/nand/raw/r852.h
  
  RISC-V ARCHITECTURE
 +M:    Paul Walmsley <paul.walmsley@sifive.com>
  M:    Palmer Dabbelt <palmer@sifive.com>
  M:    Albert Ou <aou@eecs.berkeley.edu>
  L:    linux-riscv@lists.infradead.org
@@@ -13953,6 -13961,7 +13967,6 @@@ F:   drivers/pci/hotplug/s390_pci_hpc.
  
  S390 VFIO-CCW DRIVER
  M:    Cornelia Huck <cohuck@redhat.com>
 -M:    Farhan Ali <alifm@linux.ibm.com>
  M:    Eric Farman <farman@linux.ibm.com>
  R:    Halil Pasic <pasic@linux.ibm.com>
  L:    linux-s390@vger.kernel.org
@@@ -14021,12 -14030,6 +14035,12 @@@ F: drivers/media/common/saa7146
  F:    drivers/media/pci/saa7146/
  F:    include/media/drv-intf/saa7146*
  
 +SAFESETID SECURITY MODULE
 +M:     Micah Morton <mortonm@chromium.org>
 +S:     Supported
 +F:     security/safesetid/
 +F:     Documentation/admin-guide/LSM/SafeSetID.rst
 +
  SAMSUNG AUDIO (ASoC) DRIVERS
  M:    Krzysztof Kozlowski <krzk@kernel.org>
  M:    Sangbeom Kim <sbkim73@samsung.com>
@@@ -16865,7 -16868,7 +16879,7 @@@ W:   http://user-mode-linux.sourceforge.n
  Q:    https://patchwork.ozlabs.org/project/linux-um/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml.git
  S:    Maintained
 -F:    Documentation/virtual/uml/
 +F:    Documentation/virt/uml/
  F:    arch/um/
  F:    arch/x86/um/
  F:    fs/hostfs/
@@@ -17134,7 -17137,7 +17148,7 @@@ F:   drivers/virtio/virtio_input.
  F:    include/uapi/linux/virtio_input.h
  
  VIRTIO IOMMU DRIVER
 -M:    Jean-Philippe Brucker <jean-philippe.brucker@arm.com>
 +M:    Jean-Philippe Brucker <jean-philippe@linaro.org>
  L:    virtualization@lists.linux-foundation.org
  S:    Maintained
  F:    drivers/iommu/virtio-iommu.c
@@@ -17183,6 -17186,7 +17197,6 @@@ F:   drivers/vme
  F:    include/linux/vme*
  
  VMWARE BALLOON DRIVER
 -M:    Julien Freche <jfreche@vmware.com>
  M:    Nadav Amit <namit@vmware.com>
  M:    "VMware, Inc." <pv-drivers@vmware.com>
  L:    linux-kernel@vger.kernel.org
diff --combined drivers/gpu/drm/Kconfig
@@@ -24,6 -24,10 +24,10 @@@ menuconfig DR
          details.  You should also select and configure AGP
          (/dev/agpgart) support if it is available for your platform.
  
+ config DRM_MIPI_DBI
+       tristate
+       depends on DRM
  config DRM_MIPI_DSI
        bool
        depends on DRM
@@@ -336,7 -340,7 +340,7 @@@ source "drivers/gpu/drm/mxsfb/Kconfig
  
  source "drivers/gpu/drm/meson/Kconfig"
  
- source "drivers/gpu/drm/tinydrm/Kconfig"
+ source "drivers/gpu/drm/tiny/Kconfig"
  
  source "drivers/gpu/drm/pl111/Kconfig"
  
@@@ -394,7 -398,7 +398,7 @@@ config DRM_R12
  config DRM_I810
        tristate "Intel I810"
        # !PREEMPT because of missing ioctl locking
 -      depends on DRM && AGP && AGP_INTEL && (!PREEMPT || BROKEN)
 +      depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
        help
          Choose this option if you have an Intel I810 graphics card.  If M is
          selected, the module will be called i810.  AGP support is required
@@@ -218,7 -218,7 +218,7 @@@ void amdgpu_amdkfd_unreserve_memory_lim
  static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
                                        struct amdgpu_amdkfd_fence *ef)
  {
-       struct reservation_object *resv = bo->tbo.resv;
+       struct reservation_object *resv = bo->tbo.base.resv;
        struct reservation_object_list *old, *new;
        unsigned int i, j, k;
  
@@@ -812,7 -812,7 +812,7 @@@ static int process_sync_pds_resv(struc
                struct amdgpu_bo *pd = peer_vm->root.base.bo;
  
                ret = amdgpu_sync_resv(NULL,
-                                       sync, pd->tbo.resv,
+                                       sync, pd->tbo.base.resv,
                                        AMDGPU_FENCE_OWNER_KFD, false);
                if (ret)
                        return ret;
@@@ -887,7 -887,7 +887,7 @@@ static int init_kfd_vm(struct amdgpu_v
                                  AMDGPU_FENCE_OWNER_KFD, false);
        if (ret)
                goto wait_pd_fail;
-       ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv, 1);
+       ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
        if (ret)
                goto reserve_shared_fail;
        amdgpu_bo_fence(vm->root.base.bo,
@@@ -1140,8 -1140,7 +1140,8 @@@ int amdgpu_amdkfd_gpuvm_alloc_memory_of
                        adev->asic_type != CHIP_FIJI &&
                        adev->asic_type != CHIP_POLARIS10 &&
                        adev->asic_type != CHIP_POLARIS11 &&
 -                      adev->asic_type != CHIP_POLARIS12) ?
 +                      adev->asic_type != CHIP_POLARIS12 &&
 +                      adev->asic_type != CHIP_VEGAM) ?
                        VI_BO_SIZE_ALIGN : 1;
  
        mapping_flags = AMDGPU_VM_PAGE_READABLE;
@@@ -2133,7 -2132,7 +2133,7 @@@ int amdgpu_amdkfd_add_gws_to_process(vo
         * Add process eviction fence to bo so they can
         * evict each other.
         */
-       ret = reservation_object_reserve_shared(gws_bo->tbo.resv, 1);
+       ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1);
        if (ret)
                goto reserve_shared_fail;
        amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
@@@ -402,7 -402,7 +402,7 @@@ static int amdgpu_cs_bo_validate(struc
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false,
-               .resv = bo->tbo.resv,
+               .resv = bo->tbo.base.resv,
                .flags = 0
        };
        uint32_t domain;
@@@ -730,7 -730,7 +730,7 @@@ static int amdgpu_cs_sync_rings(struct 
  
        list_for_each_entry(e, &p->validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-               struct reservation_object *resv = bo->tbo.resv;
+               struct reservation_object *resv = bo->tbo.base.resv;
  
                r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
                                     amdgpu_bo_explicit_sync(bo));
@@@ -1044,27 -1044,29 +1044,27 @@@ static int amdgpu_cs_process_fence_dep(
                        return r;
                }
  
 -              fence = amdgpu_ctx_get_fence(ctx, entity,
 -                                           deps[i].handle);
 +              fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
 +              amdgpu_ctx_put(ctx);
 +
 +              if (IS_ERR(fence))
 +                      return PTR_ERR(fence);
 +              else if (!fence)
 +                      continue;
  
                if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
 -                      struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
 +                      struct drm_sched_fence *s_fence;
                        struct dma_fence *old = fence;
  
 +                      s_fence = to_drm_sched_fence(fence);
                        fence = dma_fence_get(&s_fence->scheduled);
                        dma_fence_put(old);
                }
  
 -              if (IS_ERR(fence)) {
 -                      r = PTR_ERR(fence);
 -                      amdgpu_ctx_put(ctx);
 +              r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
 +              dma_fence_put(fence);
 +              if (r)
                        return r;
 -              } else if (fence) {
 -                      r = amdgpu_sync_fence(p->adev, &p->job->sync, fence,
 -                                      true);
 -                      dma_fence_put(fence);
 -                      amdgpu_ctx_put(ctx);
 -                      if (r)
 -                              return r;
 -              }
        }
        return 0;
  }
@@@ -1727,7 -1729,7 +1727,7 @@@ int amdgpu_cs_find_mapping(struct amdgp
        *map = mapping;
  
        /* Double check that the BO is reserved by this CS */
-       if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
+       if (reservation_object_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
                return -EINVAL;
  
        if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
@@@ -148,7 -148,7 +148,7 @@@ struct amdgpu_mgpu_info mgpu_info = 
        .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
  };
  int amdgpu_ras_enable = -1;
 -uint amdgpu_ras_mask = 0xffffffff;
 +uint amdgpu_ras_mask = 0xfffffffb;
  
  /**
   * DOC: vramlimit (int)
@@@ -1373,7 -1373,7 +1373,7 @@@ static struct drm_driver kms_driver = 
        .driver_features =
            DRIVER_USE_AGP | DRIVER_ATOMIC |
            DRIVER_GEM |
-           DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
+           DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
        .load = amdgpu_driver_load_kms,
        .open = amdgpu_driver_open_kms,
        .postclose = amdgpu_driver_postclose_kms,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = amdgpu_gem_prime_export,
        .gem_prime_import = amdgpu_gem_prime_import,
-       .gem_prime_res_obj = amdgpu_gem_prime_res_obj,
        .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
        .gem_prime_vmap = amdgpu_gem_prime_vmap,
@@@ -21,7 -21,7 +21,7 @@@ config DRM_ANALOGIX_ANX78X
        select DRM_KMS_HELPER
        select REGMAP_I2C
        ---help---
-         ANX78XX is an ultra-low Full-HD SlimPort transmitter
+         ANX78XX is an ultra-low power Full-HD SlimPort transmitter
          designed for portable devices. The ANX78XX transforms
          the HDMI output of an application processor to MyDP
          or DisplayPort.
@@@ -48,7 -48,6 +48,7 @@@ config DRM_DUMB_VGA_DA
  config DRM_LVDS_ENCODER
        tristate "Transparent parallel to LVDS encoder support"
        depends on OF
 +      select DRM_KMS_HELPER
        select DRM_PANEL_BRIDGE
        help
          Support for transparent parallel to LVDS encoders that don't require
@@@ -117,10 -116,9 +117,10 @@@ config DRM_THINE_THC63LVD102
  
  config DRM_TOSHIBA_TC358764
        tristate "TC358764 DSI/LVDS bridge"
 -      depends on DRM && DRM_PANEL
        depends on OF
        select DRM_MIPI_DSI
 +      select DRM_KMS_HELPER
 +      select DRM_PANEL
        help
          Toshiba TC358764 DSI/LVDS bridge driver.
  
@@@ -1686,7 -1686,7 +1686,7 @@@ static int drm_mode_parse_cmdline_optio
   *
   * Additionals options can be provided following the mode, using a comma to
   * separate each option. Valid options can be found in
 - * Documentation/fb/modedb.txt.
 + * Documentation/fb/modedb.rst.
   *
   * The intermediate drm_cmdline_mode structure is required to store additional
   * options from the command line modline like the force-enable/disable flag.
@@@ -1912,8 -1912,11 +1912,11 @@@ void drm_mode_convert_to_umode(struct d
        case HDMI_PICTURE_ASPECT_256_135:
                out->flags |= DRM_MODE_FLAG_PIC_AR_256_135;
                break;
-       case HDMI_PICTURE_ASPECT_RESERVED:
        default:
+               WARN(1, "Invalid aspect ratio (0%x) on mode\n",
+                    in->picture_aspect_ratio);
+               /* fall through */
+       case HDMI_PICTURE_ASPECT_NONE:
                out->flags |= DRM_MODE_FLAG_PIC_AR_NONE;
                break;
        }
@@@ -1972,20 -1975,22 +1975,22 @@@ int drm_mode_convert_umode(struct drm_d
  
        switch (in->flags & DRM_MODE_FLAG_PIC_AR_MASK) {
        case DRM_MODE_FLAG_PIC_AR_4_3:
-               out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_4_3;
+               out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
                break;
        case DRM_MODE_FLAG_PIC_AR_16_9:
-               out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_16_9;
+               out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
                break;
        case DRM_MODE_FLAG_PIC_AR_64_27:
-               out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_64_27;
+               out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27;
                break;
        case DRM_MODE_FLAG_PIC_AR_256_135:
-               out->picture_aspect_ratio |= HDMI_PICTURE_ASPECT_256_135;
+               out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135;
                break;
-       default:
+       case DRM_MODE_FLAG_PIC_AR_NONE:
                out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
                break;
+       default:
+               return -EINVAL;
        }
  
        out->status = drm_mode_validate_driver(dev, out);
@@@ -45,7 -45,6 +45,7 @@@
  #include "intel_lspcon.h"
  #include "intel_panel.h"
  #include "intel_psr.h"
 +#include "intel_tc.h"
  #include "intel_vdsc.h"
  
  struct ddi_buf_trans {
@@@ -847,8 -846,8 +847,8 @@@ cnl_get_buf_trans_edp(struct drm_i915_p
  }
  
  static const struct cnl_ddi_buf_trans *
 -icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port,
 -                      int type, int rate, int *n_entries)
 +icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
 +                      int *n_entries)
  {
        if (type == INTEL_OUTPUT_HDMI) {
                *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi);
  static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
  {
        int n_entries, level, default_entry;
 +      enum phy phy = intel_port_to_phy(dev_priv, port);
  
        level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
  
        if (INTEL_GEN(dev_priv) >= 11) {
 -              if (intel_port_is_combophy(dev_priv, port))
 -                      icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
 +              if (intel_phy_is_combo(dev_priv, phy))
 +                      icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
                                                0, &n_entries);
                else
                        n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
@@@ -1488,10 -1486,9 +1488,10 @@@ static void icl_ddi_clock_get(struct in
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
        enum port port = encoder->port;
 +      enum phy phy = intel_port_to_phy(dev_priv, port);
        int link_clock;
  
 -      if (intel_port_is_combophy(dev_priv, port)) {
 +      if (intel_phy_is_combo(dev_priv, phy)) {
                link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
        } else {
                enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
@@@ -1773,10 -1770,7 +1773,10 @@@ void intel_ddi_enable_transcoder_func(c
  
        /* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
        temp = TRANS_DDI_FUNC_ENABLE;
 -      temp |= TRANS_DDI_SELECT_PORT(port);
 +      if (INTEL_GEN(dev_priv) >= 12)
 +              temp |= TGL_TRANS_DDI_SELECT_PORT(port);
 +      else
 +              temp |= TRANS_DDI_SELECT_PORT(port);
  
        switch (crtc_state->pipe_bpp) {
        case 18:
@@@ -1856,13 -1850,8 +1856,13 @@@ void intel_ddi_disable_transcoder_func(
        i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
        u32 val = I915_READ(reg);
  
 -      val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
 -      val |= TRANS_DDI_PORT_NONE;
 +      if (INTEL_GEN(dev_priv) >= 12) {
 +              val &= ~(TRANS_DDI_FUNC_ENABLE | TGL_TRANS_DDI_PORT_MASK |
 +                       TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
 +      } else {
 +              val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK |
 +                       TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
 +      }
        I915_WRITE(reg, val);
  
        if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
@@@ -2014,19 -2003,10 +2014,19 @@@ static void intel_ddi_get_encoder_pipes
        mst_pipe_mask = 0;
        for_each_pipe(dev_priv, p) {
                enum transcoder cpu_transcoder = (enum transcoder)p;
 +              unsigned int port_mask, ddi_select;
 +
 +              if (INTEL_GEN(dev_priv) >= 12) {
 +                      port_mask = TGL_TRANS_DDI_PORT_MASK;
 +                      ddi_select = TGL_TRANS_DDI_SELECT_PORT(port);
 +              } else {
 +                      port_mask = TRANS_DDI_PORT_MASK;
 +                      ddi_select = TRANS_DDI_SELECT_PORT(port);
 +              }
  
                tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
  
 -              if ((tmp & TRANS_DDI_PORT_MASK) != TRANS_DDI_SELECT_PORT(port))
 +              if ((tmp & port_mask) != ddi_select)
                        continue;
  
                if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
@@@ -2105,7 -2085,6 +2105,7 @@@ static void intel_ddi_get_power_domains
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_digital_port *dig_port;
 +      enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
  
        /*
         * TODO: Add support for MST encoders. Atm, the following should never
         * ports.
         */
        if (intel_crtc_has_dp_encoder(crtc_state) ||
 -          intel_port_is_tc(dev_priv, encoder->port))
 +          intel_phy_is_tc(dev_priv, phy))
                intel_display_power_get(dev_priv,
                                        intel_ddi_main_link_aux_domain(dig_port));
  
@@@ -2143,14 -2122,9 +2143,14 @@@ void intel_ddi_enable_pipe_clock(const 
        enum port port = encoder->port;
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  
 -      if (cpu_transcoder != TRANSCODER_EDP)
 -              I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
 -                         TRANS_CLK_SEL_PORT(port));
 +      if (cpu_transcoder != TRANSCODER_EDP) {
 +              if (INTEL_GEN(dev_priv) >= 12)
 +                      I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
 +                                 TGL_TRANS_CLK_SEL_PORT(port));
 +              else
 +                      I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
 +                                 TRANS_CLK_SEL_PORT(port));
 +      }
  }
  
  void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
        struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
        enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
  
 -      if (cpu_transcoder != TRANSCODER_EDP)
 -              I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
 -                         TRANS_CLK_SEL_DISABLED);
 +      if (cpu_transcoder != TRANSCODER_EDP) {
 +              if (INTEL_GEN(dev_priv) >= 12)
 +                      I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
 +                                 TGL_TRANS_CLK_SEL_DISABLED);
 +              else
 +                      I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
 +                                 TRANS_CLK_SEL_DISABLED);
 +      }
  }
  
  static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
@@@ -2258,12 -2227,11 +2258,12 @@@ u8 intel_ddi_dp_voltage_max(struct inte
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        enum port port = encoder->port;
 +      enum phy phy = intel_port_to_phy(dev_priv, port);
        int n_entries;
  
        if (INTEL_GEN(dev_priv) >= 11) {
 -              if (intel_port_is_combophy(dev_priv, port))
 -                      icl_get_combo_buf_trans(dev_priv, port, encoder->type,
 +              if (intel_phy_is_combo(dev_priv, phy))
 +                      icl_get_combo_buf_trans(dev_priv, encoder->type,
                                                intel_dp->link_rate, &n_entries);
                else
                        n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations);
@@@ -2445,15 -2413,15 +2445,15 @@@ static void cnl_ddi_vswing_sequence(str
  }
  
  static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
 -                                      u32 level, enum port port, int type,
 +                                      u32 level, enum phy phy, int type,
                                        int rate)
  {
        const struct cnl_ddi_buf_trans *ddi_translations = NULL;
        u32 n_entries, val;
        int ln;
  
 -      ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type,
 -                                                 rate, &n_entries);
 +      ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
 +                                                 &n_entries);
        if (!ddi_translations)
                return;
  
        }
  
        /* Set PORT_TX_DW5 */
 -      val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
 +      val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
        val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
                  TAP2_DISABLE | TAP3_DISABLE);
        val |= SCALING_MODE_SEL(0x2);
        val |= RTERM_SELECT(0x6);
        val |= TAP3_DISABLE;
 -      I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
 +      I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
  
        /* Program PORT_TX_DW2 */
 -      val = I915_READ(ICL_PORT_TX_DW2_LN0(port));
 +      val = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
        val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
                 RCOMP_SCALAR_MASK);
        val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
        val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
        /* Program Rcomp scalar for every table entry */
        val |= RCOMP_SCALAR(0x98);
 -      I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val);
 +      I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), val);
  
        /* Program PORT_TX_DW4 */
        /* We cannot write to GRP. It would overwrite individual loadgen. */
        for (ln = 0; ln <= 3; ln++) {
 -              val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
 +              val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
                val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
                         CURSOR_COEFF_MASK);
                val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
                val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
                val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
 -              I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
 +              I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
        }
  
        /* Program PORT_TX_DW7 */
 -      val = I915_READ(ICL_PORT_TX_DW7_LN0(port));
 +      val = I915_READ(ICL_PORT_TX_DW7_LN0(phy));
        val &= ~N_SCALAR_MASK;
        val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
 -      I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val);
 +      I915_WRITE(ICL_PORT_TX_DW7_GRP(phy), val);
  }
  
  static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
                                              enum intel_output_type type)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 -      enum port port = encoder->port;
 +      enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
        int width = 0;
        int rate = 0;
        u32 val;
         * set PORT_PCS_DW1 cmnkeeper_enable to 1b,
         * else clear to 0b.
         */
 -      val = I915_READ(ICL_PORT_PCS_DW1_LN0(port));
 +      val = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
        if (type == INTEL_OUTPUT_HDMI)
                val &= ~COMMON_KEEPER_EN;
        else
                val |= COMMON_KEEPER_EN;
 -      I915_WRITE(ICL_PORT_PCS_DW1_GRP(port), val);
 +      I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), val);
  
        /* 2. Program loadgen select */
        /*
         * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
         */
        for (ln = 0; ln <= 3; ln++) {
 -              val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
 +              val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
                val &= ~LOADGEN_SELECT;
  
                if ((rate <= 600000 && width == 4 && ln >= 1) ||
                    (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
                        val |= LOADGEN_SELECT;
                }
 -              I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
 +              I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
        }
  
        /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
 -      val = I915_READ(ICL_PORT_CL_DW5(port));
 +      val = I915_READ(ICL_PORT_CL_DW5(phy));
        val |= SUS_CLOCK_CONFIG;
 -      I915_WRITE(ICL_PORT_CL_DW5(port), val);
 +      I915_WRITE(ICL_PORT_CL_DW5(phy), val);
  
        /* 4. Clear training enable to change swing values */
 -      val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
 +      val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
        val &= ~TX_TRAINING_EN;
 -      I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
 +      I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
  
        /* 5. Program swing and de-emphasis */
 -      icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate);
 +      icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate);
  
        /* 6. Set training enable to trigger update */
 -      val = I915_READ(ICL_PORT_TX_DW5_LN0(port));
 +      val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
        val |= TX_TRAINING_EN;
 -      I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val);
 +      I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
  }
  
  static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@@ -2695,9 -2663,9 +2695,9 @@@ static void icl_ddi_vswing_sequence(str
                                    enum intel_output_type type)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 -      enum port port = encoder->port;
 +      enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
  
 -      if (intel_port_is_combophy(dev_priv, port))
 +      if (intel_phy_is_combo(dev_priv, phy))
                icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
        else
                icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level);
@@@ -2760,13 -2728,12 +2760,13 @@@ u32 ddi_signal_levels(struct intel_dp *
  
  static inline
  u32 icl_dpclka_cfgcr0_clk_off(struct drm_i915_private *dev_priv,
 -                            enum port port)
 +                            enum phy phy)
  {
 -      if (intel_port_is_combophy(dev_priv, port)) {
 -              return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(port);
 -      } else if (intel_port_is_tc(dev_priv, port)) {
 -              enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
 +      if (intel_phy_is_combo(dev_priv, phy)) {
 +              return ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
 +      } else if (intel_phy_is_tc(dev_priv, phy)) {
 +              enum tc_port tc_port = intel_port_to_tc(dev_priv,
 +                                                      (enum port)phy);
  
                return ICL_DPCLKA_CFGCR0_TC_CLK_OFF(tc_port);
        }
@@@ -2779,33 -2746,23 +2779,33 @@@ static void icl_map_plls_to_ports(struc
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_shared_dpll *pll = crtc_state->shared_dpll;
 -      enum port port = encoder->port;
 +      enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
        u32 val;
  
        mutex_lock(&dev_priv->dpll_lock);
  
 -      val = I915_READ(DPCLKA_CFGCR0_ICL);
 -      WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, port)) == 0);
 +      val = I915_READ(ICL_DPCLKA_CFGCR0);
 +      WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
  
 -      if (intel_port_is_combophy(dev_priv, port)) {
 -              val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
 -              val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
 -              I915_WRITE(DPCLKA_CFGCR0_ICL, val);
 -              POSTING_READ(DPCLKA_CFGCR0_ICL);
 +      if (intel_phy_is_combo(dev_priv, phy)) {
 +              /*
 +               * Even though this register references DDIs, note that we
 +               * want to pass the PHY rather than the port (DDI).  For
 +               * ICL, port=phy in all cases so it doesn't matter, but for
 +               * EHL the bspec notes the following:
 +               *
 +               *   "DDID clock tied to DDIA clock, so DPCLKA_CFGCR0 DDIA
 +               *   Clock Select chooses the PLL for both DDIA and DDID and
 +               *   drives port A in all cases."
 +               */
 +              val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
 +              val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
 +              I915_WRITE(ICL_DPCLKA_CFGCR0, val);
 +              POSTING_READ(ICL_DPCLKA_CFGCR0);
        }
  
 -      val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, port);
 -      I915_WRITE(DPCLKA_CFGCR0_ICL, val);
 +      val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
 +      I915_WRITE(ICL_DPCLKA_CFGCR0, val);
  
        mutex_unlock(&dev_priv->dpll_lock);
  }
  static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 -      enum port port = encoder->port;
 +      enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
        u32 val;
  
        mutex_lock(&dev_priv->dpll_lock);
  
 -      val = I915_READ(DPCLKA_CFGCR0_ICL);
 -      val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
 -      I915_WRITE(DPCLKA_CFGCR0_ICL, val);
 +      val = I915_READ(ICL_DPCLKA_CFGCR0);
 +      val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
 +      I915_WRITE(ICL_DPCLKA_CFGCR0, val);
  
        mutex_unlock(&dev_priv->dpll_lock);
  }
@@@ -2878,13 -2835,11 +2878,13 @@@ void icl_sanitize_encoder_pll_mapping(s
                ddi_clk_needed = false;
        }
  
 -      val = I915_READ(DPCLKA_CFGCR0_ICL);
 +      val = I915_READ(ICL_DPCLKA_CFGCR0);
        for_each_port_masked(port, port_mask) {
 +              enum phy phy = intel_port_to_phy(dev_priv, port);
 +
                bool ddi_clk_ungated = !(val &
                                         icl_dpclka_cfgcr0_clk_off(dev_priv,
 -                                                                 port));
 +                                                                 phy));
  
                if (ddi_clk_needed == ddi_clk_ungated)
                        continue;
                if (WARN_ON(ddi_clk_needed))
                        continue;
  
 -              DRM_NOTE("Port %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
 -                       port_name(port));
 -              val |= icl_dpclka_cfgcr0_clk_off(dev_priv, port);
 -              I915_WRITE(DPCLKA_CFGCR0_ICL, val);
 +              DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
 +                       phy_name(port));
 +              val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
 +              I915_WRITE(ICL_DPCLKA_CFGCR0, val);
        }
  }
  
@@@ -2908,7 -2863,6 +2908,7 @@@ static void intel_ddi_clk_select(struc
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = encoder->port;
 +      enum phy phy = intel_port_to_phy(dev_priv, port);
        u32 val;
        const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
  
        mutex_lock(&dev_priv->dpll_lock);
  
        if (INTEL_GEN(dev_priv) >= 11) {
 -              if (!intel_port_is_combophy(dev_priv, port))
 +              if (!intel_phy_is_combo(dev_priv, phy))
                        I915_WRITE(DDI_CLK_SEL(port),
                                   icl_pll_to_ddi_clk_sel(encoder, crtc_state));
        } else if (IS_CANNONLAKE(dev_priv)) {
@@@ -2958,10 -2912,9 +2958,10 @@@ static void intel_ddi_clk_disable(struc
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = encoder->port;
 +      enum phy phy = intel_port_to_phy(dev_priv, port);
  
        if (INTEL_GEN(dev_priv) >= 11) {
 -              if (!intel_port_is_combophy(dev_priv, port))
 +              if (!intel_phy_is_combo(dev_priv, phy))
                        I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
        } else if (IS_CANNONLAKE(dev_priv)) {
                I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
@@@ -3042,22 -2995,25 +3042,22 @@@ static void icl_program_mg_dp_mode(stru
  {
        struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
        enum port port = intel_dig_port->base.port;
 -      enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
 -      u32 ln0, ln1, lane_info;
 +      u32 ln0, ln1, lane_mask;
  
 -      if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
 +      if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT)
                return;
  
        ln0 = I915_READ(MG_DP_MODE(0, port));
        ln1 = I915_READ(MG_DP_MODE(1, port));
  
 -      switch (intel_dig_port->tc_type) {
 -      case TC_PORT_TYPEC:
 +      switch (intel_dig_port->tc_mode) {
 +      case TC_PORT_DP_ALT:
                ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
                ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
  
 -              lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
 -                           DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
 -                          DP_LANE_ASSIGNMENT_SHIFT(tc_port);
 +              lane_mask = intel_tc_port_get_lane_mask(intel_dig_port);
  
 -              switch (lane_info) {
 +              switch (lane_mask) {
                case 0x1:
                case 0x4:
                        break;
                               MG_DP_MODE_CFG_DP_X2_MODE;
                        break;
                default:
 -                      MISSING_CASE(lane_info);
 +                      MISSING_CASE(lane_mask);
                }
                break;
  
                break;
  
        default:
 -              MISSING_CASE(intel_dig_port->tc_type);
 +              MISSING_CASE(intel_dig_port->tc_mode);
                return;
        }
  
@@@ -3154,7 -3110,6 +3154,7 @@@ static void intel_ddi_pre_enable_dp(str
        struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        enum port port = encoder->port;
 +      enum phy phy = intel_port_to_phy(dev_priv, port);
        struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
        bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
        int level = intel_ddi_dp_level(intel_dp);
  
        intel_ddi_clk_select(encoder, crtc_state);
  
 -      intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
 +      if (!intel_phy_is_tc(dev_priv, phy) ||
 +          dig_port->tc_mode != TC_PORT_TBT_ALT)
 +              intel_display_power_get(dev_priv,
 +                                      dig_port->ddi_io_power_domain);
  
        icl_program_mg_dp_mode(dig_port);
        icl_disable_phy_clock_gating(dig_port);
        else
                intel_prepare_dp_ddi_buffers(encoder, crtc_state);
  
 -      if (intel_port_is_combophy(dev_priv, port)) {
 +      if (intel_phy_is_combo(dev_priv, phy)) {
                bool lane_reversal =
                        dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
  
 -              intel_combo_phy_power_up_lanes(dev_priv, port, false,
 +              intel_combo_phy_power_up_lanes(dev_priv, phy, false,
                                               crtc_state->lane_count,
                                               lane_reversal);
        }
@@@ -3338,7 -3290,6 +3338,7 @@@ static void intel_ddi_post_disable_dp(s
        struct intel_dp *intel_dp = &dig_port->dp;
        bool is_mst = intel_crtc_has_type(old_crtc_state,
                                          INTEL_OUTPUT_DP_MST);
 +      enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
  
        if (!is_mst) {
                intel_ddi_disable_pipe_clock(old_crtc_state);
        intel_edp_panel_vdd_on(intel_dp);
        intel_edp_panel_off(intel_dp);
  
 -      intel_display_power_put_unchecked(dev_priv,
 -                                        dig_port->ddi_io_power_domain);
 +      if (!intel_phy_is_tc(dev_priv, phy) ||
 +          dig_port->tc_mode != TC_PORT_TBT_ALT)
 +              intel_display_power_put_unchecked(dev_priv,
 +                                                dig_port->ddi_io_power_domain);
  
        intel_ddi_clk_disable(encoder);
  }
@@@ -3562,7 -3511,8 +3562,8 @@@ static void intel_enable_ddi(struct int
        /* Enable hdcp if it's desired */
        if (conn_state->content_protection ==
            DRM_MODE_CONTENT_PROTECTION_DESIRED)
-               intel_hdcp_enable(to_intel_connector(conn_state->connector));
+               intel_hdcp_enable(to_intel_connector(conn_state->connector),
+                                 (u8)conn_state->hdcp_content_type);
  }
  
  static void intel_disable_ddi_dp(struct intel_encoder *encoder,
@@@ -3631,39 -3581,70 +3632,65 @@@ static void intel_ddi_update_pipe(struc
                                  const struct intel_crtc_state *crtc_state,
                                  const struct drm_connector_state *conn_state)
  {
+       struct intel_connector *connector =
+                               to_intel_connector(conn_state->connector);
+       struct intel_hdcp *hdcp = &connector->hdcp;
+       bool content_protection_type_changed =
+                       (conn_state->hdcp_content_type != hdcp->content_type &&
+                        conn_state->content_protection !=
+                        DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
        if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
                intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
  
+       /*
+        * During the HDCP encryption session if Type change is requested,
+        * disable the HDCP and reenable it with new TYPE value.
+        */
        if (conn_state->content_protection ==
-           DRM_MODE_CONTENT_PROTECTION_DESIRED)
-               intel_hdcp_enable(to_intel_connector(conn_state->connector));
-       else if (conn_state->content_protection ==
-                DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
-               intel_hdcp_disable(to_intel_connector(conn_state->connector));
+           DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
+           content_protection_type_changed)
+               intel_hdcp_disable(connector);
+       /*
+        * Mark the hdcp state as DESIRED after the hdcp disable of type
+        * change procedure.
+        */
+       if (content_protection_type_changed) {
+               mutex_lock(&hdcp->mutex);
+               hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+               schedule_work(&hdcp->prop_work);
+               mutex_unlock(&hdcp->mutex);
+       }
+       if (conn_state->content_protection ==
+           DRM_MODE_CONTENT_PROTECTION_DESIRED ||
+           content_protection_type_changed)
+               intel_hdcp_enable(connector, (u8)conn_state->hdcp_content_type);
  }
  
 -static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
 -                                       const struct intel_crtc_state *pipe_config,
 -                                       enum port port)
 +static void
 +intel_ddi_update_prepare(struct intel_atomic_state *state,
 +                       struct intel_encoder *encoder,
 +                       struct intel_crtc *crtc)
  {
 -      struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 -      struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 -      enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
 -      u32 val = I915_READ(PORT_TX_DFLEXDPMLE1);
 -      bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL;
 -
 -      val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port);
 -      switch (pipe_config->lane_count) {
 -      case 1:
 -              val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) :
 -              DFLEXDPMLE1_DPMLETC_ML0(tc_port);
 -              break;
 -      case 2:
 -              val |= (lane_reversal) ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) :
 -              DFLEXDPMLE1_DPMLETC_ML1_0(tc_port);
 -              break;
 -      case 4:
 -              val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port);
 -              break;
 -      default:
 -              MISSING_CASE(pipe_config->lane_count);
 -      }
 -      I915_WRITE(PORT_TX_DFLEXDPMLE1, val);
 +      struct intel_crtc_state *crtc_state =
 +              crtc ? intel_atomic_get_new_crtc_state(state, crtc) : NULL;
 +      int required_lanes = crtc_state ? crtc_state->lane_count : 1;
 +
 +      WARN_ON(crtc && crtc->active);
 +
 +      intel_tc_port_get_link(enc_to_dig_port(&encoder->base), required_lanes);
 +      if (crtc_state && crtc_state->base.active)
 +              intel_update_active_dpll(state, crtc, encoder);
 +}
 +
 +static void
 +intel_ddi_update_complete(struct intel_atomic_state *state,
 +                        struct intel_encoder *encoder,
 +                        struct intel_crtc *crtc)
 +{
 +      intel_tc_port_put_link(enc_to_dig_port(&encoder->base));
  }
  
  static void
@@@ -3673,25 -3654,26 +3700,25 @@@ intel_ddi_pre_pll_enable(struct intel_e
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 -      enum port port = encoder->port;
 +      enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 +      bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
  
 -      if (intel_crtc_has_dp_encoder(crtc_state) ||
 -          intel_port_is_tc(dev_priv, encoder->port))
 +      if (is_tc_port)
 +              intel_tc_port_get_link(dig_port, crtc_state->lane_count);
 +
 +      if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
                intel_display_power_get(dev_priv,
                                        intel_ddi_main_link_aux_domain(dig_port));
  
 -      if (IS_GEN9_LP(dev_priv))
 +      if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT)
 +              /*
 +               * Program the lane count for static/dynamic connections on
 +               * Type-C ports.  Skip this step for TBT.
 +               */
 +              intel_tc_port_set_fia_lane_count(dig_port, crtc_state->lane_count);
 +      else if (IS_GEN9_LP(dev_priv))
                bxt_ddi_phy_set_lane_optim_mask(encoder,
                                                crtc_state->lane_lat_optim_mask);
 -
 -      /*
 -       * Program the lane count for static/dynamic connections on Type-C ports.
 -       * Skip this step for TBT.
 -       */
 -      if (dig_port->tc_type == TC_PORT_UNKNOWN ||
 -          dig_port->tc_type == TC_PORT_TBT)
 -              return;
 -
 -      intel_ddi_set_fia_lane_count(encoder, crtc_state, port);
  }
  
  static void
@@@ -3701,15 -3683,11 +3728,15 @@@ intel_ddi_post_pll_disable(struct intel
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 +      enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
 +      bool is_tc_port = intel_phy_is_tc(dev_priv, phy);
  
 -      if (intel_crtc_has_dp_encoder(crtc_state) ||
 -          intel_port_is_tc(dev_priv, encoder->port))
 +      if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port)
                intel_display_power_put_unchecked(dev_priv,
                                                  intel_ddi_main_link_aux_domain(dig_port));
 +
 +      if (is_tc_port)
 +              intel_tc_port_put_link(dig_port);
  }
  
  static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@@ -3786,6 -3764,7 +3813,6 @@@ void intel_ddi_get_config(struct intel_
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
        enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
 -      struct intel_digital_port *intel_dig_port;
        u32 temp, flags = 0;
  
        /* XXX: DSI transcoder paranoia */
        switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
        case TRANS_DDI_MODE_SELECT_HDMI:
                pipe_config->has_hdmi_sink = true;
 -              intel_dig_port = enc_to_dig_port(&encoder->base);
  
                pipe_config->infoframes.enable |=
                        intel_hdmi_infoframes_enabled(encoder, pipe_config);
@@@ -3961,18 -3941,49 +3988,18 @@@ static int intel_ddi_compute_config(str
        return 0;
  }
  
 -static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
 -{
 -      struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
 -      struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 -
 -      intel_dp_encoder_suspend(encoder);
 -
 -      /*
 -       * TODO: disconnect also from USB DP alternate mode once we have a
 -       * way to handle the modeset restore in that mode during resume
 -       * even if the sink has disappeared while being suspended.
 -       */
 -      if (dig_port->tc_legacy_port)
 -              icl_tc_phy_disconnect(i915, dig_port);
 -}
 -
 -static void intel_ddi_encoder_reset(struct drm_encoder *drm_encoder)
 -{
 -      struct intel_digital_port *dig_port = enc_to_dig_port(drm_encoder);
 -      struct drm_i915_private *i915 = to_i915(drm_encoder->dev);
 -
 -      if (intel_port_is_tc(i915, dig_port->base.port))
 -              intel_digital_port_connected(&dig_port->base);
 -
 -      intel_dp_encoder_reset(drm_encoder);
 -}
 -
  static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
  {
        struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
 -      struct drm_i915_private *i915 = to_i915(encoder->dev);
  
        intel_dp_encoder_flush_work(encoder);
  
 -      if (intel_port_is_tc(i915, dig_port->base.port))
 -              icl_tc_phy_disconnect(i915, dig_port);
 -
        drm_encoder_cleanup(encoder);
        kfree(dig_port);
  }
  
  static const struct drm_encoder_funcs intel_ddi_funcs = {
 -      .reset = intel_ddi_encoder_reset,
 +      .reset = intel_dp_encoder_reset,
        .destroy = intel_ddi_encoder_destroy,
  };
  
@@@ -4097,17 -4108,14 +4124,17 @@@ static int intel_hdmi_reset_link(struc
        return modeset_pipe(&crtc->base, ctx);
  }
  
 -static bool intel_ddi_hotplug(struct intel_encoder *encoder,
 -                            struct intel_connector *connector)
 +static enum intel_hotplug_state
 +intel_ddi_hotplug(struct intel_encoder *encoder,
 +                struct intel_connector *connector,
 +                bool irq_received)
  {
 +      struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
        struct drm_modeset_acquire_ctx ctx;
 -      bool changed;
 +      enum intel_hotplug_state state;
        int ret;
  
 -      changed = intel_encoder_hotplug(encoder, connector);
 +      state = intel_encoder_hotplug(encoder, connector, irq_received);
  
        drm_modeset_acquire_init(&ctx, 0);
  
        drm_modeset_acquire_fini(&ctx);
        WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
  
 -      return changed;
 +      /*
 +       * Unpowered type-c dongles can take some time to boot and become
 +       * responsive, so give those dongles some time to power up and then
 +       * retry the probe.
 +       *
 +       * On many platforms the HDMI live state signal is known to be
 +       * unreliable, so we can't use it to detect if a sink is connected or
 +       * not. Instead we detect if it's connected based on whether we can
 +       * read the EDID or not. That in turn has a problem during disconnect,
 +       * since the HPD interrupt may be raised before the DDC lines get
 +       * disconnected (due to how the required length of DDC vs. HPD
 +       * connector pins are specified) and so we'll still be able to get a
 +       * valid EDID. To solve this, schedule another detection cycle if this
 +       * time around we didn't detect any change in the sink's connection
 +       * status.
 +       */
 +      if (state == INTEL_HOTPLUG_UNCHANGED && irq_received &&
 +          !dig_port->dp.is_mst)
 +              state = INTEL_HOTPLUG_RETRY;
 +
 +      return state;
  }
  
  static struct intel_connector *
@@@ -4237,7 -4225,6 +4264,7 @@@ void intel_ddi_init(struct drm_i915_pri
        struct drm_encoder *encoder;
        bool init_hdmi, init_dp, init_lspcon = false;
        enum pipe pipe;
 +      enum phy phy = intel_port_to_phy(dev_priv, port);
  
        init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
        init_dp = port_info->supports_dp;
        intel_encoder->update_pipe = intel_ddi_update_pipe;
        intel_encoder->get_hw_state = intel_ddi_get_hw_state;
        intel_encoder->get_config = intel_ddi_get_config;
 -      intel_encoder->suspend = intel_ddi_encoder_suspend;
 +      intel_encoder->suspend = intel_dp_encoder_suspend;
        intel_encoder->get_power_domains = intel_ddi_get_power_domains;
        intel_encoder->type = INTEL_OUTPUT_DDI;
        intel_encoder->power_domain = intel_port_to_power_domain(port);
        intel_dig_port->max_lanes = intel_ddi_max_lanes(intel_dig_port);
        intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
  
 -      intel_dig_port->tc_legacy_port = intel_port_is_tc(dev_priv, port) &&
 -                                       !port_info->supports_typec_usb &&
 -                                       !port_info->supports_tbt;
 +      if (intel_phy_is_tc(dev_priv, phy)) {
 +              bool is_legacy = !port_info->supports_typec_usb &&
 +                               !port_info->supports_tbt;
 +
 +              intel_tc_port_init(intel_dig_port, is_legacy);
 +
 +              intel_encoder->update_prepare = intel_ddi_update_prepare;
 +              intel_encoder->update_complete = intel_ddi_update_complete;
 +      }
  
        switch (port) {
        case PORT_A:
                intel_dig_port->ddi_io_power_domain =
                        POWER_DOMAIN_PORT_DDI_F_IO;
                break;
 +      case PORT_G:
 +              intel_dig_port->ddi_io_power_domain =
 +                      POWER_DOMAIN_PORT_DDI_G_IO;
 +              break;
 +      case PORT_H:
 +              intel_dig_port->ddi_io_power_domain =
 +                      POWER_DOMAIN_PORT_DDI_H_IO;
 +              break;
 +      case PORT_I:
 +              intel_dig_port->ddi_io_power_domain =
 +                      POWER_DOMAIN_PORT_DDI_I_IO;
 +              break;
        default:
                MISSING_CASE(port);
        }
  
        intel_infoframe_init(intel_dig_port);
  
 -      if (intel_port_is_tc(dev_priv, port))
 -              intel_digital_port_connected(intel_encoder);
 -
        return;
  
  err:
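
The hotplug change above only reports INTEL_HOTPLUG_RETRY back to the caller;
acting on it is left to the hotplug worker. A minimal sketch of how a worker
could consume the new tri-state result (illustrative only: the function name
and the one-second delay are assumptions, not the driver's actual code; the
retry_bits field and the delayed hotplug_work are the ones added further down
in this merge):

	/* Illustrative: probe again later for encoders that asked to retry. */
	static void hotplug_retry_sketch(struct drm_i915_private *i915,
					 struct intel_encoder *encoder,
					 struct intel_connector *connector,
					 bool irq_received)
	{
		enum intel_hotplug_state state;

		state = encoder->hotplug(encoder, connector, irq_received);
		if (state == INTEL_HOTPLUG_RETRY) {
			/* Remember the pin and schedule another detection cycle. */
			i915->hotplug.retry_bits |= BIT(encoder->hpd_pin);
			mod_delayed_work(system_wq, &i915->hotplug.hotplug_work,
					 msecs_to_jiffies(1000));
		}
	}
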
@@@ -523,16 -523,12 +523,16 @@@ int intel_hdcp_auth_downstream(struct i
         * authentication.
         */
        num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
 -      if (num_downstream == 0)
 +      if (num_downstream == 0) {
 +              DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
                return -EINVAL;
 +      }
  
        ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
 -      if (!ksv_fifo)
 +      if (!ksv_fifo) {
 +              DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
                return -ENOMEM;
 +      }
  
        ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
        if (ret)
@@@ -869,7 -865,6 +869,6 @@@ static void intel_hdcp_prop_work(struc
                                               prop_work);
        struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
        struct drm_device *dev = connector->base.dev;
-       struct drm_connector_state *state;
  
        drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
        mutex_lock(&hdcp->mutex);
         * those to UNDESIRED is handled by core. If value == UNDESIRED,
         * we're running just after hdcp has been disabled, so just exit
         */
-       if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
-               state = connector->base.state;
-               state->content_protection = hdcp->value;
-       }
+       if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+               drm_hdcp_update_content_protection(&connector->base,
+                                                  hdcp->value);
  
        mutex_unlock(&hdcp->mutex);
        drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@@ -1210,10 -1204,8 +1208,10 @@@ static int hdcp2_authentication_key_exc
        if (ret < 0)
                return ret;
  
 -      if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
 +      if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
 +              DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
                return -EINVAL;
 +      }
  
        hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
  
@@@ -1754,14 -1746,15 +1752,15 @@@ static const struct component_ops i915_
        .unbind = i915_hdcp_component_unbind,
  };
  
- static inline int initialize_hdcp_port_data(struct intel_connector *connector)
+ static inline int initialize_hdcp_port_data(struct intel_connector *connector,
+                                           const struct intel_hdcp_shim *shim)
  {
        struct intel_hdcp *hdcp = &connector->hdcp;
        struct hdcp_port_data *data = &hdcp->port_data;
  
        data->port = connector->encoder->port;
        data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
-       data->protocol = (u8)hdcp->shim->protocol;
+       data->protocol = (u8)shim->protocol;
  
        data->k = 1;
        if (!data->streams)
@@@ -1811,12 -1804,13 +1810,13 @@@ void intel_hdcp_component_init(struct d
        }
  }
  
- static void intel_hdcp2_init(struct intel_connector *connector)
+ static void intel_hdcp2_init(struct intel_connector *connector,
+                            const struct intel_hdcp_shim *shim)
  {
        struct intel_hdcp *hdcp = &connector->hdcp;
        int ret;
  
-       ret = initialize_hdcp_port_data(connector);
+       ret = initialize_hdcp_port_data(connector, shim);
        if (ret) {
                DRM_DEBUG_KMS("Mei hdcp data init failed\n");
                return;
@@@ -1835,23 -1829,28 +1835,28 @@@ int intel_hdcp_init(struct intel_connec
        if (!shim)
                return -EINVAL;
  
-       ret = drm_connector_attach_content_protection_property(&connector->base);
-       if (ret)
+       if (is_hdcp2_supported(dev_priv))
+               intel_hdcp2_init(connector, shim);
+       ret =
+       drm_connector_attach_content_protection_property(&connector->base,
+                                                        hdcp->hdcp2_supported);
+       if (ret) {
+               hdcp->hdcp2_supported = false;
+               kfree(hdcp->port_data.streams);
                return ret;
+       }
  
        hdcp->shim = shim;
        mutex_init(&hdcp->mutex);
        INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
        INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
-       if (is_hdcp2_supported(dev_priv))
-               intel_hdcp2_init(connector);
        init_waitqueue_head(&hdcp->cp_irq_queue);
  
        return 0;
  }
  
- int intel_hdcp_enable(struct intel_connector *connector)
+ int intel_hdcp_enable(struct intel_connector *connector, u8 content_type)
  {
        struct intel_hdcp *hdcp = &connector->hdcp;
        unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
  
        mutex_lock(&hdcp->mutex);
        WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
+       hdcp->content_type = content_type;
  
        /*
         * Considering that HDCP2.2 is more secure than HDCP1.4, if the setup
                        check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
        }
  
-       /* When HDCP2.2 fails, HDCP1.4 will be attempted */
-       if (ret && intel_hdcp_capable(connector)) {
+       /*
+        * When HDCP2.2 fails and Content Type is not Type1, HDCP1.4 will
+        * be attempted.
+        */
+       if (ret && intel_hdcp_capable(connector) &&
+           hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
                ret = _intel_hdcp_enable(connector);
        }
  
@@@ -1956,12 -1960,15 +1966,15 @@@ void intel_hdcp_atomic_check(struct drm
  
        /*
         * Nothing to do if the state didn't change, or HDCP was activated since
-        * the last commit
+        * the last commit, provided the HDCP content type did not change.
         */
        if (old_cp == new_cp ||
            (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
-            new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
-               return;
+            new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)) {
+               if (old_state->hdcp_content_type ==
+                               new_state->hdcp_content_type)
+                       return;
+       }
  
        crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
                                                   new_state->crtc);
  
  #include "gem/i915_gem_context.h"
  #include "gem/i915_gem_ioctls.h"
 +#include "gt/intel_gt.h"
  #include "gt/intel_gt_pm.h"
  #include "gt/intel_reset.h"
  #include "gt/intel_workarounds.h"
 +#include "gt/uc/intel_uc.h"
  
  #include "i915_debugfs.h"
  #include "i915_drv.h"
  #include "intel_csr.h"
  #include "intel_drv.h"
  #include "intel_pm.h"
 -#include "intel_uc.h"
  
  static struct drm_driver driver;
  
  #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
 -static unsigned int i915_load_fail_count;
 +static unsigned int i915_probe_fail_count;
  
 -bool __i915_inject_load_failure(const char *func, int line)
 +bool __i915_inject_probe_failure(const char *func, int line)
  {
 -      if (i915_load_fail_count >= i915_modparams.inject_load_failure)
 +      if (i915_probe_fail_count >= i915_modparams.inject_load_failure)
                return false;
  
 -      if (++i915_load_fail_count == i915_modparams.inject_load_failure) {
 +      if (++i915_probe_fail_count == i915_modparams.inject_load_failure) {
                DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
                         i915_modparams.inject_load_failure, func, line);
                i915_modparams.inject_load_failure = 0;
  
  bool i915_error_injected(void)
  {
 -      return i915_load_fail_count && !i915_modparams.inject_load_failure;
 +      return i915_probe_fail_count && !i915_modparams.inject_load_failure;
  }
  
  #endif
@@@ -220,14 -219,9 +220,14 @@@ intel_pch_type(const struct drm_i915_pr
                WARN_ON(!IS_ICELAKE(dev_priv));
                return PCH_ICP;
        case INTEL_PCH_MCC_DEVICE_ID_TYPE:
 +      case INTEL_PCH_MCC2_DEVICE_ID_TYPE:
                DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n");
                WARN_ON(!IS_ELKHARTLAKE(dev_priv));
                return PCH_MCC;
 +      case INTEL_PCH_TGP_DEVICE_ID_TYPE:
 +              DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n");
 +              WARN_ON(!IS_TIGERLAKE(dev_priv));
 +              return PCH_TGP;
        default:
                return PCH_NONE;
        }
@@@ -255,9 -249,7 +255,9 @@@ intel_virt_detect_pch(const struct drm_
         * make an educated guess as to which PCH is really there.
         */
  
 -      if (IS_ELKHARTLAKE(dev_priv))
 +      if (IS_TIGERLAKE(dev_priv))
 +              id = INTEL_PCH_TGP_DEVICE_ID_TYPE;
 +      else if (IS_ELKHARTLAKE(dev_priv))
                id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
        else if (IS_ICELAKE(dev_priv))
                id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
@@@ -426,7 -418,7 +426,7 @@@ static int i915_getparam_ioctl(struct d
                value = sseu->min_eu_in_pool;
                break;
        case I915_PARAM_HUC_STATUS:
 -              value = intel_huc_check_status(&dev_priv->huc);
 +              value = intel_huc_check_status(&dev_priv->gt.uc.huc);
                if (value < 0)
                        return value;
                break;
@@@ -681,13 -673,13 +681,13 @@@ static const struct vga_switcheroo_clie
        .can_switch = i915_switcheroo_can_switch,
  };
  
 -static int i915_load_modeset_init(struct drm_device *dev)
 +static int i915_driver_modeset_probe(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = dev_priv->drm.pdev;
        int ret;
  
 -      if (i915_inject_load_failure())
 +      if (i915_inject_probe_failure())
                return -ENODEV;
  
        if (HAS_DISPLAY(dev_priv)) {
  
  cleanup_gem:
        i915_gem_suspend(dev_priv);
 -      i915_gem_fini_hw(dev_priv);
 -      i915_gem_fini(dev_priv);
 +      i915_gem_driver_remove(dev_priv);
 +      i915_gem_driver_release(dev_priv);
  cleanup_modeset:
 -      intel_modeset_cleanup(dev);
 +      intel_modeset_driver_remove(dev);
  cleanup_irq:
 -      drm_irq_uninstall(dev);
 +      intel_irq_uninstall(dev_priv);
        intel_gmbus_teardown(dev_priv);
  cleanup_csr:
        intel_csr_ucode_fini(dev_priv);
 -      intel_power_domains_fini_hw(dev_priv);
 +      intel_power_domains_driver_remove(dev_priv);
        vga_switcheroo_unregister_client(pdev);
  cleanup_vga_client:
        vga_client_register(pdev, NULL, NULL, NULL);
@@@ -848,6 -840,15 +848,6 @@@ out_err
        return -ENOMEM;
  }
  
 -static void i915_engines_cleanup(struct drm_i915_private *i915)
 -{
 -      struct intel_engine_cs *engine;
 -      enum intel_engine_id id;
 -
 -      for_each_engine(engine, i915, id)
 -              kfree(engine);
 -}
 -
  static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
  {
        destroy_workqueue(dev_priv->hotplug.dp_wq);
@@@ -881,7 -882,7 +881,7 @@@ static void intel_detect_preproduction_
  }
  
  /**
 - * i915_driver_init_early - setup state not requiring device access
 + * i915_driver_early_probe - setup state not requiring device access
   * @dev_priv: device private
   *
   * Initialize everything that is a "SW-only" state, that is state not
   * system memory allocation, setting up device specific attributes and
   * function hooks not requiring accessing the device.
   */
 -static int i915_driver_init_early(struct drm_i915_private *dev_priv)
 +static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
  {
        int ret = 0;
  
 -      if (i915_inject_load_failure())
 +      if (i915_inject_probe_failure())
                return -ENODEV;
  
        intel_device_info_subplatform_init(dev_priv);
  
 -      intel_uncore_init_early(&dev_priv->uncore);
 +      intel_uncore_init_early(&dev_priv->uncore, dev_priv);
  
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->gpu_error.lock);
  
        ret = i915_workqueues_init(dev_priv);
        if (ret < 0)
 -              goto err_engines;
 +              return ret;
 +
 +      intel_gt_init_early(&dev_priv->gt, dev_priv);
  
        ret = i915_gem_init_early(dev_priv);
        if (ret < 0)
        intel_detect_pch(dev_priv);
  
        intel_wopcm_init_early(&dev_priv->wopcm);
 -      intel_uc_init_early(dev_priv);
 +      intel_uc_init_early(&dev_priv->gt.uc);
        intel_pm_setup(dev_priv);
        intel_init_dpio(dev_priv);
        ret = intel_power_domains_init(dev_priv);
        if (ret < 0)
                goto err_uc;
        intel_irq_init(dev_priv);
 -      intel_hangcheck_init(dev_priv);
        intel_init_display_hooks(dev_priv);
        intel_init_clock_gating_hooks(dev_priv);
        intel_init_audio_hooks(dev_priv);
        return 0;
  
  err_uc:
 -      intel_uc_cleanup_early(dev_priv);
 +      intel_uc_cleanup_early(&dev_priv->gt.uc);
        i915_gem_cleanup_early(dev_priv);
  err_workqueues:
        i915_workqueues_cleanup(dev_priv);
 -err_engines:
 -      i915_engines_cleanup(dev_priv);
        return ret;
  }
  
  /**
 - * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
 + * i915_driver_late_release - cleanup the setup done in
 + *                           i915_driver_early_probe()
   * @dev_priv: device private
   */
 -static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
 +static void i915_driver_late_release(struct drm_i915_private *dev_priv)
  {
        intel_irq_fini(dev_priv);
        intel_power_domains_cleanup(dev_priv);
 -      intel_uc_cleanup_early(dev_priv);
 +      intel_uc_cleanup_early(&dev_priv->gt.uc);
        i915_gem_cleanup_early(dev_priv);
        i915_workqueues_cleanup(dev_priv);
 -      i915_engines_cleanup(dev_priv);
  
        pm_qos_remove_request(&dev_priv->sb_qos);
        mutex_destroy(&dev_priv->sb_lock);
  }
  
  /**
 - * i915_driver_init_mmio - setup device MMIO
 + * i915_driver_mmio_probe - setup device MMIO
   * @dev_priv: device private
   *
   * Setup minimal device state necessary for MMIO accesses later in the
   * side effects or exposing the driver via kernel internal or user space
   * interfaces.
   */
 -static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
 +static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
  {
        int ret;
  
 -      if (i915_inject_load_failure())
 +      if (i915_inject_probe_failure())
                return -ENODEV;
  
        if (i915_get_bridge_dev(dev_priv))
  
        intel_uncore_prune_mmio_domains(&dev_priv->uncore);
  
 -      intel_uc_init_mmio(dev_priv);
 +      intel_uc_init_mmio(&dev_priv->gt.uc);
  
        ret = intel_engines_init_mmio(dev_priv);
        if (ret)
@@@ -1022,12 -1024,11 +1022,12 @@@ err_bridge
  }
  
  /**
 - * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
 + * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
   * @dev_priv: device private
   */
 -static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 +static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
  {
 +      intel_engines_cleanup(dev_priv);
        intel_teardown_mchbar(dev_priv);
        intel_uncore_fini_mmio(&dev_priv->uncore);
        pci_dev_put(dev_priv->bridge_dev);
@@@ -1519,18 -1520,18 +1519,18 @@@ static void edram_detect(struct drm_i91
  }
  
  /**
 - * i915_driver_init_hw - setup state requiring device access
 + * i915_driver_hw_probe - setup state requiring device access
   * @dev_priv: device private
   *
   * Setup state that requires accessing the device, but doesn't require
   * exposing the driver via kernel internal or userspace interfaces.
   */
 -static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 +static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
  {
        struct pci_dev *pdev = dev_priv->drm.pdev;
        int ret;
  
 -      if (i915_inject_load_failure())
 +      if (i915_inject_probe_failure())
                return -ENODEV;
  
        intel_device_info_runtime_init(dev_priv);
        if (ret)
                goto err_ggtt;
  
 +      intel_gt_init_hw(dev_priv);
 +
        ret = i915_ggtt_enable_hw(dev_priv);
        if (ret) {
                DRM_ERROR("failed to enable GGTT\n");
        pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
                           PM_QOS_DEFAULT_VALUE);
  
 -      intel_uncore_sanitize(dev_priv);
 +      /* BIOS often leaves RC6 enabled, but disable it for hw init */
 +      intel_sanitize_gt_powersave(dev_priv);
  
        intel_gt_init_workarounds(dev_priv);
  
@@@ -1679,17 -1677,17 +1679,17 @@@ err_msi
                pci_disable_msi(pdev);
        pm_qos_remove_request(&dev_priv->pm_qos);
  err_ggtt:
 -      i915_ggtt_cleanup_hw(dev_priv);
 +      i915_ggtt_driver_release(dev_priv);
  err_perf:
        i915_perf_fini(dev_priv);
        return ret;
  }
  
  /**
 - * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
 + * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
   * @dev_priv: device private
   */
 -static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
 +static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
  {
        struct pci_dev *pdev = dev_priv->drm.pdev;
  
@@@ -1865,17 -1863,17 +1865,17 @@@ static void i915_driver_destroy(struct 
  }
  
  /**
 - * i915_driver_load - setup chip and create an initial config
 + * i915_driver_probe - setup chip and create an initial config
   * @pdev: PCI device
   * @ent: matching PCI ID entry
   *
 - * The driver load routine has to do several things:
 + * The driver probe routine has to do several things:
   *   - drive output discovery via intel_modeset_init()
   *   - initialize the memory manager
   *   - allocate initial config memory
   *   - setup the DRM framebuffer with the allocated memory
   */
 -int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
 +int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  {
        const struct intel_device_info *match_info =
                (struct intel_device_info *)ent->driver_data;
        if (ret)
                goto out_fini;
  
 -      ret = i915_driver_init_early(dev_priv);
 +      ret = i915_driver_early_probe(dev_priv);
        if (ret < 0)
                goto out_pci_disable;
  
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
  
 -      ret = i915_driver_init_mmio(dev_priv);
 +      i915_detect_vgpu(dev_priv);
 +
 +      ret = i915_driver_mmio_probe(dev_priv);
        if (ret < 0)
                goto out_runtime_pm_put;
  
 -      ret = i915_driver_init_hw(dev_priv);
 +      ret = i915_driver_hw_probe(dev_priv);
        if (ret < 0)
                goto out_cleanup_mmio;
  
 -      ret = i915_load_modeset_init(&dev_priv->drm);
 +      ret = i915_driver_modeset_probe(&dev_priv->drm);
        if (ret < 0)
                goto out_cleanup_hw;
  
        return 0;
  
  out_cleanup_hw:
 -      i915_driver_cleanup_hw(dev_priv);
 -      i915_ggtt_cleanup_hw(dev_priv);
 +      i915_driver_hw_remove(dev_priv);
 +      i915_ggtt_driver_release(dev_priv);
 +
 +      /* Paranoia: make sure we have disabled everything before we exit. */
 +      intel_sanitize_gt_powersave(dev_priv);
  out_cleanup_mmio:
 -      i915_driver_cleanup_mmio(dev_priv);
 +      i915_driver_mmio_release(dev_priv);
  out_runtime_pm_put:
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 -      i915_driver_cleanup_early(dev_priv);
 +      i915_driver_late_release(dev_priv);
  out_pci_disable:
        pci_disable_device(pdev);
  out_fini:
 -      i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
 +      i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
        i915_driver_destroy(dev_priv);
        return ret;
  }
  
 -void i915_driver_unload(struct drm_device *dev)
 +void i915_driver_remove(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct pci_dev *pdev = dev_priv->drm.pdev;
         * all in-flight requests so that we can quickly unbind the active
         * resources.
         */
 -      i915_gem_set_wedged(dev_priv);
 +      intel_gt_set_wedged(&dev_priv->gt);
  
        /* Flush any external code that still may be under the RCU lock */
        synchronize_rcu();
  
        drm_atomic_helper_shutdown(dev);
  
 -      intel_gvt_cleanup(dev_priv);
 +      intel_gvt_driver_remove(dev_priv);
  
 -      intel_modeset_cleanup(dev);
 +      intel_modeset_driver_remove(dev);
  
 -      intel_bios_cleanup(dev_priv);
 +      intel_bios_driver_remove(dev_priv);
  
        vga_switcheroo_unregister_client(pdev);
        vga_client_register(pdev, NULL, NULL, NULL);
        intel_csr_ucode_fini(dev_priv);
  
        /* Free error state after interrupts are fully disabled. */
 -      cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
 +      cancel_delayed_work_sync(&dev_priv->gt.hangcheck.work);
        i915_reset_error_state(dev_priv);
  
 -      i915_gem_fini_hw(dev_priv);
 +      i915_gem_driver_remove(dev_priv);
  
 -      intel_power_domains_fini_hw(dev_priv);
 +      intel_power_domains_driver_remove(dev_priv);
  
 -      i915_driver_cleanup_hw(dev_priv);
 +      i915_driver_hw_remove(dev_priv);
  
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
  }
@@@ -1995,19 -1988,15 +1995,19 @@@ static void i915_driver_release(struct 
  
        disable_rpm_wakeref_asserts(rpm);
  
 -      i915_gem_fini(dev_priv);
 +      i915_gem_driver_release(dev_priv);
 +
 +      i915_ggtt_driver_release(dev_priv);
  
 -      i915_ggtt_cleanup_hw(dev_priv);
 -      i915_driver_cleanup_mmio(dev_priv);
 +      /* Paranoia: make sure we have disabled everything before we exit. */
 +      intel_sanitize_gt_powersave(dev_priv);
 +
 +      i915_driver_mmio_release(dev_priv);
  
        enable_rpm_wakeref_asserts(rpm);
 -      intel_runtime_pm_cleanup(rpm);
 +      intel_runtime_pm_driver_release(rpm);
  
 -      i915_driver_cleanup_early(dev_priv);
 +      i915_driver_late_release(dev_priv);
        i915_driver_destroy(dev_priv);
  }
  
@@@ -2200,7 -2189,7 +2200,7 @@@ static int i915_drm_suspend_late(struc
  out:
        enable_rpm_wakeref_asserts(rpm);
        if (!dev_priv->uncore.user_forcewake.count)
 -              intel_runtime_pm_cleanup(rpm);
 +              intel_runtime_pm_driver_release(rpm);
  
        return ret;
  }
@@@ -2359,7 -2348,7 +2359,7 @@@ static int i915_drm_resume_early(struc
  
        intel_uncore_resume_early(&dev_priv->uncore);
  
 -      i915_check_and_clear_faults(dev_priv);
 +      intel_gt_check_and_clear_faults(&dev_priv->gt);
  
        if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
                gen9_sanitize_dc_state(dev_priv);
                hsw_disable_pc8(dev_priv);
        }
  
 -      intel_uncore_sanitize(dev_priv);
 +      intel_sanitize_gt_powersave(dev_priv);
  
        intel_power_domains_resume(dev_priv);
  
 -      intel_gt_sanitize(dev_priv, true);
 +      intel_gt_sanitize(&dev_priv->gt, true);
  
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
  
@@@ -2395,7 -2384,8 +2395,7 @@@ static int i915_resume_switcheroo(struc
  
  static int i915_pm_prepare(struct device *kdev)
  {
 -      struct pci_dev *pdev = to_pci_dev(kdev);
 -      struct drm_device *dev = pci_get_drvdata(pdev);
 +      struct drm_device *dev = dev_get_drvdata(kdev);
  
        if (!dev) {
                dev_err(kdev, "DRM not initialized, aborting suspend.\n");
  
  static int i915_pm_suspend(struct device *kdev)
  {
 -      struct pci_dev *pdev = to_pci_dev(kdev);
 -      struct drm_device *dev = pci_get_drvdata(pdev);
 +      struct drm_device *dev = dev_get_drvdata(kdev);
  
        if (!dev) {
                dev_err(kdev, "DRM not initialized, aborting suspend.\n");
@@@ -2904,7 -2895,8 +2904,7 @@@ static int vlv_resume_prepare(struct dr
  
  static int intel_runtime_suspend(struct device *kdev)
  {
 -      struct pci_dev *pdev = to_pci_dev(kdev);
 -      struct drm_device *dev = pci_get_drvdata(pdev);
 +      struct drm_device *dev = dev_get_drvdata(kdev);
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
        int ret;
         */
        i915_gem_runtime_suspend(dev_priv);
  
 -      intel_uc_runtime_suspend(dev_priv);
 +      intel_uc_runtime_suspend(&dev_priv->gt.uc);
  
        intel_runtime_pm_disable_interrupts(dev_priv);
  
  
                intel_runtime_pm_enable_interrupts(dev_priv);
  
 -              intel_uc_resume(dev_priv);
 +              intel_uc_resume(&dev_priv->gt.uc);
  
 -              i915_gem_init_swizzling(dev_priv);
 +              intel_gt_init_swizzling(&dev_priv->gt);
                i915_gem_restore_fences(dev_priv);
  
                enable_rpm_wakeref_asserts(rpm);
        }
  
        enable_rpm_wakeref_asserts(rpm);
 -      intel_runtime_pm_cleanup(rpm);
 +      intel_runtime_pm_driver_release(rpm);
  
        if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
                DRM_ERROR("Unclaimed access detected prior to suspending\n");
  
  static int intel_runtime_resume(struct device *kdev)
  {
 -      struct pci_dev *pdev = to_pci_dev(kdev);
 -      struct drm_device *dev = pci_get_drvdata(pdev);
 +      struct drm_device *dev = dev_get_drvdata(kdev);
        struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
        int ret = 0;
  
        intel_runtime_pm_enable_interrupts(dev_priv);
  
 -      intel_uc_resume(dev_priv);
 +      intel_uc_resume(&dev_priv->gt.uc);
  
        /*
         * No point of rolling back things in case of an error, as the best
         * we can do is to hope that things will still work (and disable RPM).
         */
 -      i915_gem_init_swizzling(dev_priv);
 +      intel_gt_init_swizzling(&dev_priv->gt);
        i915_gem_restore_fences(dev_priv);
  
        /*
@@@ -3195,9 -3188,9 +3195,9 @@@ static const struct drm_ioctl_desc i915
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
  };
@@@ -3207,7 -3200,7 +3207,7 @@@ static struct drm_driver driver = 
         * deal with them for Intel hardware.
         */
        .driver_features =
-           DRIVER_GEM | DRIVER_PRIME |
+           DRIVER_GEM |
            DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
        .release = i915_driver_release,
        .open = i915_driver_open,
        .gem_prime_export = i915_gem_prime_export,
        .gem_prime_import = i915_gem_prime_import,
  
 +      .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
 +      .get_scanout_position = i915_get_crtc_scanoutpos,
 +
        .dumb_create = i915_gem_dumb_create,
        .dumb_map_offset = i915_gem_mmap_gtt,
        .ioctls = i915_ioctls,
  
  #include "gt/intel_lrc.h"
  #include "gt/intel_engine.h"
 +#include "gt/intel_gt_types.h"
  #include "gt/intel_workarounds.h"
 +#include "gt/uc/intel_uc.h"
  
  #include "intel_device_info.h"
  #include "intel_runtime_pm.h"
 -#include "intel_uc.h"
  #include "intel_uncore.h"
  #include "intel_wakeref.h"
  #include "intel_wopcm.h"
@@@ -89,7 -88,7 +89,7 @@@
  #include "i915_gpu_error.h"
  #include "i915_request.h"
  #include "i915_scheduler.h"
 -#include "i915_timeline.h"
 +#include "gt/intel_timeline.h"
  #include "i915_vma.h"
  
  #include "intel_gvt.h"
@@@ -99,8 -98,8 +99,8 @@@
  
  #define DRIVER_NAME           "i915"
  #define DRIVER_DESC           "Intel Graphics"
 -#define DRIVER_DATE           "20190619"
 -#define DRIVER_TIMESTAMP      1560947544
 +#define DRIVER_DATE           "20190730"
 +#define DRIVER_TIMESTAMP      1564512624
  
  /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
   * WARN_ON()) for hw state sanity checks to check for unexpected conditions
  
  #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
  
 -bool __i915_inject_load_failure(const char *func, int line);
 -#define i915_inject_load_failure() \
 -      __i915_inject_load_failure(__func__, __LINE__)
 +bool __i915_inject_probe_failure(const char *func, int line);
 +#define i915_inject_probe_failure() \
 +      __i915_inject_probe_failure(__func__, __LINE__)
  
  bool i915_error_injected(void);
  
  #else
  
 -#define i915_inject_load_failure() false
 +#define i915_inject_probe_failure() false
  #define i915_error_injected() false
  
  #endif
  
 -#define i915_load_error(i915, fmt, ...)                                        \
 +#define i915_probe_error(i915, fmt, ...)                                 \
        __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
                      fmt, ##__VA_ARGS__)
  
@@@ -163,7 -162,7 +163,7 @@@ enum hpd_pin 
  #define HPD_STORM_DEFAULT_THRESHOLD 50
  
  struct i915_hotplug {
 -      struct work_struct hotplug_work;
 +      struct delayed_work hotplug_work;
  
        struct {
                unsigned long last_jiffies;
                } state;
        } stats[HPD_NUM_PINS];
        u32 event_bits;
 +      u32 retry_bits;
        struct delayed_work reenable_work;
  
        u32 long_port_mask;
@@@ -288,14 -286,14 +288,14 @@@ struct drm_i915_display_funcs 
                          enum pipe pipe);
        int (*get_fifo_size)(struct drm_i915_private *dev_priv,
                             enum i9xx_plane_id i9xx_plane);
 -      int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
 -      int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
 +      int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
 +      int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
        void (*initial_watermarks)(struct intel_atomic_state *state,
 -                                 struct intel_crtc_state *cstate);
 +                                 struct intel_crtc_state *crtc_state);
        void (*atomic_update_watermarks)(struct intel_atomic_state *state,
 -                                       struct intel_crtc_state *cstate);
 +                                       struct intel_crtc_state *crtc_state);
        void (*optimize_watermarks)(struct intel_atomic_state *state,
 -                                  struct intel_crtc_state *cstate);
 +                                  struct intel_crtc_state *crtc_state);
        int (*compute_global_watermarks)(struct intel_atomic_state *state);
        void (*update_wm)(struct intel_crtc *crtc);
        int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
        int (*crtc_compute_clock)(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state);
        void (*crtc_enable)(struct intel_crtc_state *pipe_config,
 -                          struct drm_atomic_state *old_state);
 +                          struct intel_atomic_state *old_state);
        void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
 -                           struct drm_atomic_state *old_state);
 -      void (*update_crtcs)(struct drm_atomic_state *state);
 +                           struct intel_atomic_state *old_state);
 +      void (*update_crtcs)(struct intel_atomic_state *state);
        void (*audio_codec_enable)(struct intel_encoder *encoder,
                                   const struct intel_crtc_state *crtc_state,
                                   const struct drm_connector_state *conn_state);
@@@ -537,7 -535,6 +537,7 @@@ enum intel_pch 
        PCH_CNP,        /* Cannon/Comet Lake PCH */
        PCH_ICP,        /* Ice Lake PCH */
        PCH_MCC,        /* Mule Creek Canyon PCH */
 +      PCH_TGP,        /* Tiger Lake PCH */
  };
  
  #define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@@ -784,6 -781,9 +784,6 @@@ struct i915_gem_mm 
         */
        struct vfsmount *gemfs;
  
 -      /** PPGTT used for aliasing the PPGTT with the GTT */
 -      struct i915_ppgtt *aliasing_ppgtt;
 -
        struct notifier_block oom_notifier;
        struct notifier_block vmap_notifier;
        struct shrinker shrinker;
@@@ -1347,6 -1347,9 +1347,6 @@@ struct drm_i915_private 
  
        struct intel_wopcm wopcm;
  
 -      struct intel_huc huc;
 -      struct intel_guc guc;
 -
        struct intel_csr csr;
  
        struct intel_gmbus gmbus[GMBUS_NUM_PINS];
        struct intel_engine_cs *engine[I915_NUM_ENGINES];
        /* Context used internally to idle the GPU and setup initial state */
        struct i915_gem_context *kernel_context;
 -      /* Context only to be used for injecting preemption commands */
 -      struct i915_gem_context *preempt_context;
        struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
                                            [MAX_ENGINE_INSTANCE + 1];
  
                u32 de_irq_mask[I915_MAX_PIPES];
        };
        u32 gt_irq_mask;
 -      u32 pm_imr;
 -      u32 pm_ier;
        u32 pm_rps_events;
 -      u32 pm_guc_events;
        u32 pipestat_irq_mask[I915_MAX_PIPES];
  
        struct i915_hotplug hotplug;
        DECLARE_HASHTABLE(mm_structs, 7);
        struct mutex mm_lock;
  
 -      struct intel_ppat ppat;
 -
        /* Kernel Modesetting */
  
        struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
                /*
                 * Should be held around atomic WM register writing; also
                 * protects intel_crtc->wm.active and
 -               * cstate->wm.need_postvbl_update.
 +               * crtc_state->wm.need_postvbl_update.
                 */
                struct mutex wm_mutex;
  
        } dram_info;
  
        struct intel_bw_info {
 -              int num_planes;
 -              int deratedbw[3];
 +              unsigned int deratedbw[3]; /* for each QGV point */
 +              u8 num_qgv_points;
 +              u8 num_planes;
        } max_bw[6];
  
        struct drm_private_obj bw_obj;
        } perf;
  
        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
 -      struct {
 -              struct i915_gt_timelines {
 -                      struct mutex mutex; /* protects list, tainted by GPU */
 -                      struct list_head active_list;
 -
 -                      /* Pack multiple timelines' seqnos into the same page */
 -                      spinlock_t hwsp_lock;
 -                      struct list_head hwsp_free_list;
 -              } timelines;
 -
 -              struct list_head active_rings;
 -
 -              struct intel_wakeref wakeref;
 -
 -              struct list_head closed_vma;
 -              spinlock_t closed_lock; /* guards the list of closed_vma */
 -
 -              /**
 -               * Is the GPU currently considered idle, or busy executing
 -               * userspace requests? Whilst idle, we allow runtime power
 -               * management to power down the hardware and display clocks.
 -               * In order to reduce the effect on performance, there
 -               * is a slight delay before we do so.
 -               */
 -              intel_wakeref_t awake;
 -
 -              struct blocking_notifier_head pm_notifications;
 -
 -              ktime_t last_init_time;
 -
 -              struct i915_vma *scratch;
 -      } gt;
 +      struct intel_gt gt;
  
        struct {
                struct notifier_block pm_notifier;
@@@ -1900,6 -1940,21 +1900,6 @@@ static inline struct drm_i915_private *
        return container_of(wopcm, struct drm_i915_private, wopcm);
  }
  
 -static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
 -{
 -      return container_of(guc, struct drm_i915_private, guc);
 -}
 -
 -static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
 -{
 -      return container_of(huc, struct drm_i915_private, huc);
 -}
 -
 -static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore)
 -{
 -      return container_of(uncore, struct drm_i915_private, uncore);
 -}
 -
  /* Simple iterator over all initialised engines */
  #define for_each_engine(engine__, dev_priv__, id__) \
        for ((id__) = 0; \
@@@ -2071,7 -2126,6 +2071,7 @@@ IS_SUBPLATFORM(const struct drm_i915_pr
  #define IS_CANNONLAKE(dev_priv)       IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
  #define IS_ICELAKE(dev_priv)  IS_PLATFORM(dev_priv, INTEL_ICELAKE)
  #define IS_ELKHARTLAKE(dev_priv)      IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
 +#define IS_TIGERLAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
  #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
                                    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
  #define IS_BDW_ULT(dev_priv) \
  
  #define HAS_IPC(dev_priv)              (INTEL_INFO(dev_priv)->display.has_ipc)
  
 -/*
 - * For now, anything with a GuC requires uCode loading, and then supports
 - * command submission once loaded. But these are logically independent
 - * properties, so we have separate macros to test them.
 - */
 -#define HAS_GUC(dev_priv)     (INTEL_INFO(dev_priv)->has_guc)
 -#define HAS_GUC_UCODE(dev_priv)       (HAS_GUC(dev_priv))
 -#define HAS_GUC_SCHED(dev_priv)       (HAS_GUC(dev_priv))
 +#define HAS_GT_UC(dev_priv)   (INTEL_INFO(dev_priv)->has_gt_uc)
  
 -/* For now, anything with a GuC has also HuC */
 -#define HAS_HUC(dev_priv)     (HAS_GUC(dev_priv))
 -#define HAS_HUC_UCODE(dev_priv)       (HAS_GUC(dev_priv))
 -
 -/* Having a GuC is not the same as using a GuC */
 -#define USES_GUC(dev_priv)            intel_uc_is_using_guc(dev_priv)
 -#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(dev_priv)
 -#define USES_HUC(dev_priv)            intel_uc_is_using_huc(dev_priv)
 +/* Having GuC/HuC is not the same as using GuC/HuC */
 +#define USES_GUC(dev_priv)            intel_uc_is_using_guc(&(dev_priv)->gt.uc)
 +#define USES_GUC_SUBMISSION(dev_priv) intel_uc_is_using_guc_submission(&(dev_priv)->gt.uc)
 +#define USES_HUC(dev_priv)            intel_uc_is_using_huc(&(dev_priv)->gt.uc)
  
  #define HAS_POOLED_EU(dev_priv)       (INTEL_INFO(dev_priv)->has_pooled_eu)
  
  #define INTEL_PCH_CMP_DEVICE_ID_TYPE          0x0280
  #define INTEL_PCH_ICP_DEVICE_ID_TYPE          0x3480
  #define INTEL_PCH_MCC_DEVICE_ID_TYPE          0x4B00
 +#define INTEL_PCH_MCC2_DEVICE_ID_TYPE         0x3880
 +#define INTEL_PCH_TGP_DEVICE_ID_TYPE          0xA080
  #define INTEL_PCH_P2X_DEVICE_ID_TYPE          0x7100
  #define INTEL_PCH_P3X_DEVICE_ID_TYPE          0x7000
  #define INTEL_PCH_QEMU_DEVICE_ID_TYPE         0x2900 /* qemu q35 has 2918 */
  #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
  #define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
  #define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC)
 +#define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP)
  #define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
  #define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
  #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
@@@ -2363,18 -2425,40 +2363,18 @@@ __i915_printk(struct drm_i915_private *
        __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
  
  #ifdef CONFIG_COMPAT
 -extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 -                            unsigned long arg);
 +long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
  #else
  #define i915_compat_ioctl NULL
  #endif
  extern const struct dev_pm_ops i915_pm_ops;
  
 -extern int i915_driver_load(struct pci_dev *pdev,
 -                          const struct pci_device_id *ent);
 -extern void i915_driver_unload(struct drm_device *dev);
 +int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 +void i915_driver_remove(struct drm_device *dev);
  
 -extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 -extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
 +void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
  int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
  
 -u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);
 -
 -static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
 -{
 -      unsigned long delay;
 -
 -      if (unlikely(!i915_modparams.enable_hangcheck))
 -              return;
 -
 -      /* Don't continually defer the hangcheck so that it is always run at
 -       * least once after work has been scheduled on any ring. Otherwise,
 -       * we will ignore a hung ring if a second ring is kept busy.
 -       */
 -
 -      delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
 -      queue_delayed_work(system_long_wq,
 -                         &dev_priv->gpu_error.hangcheck_work, delay);
 -}
 -
  static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
  {
        return dev_priv->gvt;
@@@ -2396,17 -2480,18 +2396,17 @@@ int i915_gem_freeze_late(struct drm_i91
  
  static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
  {
 -      if (!atomic_read(&i915->mm.free_count))
 -              return;
 -
 -      /* A single pass should suffice to release all the freed objects (along
 +      /*
 +       * A single pass should suffice to release all the freed objects (along
         * most call paths), but be a little more paranoid in that freeing
         * the objects does take a little amount of time, during which the rcu
         * callbacks could have added new objects into the freed list, and
         * armed the work again.
         */
 -      do {
 +      while (atomic_read(&i915->mm.free_count)) {
 +              flush_work(&i915->mm.free_work);
                rcu_barrier();
 -      } while (flush_work(&i915->mm.free_work));
 +      }
  }
  
  static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
         */
        int pass = 3;
        do {
 +              flush_workqueue(i915->wq);
                rcu_barrier();
                i915_gem_drain_freed_objects(i915);
        } while (--pass);
@@@ -2438,9 -2522,7 +2438,9 @@@ i915_gem_object_ggtt_pin(struct drm_i91
                         u64 alignment,
                         u64 flags);
  
 -int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 +int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
 +                         unsigned long flags);
 +#define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
  
  void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
  
@@@ -2463,22 -2545,36 +2463,22 @@@ void i915_gem_track_fb(struct drm_i915_
  
  int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
  
 -static inline bool __i915_wedged(struct i915_gpu_error *error)
 -{
 -      return unlikely(test_bit(I915_WEDGED, &error->flags));
 -}
 -
 -static inline bool i915_reset_failed(struct drm_i915_private *i915)
 -{
 -      return __i915_wedged(&i915->gpu_error);
 -}
 -
  static inline u32 i915_reset_count(struct i915_gpu_error *error)
  {
 -      return READ_ONCE(error->reset_count);
 +      return atomic_read(&error->reset_count);
  }
  
  static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
                                          struct intel_engine_cs *engine)
  {
 -      return READ_ONCE(error->reset_engine_count[engine->id]);
 +      return atomic_read(&error->reset_engine_count[engine->uabi_class]);
  }
  
 -void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 -bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
 -
  void i915_gem_init_mmio(struct drm_i915_private *i915);
  int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
  int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
 -void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
 -void i915_gem_fini_hw(struct drm_i915_private *dev_priv);
 -void i915_gem_fini(struct drm_i915_private *dev_priv);
 +void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
 +void i915_gem_driver_release(struct drm_i915_private *dev_priv);
  int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
                           unsigned int flags, long timeout);
  void i915_gem_suspend(struct drm_i915_private *dev_priv);
@@@ -2495,8 -2591,7 +2495,7 @@@ int i915_gem_object_set_cache_level(str
  struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                struct dma_buf *dma_buf);
  
- struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
-                               struct drm_gem_object *gem_obj, int flags);
+ struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);
  
  static inline struct i915_gem_context *
  __i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
@@@ -2539,6 -2634,16 +2538,6 @@@ int __must_check i915_gem_evict_for_nod
                                         unsigned int flags);
  int i915_gem_evict_vm(struct i915_address_space *vm);
  
 -void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);
 -
 -/* belongs in i915_gem_gtt.h */
 -static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
 -{
 -      wmb();
 -      if (INTEL_GEN(dev_priv) < 6)
 -              intel_gtt_chipset_flush();
 -}
 -
  /* i915_gem_stolen.c */
  int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
                                struct drm_mm_node *node, u64 size,
@@@ -2610,14 -2715,14 +2609,14 @@@ int intel_engine_cmd_parser(struct inte
                            bool is_master);
  
  /* i915_perf.c */
 -extern void i915_perf_init(struct drm_i915_private *dev_priv);
 -extern void i915_perf_fini(struct drm_i915_private *dev_priv);
 -extern void i915_perf_register(struct drm_i915_private *dev_priv);
 -extern void i915_perf_unregister(struct drm_i915_private *dev_priv);
 +void i915_perf_init(struct drm_i915_private *dev_priv);
 +void i915_perf_fini(struct drm_i915_private *dev_priv);
 +void i915_perf_register(struct drm_i915_private *dev_priv);
 +void i915_perf_unregister(struct drm_i915_private *dev_priv);
  
  /* i915_suspend.c */
 -extern int i915_save_state(struct drm_i915_private *dev_priv);
 -extern int i915_restore_state(struct drm_i915_private *dev_priv);
 +int i915_save_state(struct drm_i915_private *dev_priv);
 +int i915_restore_state(struct drm_i915_private *dev_priv);
  
  /* i915_sysfs.c */
  void i915_setup_sysfs(struct drm_i915_private *dev_priv);
@@@ -2631,22 -2736,23 +2630,22 @@@ mkwrite_device_info(struct drm_i915_pri
  }
  
  /* modesetting */
 -extern void intel_modeset_init_hw(struct drm_device *dev);
 -extern int intel_modeset_init(struct drm_device *dev);
 -extern void intel_modeset_cleanup(struct drm_device *dev);
 -extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
 -                                     bool state);
 -extern void intel_display_resume(struct drm_device *dev);
 -extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
 -extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
 -extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
 +void intel_modeset_init_hw(struct drm_device *dev);
 +int intel_modeset_init(struct drm_device *dev);
 +void intel_modeset_driver_remove(struct drm_device *dev);
 +int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state);
 +void intel_display_resume(struct drm_device *dev);
 +void i915_redisable_vga(struct drm_i915_private *dev_priv);
 +void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
 +void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
  
  int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);
  
 -extern struct intel_display_error_state *
 +struct intel_display_error_state *
  intel_display_capture_error_state(struct drm_i915_private *dev_priv);
 -extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 -                                          struct intel_display_error_state *error);
 +void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 +                                   struct intel_display_error_state *error);
  
  #define __I915_REG_OP(op__, dev_priv__, ...) \
        intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
@@@ -2722,6 -2828,11 +2721,6 @@@ static inline int intel_hws_csb_write_i
                return I915_HWS_CSB_WRITE_INDEX;
  }
  
 -static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
 -{
 -      return i915_ggtt_offset(i915->gt.scratch);
 -}
 -
  static inline enum i915_map_type
  i915_coherent_map_type(struct drm_i915_private *i915)
  {
diff --combined drivers/gpu/drm/msm/msm_drv.c
@@@ -619,7 -619,7 +619,7 @@@ static int context_init(struct drm_devi
  
        msm_submitqueue_init(dev, ctx);
  
 -      ctx->aspace = priv->gpu->aspace;
 +      ctx->aspace = priv->gpu ? priv->gpu->aspace : NULL;
        file->driver_priv = ctx;
  
        return 0;
@@@ -984,17 -984,17 +984,17 @@@ static int msm_ioctl_submitqueue_close(
  }
  
  static const struct drm_ioctl_desc msm_ioctls[] = {
-       DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
-       DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(MSM_GEM_INFO,     msm_ioctl_gem_info,     DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW),
  };
  
  static const struct vm_operations_struct vm_ops = {
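Dropping DRM_AUTH from the table above removes the DRM-master authentication requirement for these ioctls on the primary node; DRM_RENDER_ALLOW already exposed them on render nodes. A simplified sketch of the gate being relaxed (not the literal drm_ioctl.c code):

        /* Simplified: with DRM_AUTH cleared, this check no longer rejects
         * unauthenticated primary-node clients.
         */
        static int check_auth(const struct drm_ioctl_desc *ioctl,
                              struct drm_file *file_priv)
        {
                if ((ioctl->flags & DRM_AUTH) &&
                    !drm_is_render_client(file_priv) &&
                    !file_priv->authenticated)
                        return -EACCES;
                return 0;
        }
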
@@@ -1017,7 -1017,6 +1017,6 @@@ static const struct file_operations fop
  
  static struct drm_driver msm_driver = {
        .driver_features    = DRIVER_GEM |
-                               DRIVER_PRIME |
                                DRIVER_RENDER |
                                DRIVER_ATOMIC |
                                DRIVER_MODESET,
        .dumb_map_offset    = msm_gem_dumb_map_offset,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-       .gem_prime_export   = drm_gem_prime_export,
-       .gem_prime_import   = drm_gem_prime_import,
        .gem_prime_pin      = msm_gem_prime_pin,
        .gem_prime_unpin    = msm_gem_prime_unpin,
        .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
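With .gem_prime_export and .gem_prime_import left unset, the core PRIME helpers fall back to drm_gem_prime_export()/drm_gem_prime_import() on their own, and DRIVER_PRIME is no longer needed as a feature flag. A simplified sketch of the export-side fallback this relies on (not the literal drm_prime.c code):

        /* Simplified core-side fallback: a NULL driver hook means the
         * generic helper is used.
         */
        static struct dma_buf *export_obj(struct drm_device *dev,
                                          struct drm_gem_object *obj, int flags)
        {
                if (dev->driver->gem_prime_export)
                        return dev->driver->gem_prime_export(obj, flags);
                return drm_gem_prime_export(obj, flags);
        }
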
@@@ -1279,8 -1276,7 +1276,8 @@@ static int add_gpu_components(struct de
        if (!np)
                return 0;
  
 -      drm_of_component_match_add(dev, matchptr, compare_of, np);
 +      if (of_device_is_available(np))
 +              drm_of_component_match_add(dev, matchptr, compare_of, np);
  
        of_node_put(np);
  
diff --combined drivers/gpu/drm/msm/msm_gem.c
@@@ -32,46 -32,6 +32,46 @@@ static bool use_pages(struct drm_gem_ob
        return !msm_obj->vram_node;
  }
  
 +/*
 + * Cache sync.. this is a bit over-complicated, to fit the dma-mapping
 + * API.  Really the GPU cache is out of scope here (handled on cmdstream)
 + * and all we need to do is invalidate newly allocated pages before
 + * mapping to CPU as uncached/writecombine.
 + *
 + * On top of this, we have the added headache that, depending on
 + * display generation, the display's iommu may be wired up to either
 + * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 + * that here we either have dma-direct or iommu ops.
 + *
 + * Let this be a cautionary tale of abstraction gone wrong.
 + */
 +
 +static void sync_for_device(struct msm_gem_object *msm_obj)
 +{
 +      struct device *dev = msm_obj->base.dev->dev;
 +
 +      if (get_dma_ops(dev)) {
 +              dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
 +                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 +      } else {
 +              dma_map_sg(dev, msm_obj->sgt->sgl,
 +                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 +      }
 +}
 +
 +static void sync_for_cpu(struct msm_gem_object *msm_obj)
 +{
 +      struct device *dev = msm_obj->base.dev->dev;
 +
 +      if (get_dma_ops(dev)) {
 +              dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
 +                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 +      } else {
 +              dma_unmap_sg(dev, msm_obj->sgt->sgl,
 +                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 +      }
 +}
 +
  /* allocate pages from VRAM carveout, used when no IOMMU: */
  static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
  {
@@@ -137,7 -97,8 +137,7 @@@ static struct page **get_pages(struct d
                 * because display controller, GPU, etc. are not coherent:
                 */
                if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 -                      dma_map_sg(dev->dev, msm_obj->sgt->sgl,
 -                                      msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 +                      sync_for_device(msm_obj);
        }
  
        return msm_obj->pages;
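The sync_for_device()/sync_for_cpu() helpers introduced above hinge on a single test: whether the device has dma_map_ops attached. A hypothetical helper spelling that out (not part of the patch; it assumes NULL ops means dma-direct, where dma_map_sg()/dma_unmap_sg() double as the cache maintenance calls):

        /* Hypothetical helper, for illustration only: non-NULL ops means an
         * IOMMU (or other dma_map_ops) backend, so only a cache sync is
         * wanted; NULL means dma-direct, where map/unmap do the flushing.
         */
        static bool msm_obj_has_dma_ops(struct msm_gem_object *msm_obj)
        {
                return get_dma_ops(msm_obj->base.dev->dev) != NULL;
        }
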
@@@ -166,7 -127,9 +166,7 @@@ static void put_pages(struct drm_gem_ob
                         * GPU, etc. are not coherent:
                         */
                        if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
 -                              dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
 -                                           msm_obj->sgt->nents,
 -                                           DMA_BIDIRECTIONAL);
 +                              sync_for_cpu(msm_obj);
  
                        sg_free_table(msm_obj->sgt);
                        kfree(msm_obj->sgt);
@@@ -975,7 -938,6 +975,6 @@@ int msm_gem_new_handle(struct drm_devic
  
  static int msm_gem_new_impl(struct drm_device *dev,
                uint32_t size, uint32_t flags,
-               struct reservation_object *resv,
                struct drm_gem_object **obj,
                bool struct_mutex_locked)
  {
        msm_obj->flags = flags;
        msm_obj->madv = MSM_MADV_WILLNEED;
  
-       if (resv)
-               msm_obj->base.resv = resv;
        INIT_LIST_HEAD(&msm_obj->submit_entry);
        INIT_LIST_HEAD(&msm_obj->vmas);
  
@@@ -1046,7 -1005,7 +1042,7 @@@ static struct drm_gem_object *_msm_gem_
        if (size == 0)
                return ERR_PTR(-EINVAL);
  
-       ret = msm_gem_new_impl(dev, size, flags, NULL, &obj, struct_mutex_locked);
+       ret = msm_gem_new_impl(dev, size, flags, &obj, struct_mutex_locked);
        if (ret)
                goto fail;
  
@@@ -1123,7 -1082,7 +1119,7 @@@ struct drm_gem_object *msm_gem_import(s
  
        size = PAGE_ALIGN(dmabuf->size);
  
-       ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj, false);
+       ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj, false);
        if (ret)
                goto fail;