Merge tag 'drm-misc-next-2021-08-12' of git://anongit.freedesktop.org/drm/drm-misc...
author	Dave Airlie <airlied@redhat.com>
	Mon, 16 Aug 2021 02:57:26 +0000 (12:57 +1000)
committer	Dave Airlie <airlied@redhat.com>
	Mon, 16 Aug 2021 02:57:33 +0000 (12:57 +1000)
drm-misc-next for v5.15:

UAPI Changes:

Cross-subsystem Changes:
- Add lockdep_assert(once) helpers.

Core Changes:
- Add lockdep assert to drm_is_current_master_locked.
- Fix typos in dma-buf documentation.
- Mark drm irq midlayer as legacy only.
- Fix GPF in udmabuf_create.
- Rename member to correct value in drm_edid.h

Driver Changes:
- Build fix to make nouveau build with NOUVEAU_BACKLIGHT.
- Add MI101AIT-ICP1, LTTD800480070-L6WWH-RT panels.
- Assorted fixes to bridge/it66121, anx7625.
- Add custom crtc_state to simple helpers, and use it to
  convert pll handling in mgag200 to atomic.
- Convert drivers to use offset-adjusted framebuffer bo mappings.
- Assorted small fixes and a fix for a use-after-free in vmwgfx.
- Convert remaining non-legacy drivers to use Linux IRQs directly.
- Small cleanup in ingenic.
- Small fixes to virtio and ti-sn65dsi86.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/1cf2d7fc-402d-1852-574a-21cbbd2eaebf@linux.intel.com
448 files changed:
Documentation/gpu/i915.rst
Documentation/gpu/rfc/i915_parallel_execbuf.h [new file with mode: 0644]
Documentation/gpu/rfc/i915_scheduler.rst [new file with mode: 0644]
Documentation/gpu/rfc/index.rst
arch/arm/common/locomo.c
arch/arm/common/sa1111.c
arch/arm/mach-rpc/ecard.c
arch/mips/sgi-ip22/ip22-gio.c
arch/parisc/kernel/drivers.c
arch/powerpc/platforms/ps3/system-bus.c
arch/powerpc/platforms/pseries/ibmebus.c
arch/powerpc/platforms/pseries/vio.c
arch/s390/include/asm/eadm.h
arch/sparc/kernel/vio.c
drivers/acpi/bus.c
drivers/amba/bus.c
drivers/base/auxiliary.c
drivers/base/isa.c
drivers/base/platform.c
drivers/bcma/main.c
drivers/bus/sunxi-rsb.c
drivers/cxl/core.c
drivers/dax/bus.c
drivers/dma/idxd/sysfs.c
drivers/firewire/core-device.c
drivers/firmware/arm_scmi/bus.c
drivers/firmware/google/coreboot_table.c
drivers/fpga/dfl.c
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h [new file with mode: 0644]
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/dce_virtual.c [deleted file]
drivers/gpu/drm/amd/amdgpu/dce_virtual.h [deleted file]
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/si.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
drivers/gpu/drm/amd/amdgpu/umc_v6_7.h
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.h
drivers/gpu/drm/amd/display/amdgpu_dm/Makefile
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_trace.h
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h [new file with mode: 0644]
drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
drivers/gpu/drm/amd/display/dc/os_types.h
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_2_offset.h
drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_2_sh_mask.h
drivers/gpu/drm/amd/include/atomfirmware.h
drivers/gpu/drm/amd/pm/inc/smu_types.h
drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h
drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
drivers/gpu/drm/drm_dp_aux_bus.c
drivers/gpu/drm/i915/Kconfig.debug
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/i9xx_plane.c
drivers/gpu/drm/i915/display/intel_acpi.c
drivers/gpu/drm/i915/display/intel_acpi.h
drivers/gpu/drm/i915/display/intel_atomic_plane.c
drivers/gpu/drm/i915/display/intel_atomic_plane.h
drivers/gpu/drm/i915/display/intel_audio.c
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_bw.c
drivers/gpu/drm/i915/display/intel_cdclk.c
drivers/gpu/drm/i915/display/intel_color.c
drivers/gpu/drm/i915/display/intel_combo_phy.c
drivers/gpu/drm/i915/display/intel_crtc.c
drivers/gpu/drm/i915/display/intel_cursor.c
drivers/gpu/drm/i915/display/intel_ddi.c
drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display.h
drivers/gpu/drm/i915/display/intel_display_debugfs.c
drivers/gpu/drm/i915/display/intel_display_power.c
drivers/gpu/drm/i915/display/intel_display_power.h
drivers/gpu/drm/i915/display/intel_display_types.h
drivers/gpu/drm/i915/display/intel_dmc.c
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp_aux.c
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_dpll.c
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
drivers/gpu/drm/i915/display/intel_dpll_mgr.h
drivers/gpu/drm/i915/display/intel_fbc.c
drivers/gpu/drm/i915/display/intel_gmbus.c
drivers/gpu/drm/i915/display/intel_hdmi.c
drivers/gpu/drm/i915/display/intel_opregion.c
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/i915/display/intel_snps_phy.c [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_snps_phy.h [new file with mode: 0644]
drivers/gpu/drm/i915/display/intel_sprite.c
drivers/gpu/drm/i915/display/intel_vbt_defs.h
drivers/gpu/drm/i915/display/intel_vdsc.c
drivers/gpu/drm/i915/display/skl_scaler.c
drivers/gpu/drm/i915/display/skl_universal_plane.c
drivers/gpu/drm/i915/gem/i915_gem_clflush.c
drivers/gpu/drm/i915/gem/i915_gem_client_blt.c [deleted file]
drivers/gpu/drm/i915/gem/i915_gem_client_blt.h [deleted file]
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_context.h
drivers/gpu/drm/i915/gem/i915_gem_context_types.h
drivers/gpu/drm/i915/gem/i915_gem_create.c
drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
drivers/gpu/drm/i915/gem/i915_gem_domain.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_internal.c
drivers/gpu/drm/i915/gem/i915_gem_lmem.c
drivers/gpu/drm/i915/gem/i915_gem_lmem.h
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/i915_gem_object.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_object_blt.c [deleted file]
drivers/gpu/drm/i915/gem/i915_gem_object_blt.h [deleted file]
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gem/i915_gem_phys.c
drivers/gpu/drm/i915/gem/i915_gem_region.c
drivers/gpu/drm/i915/gem/i915_gem_region.h
drivers/gpu/drm/i915/gem/i915_gem_shmem.c
drivers/gpu/drm/i915/gem/i915_gem_stolen.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.c
drivers/gpu/drm/i915/gem/i915_gem_ttm.h
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/i915/gem/i915_gem_wait.c
drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c [new file with mode: 0644]
drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c [deleted file]
drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c
drivers/gpu/drm/i915/gem/selftests/mock_context.c
drivers/gpu/drm/i915/gem/selftests/mock_context.h
drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
drivers/gpu/drm/i915/gt/gen8_engine_cs.c
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
drivers/gpu/drm/i915/gt/intel_breadcrumbs.h
drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_context.h
drivers/gpu/drm/i915/gt/intel_context_param.c [deleted file]
drivers/gpu/drm/i915/gt/intel_context_param.h
drivers/gpu/drm/i915/gt/intel_context_types.h
drivers/gpu/drm/i915/gt/intel_engine.h
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
drivers/gpu/drm/i915/gt/intel_engine_pm.c
drivers/gpu/drm/i915/gt/intel_engine_types.h
drivers/gpu/drm/i915/gt/intel_engine_user.c
drivers/gpu/drm/i915/gt/intel_execlists_submission.c
drivers/gpu/drm/i915/gt/intel_execlists_submission.h
drivers/gpu/drm/i915/gt/intel_ggtt.c
drivers/gpu/drm/i915/gt/intel_gpu_commands.h
drivers/gpu/drm/i915/gt/intel_gt.c
drivers/gpu/drm/i915/gt/intel_gt.h
drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c
drivers/gpu/drm/i915/gt/intel_gt_irq.c
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/gt/intel_gt_requests.c
drivers/gpu/drm/i915/gt/intel_gt_requests.h
drivers/gpu/drm/i915/gt/intel_gt_types.h
drivers/gpu/drm/i915/gt/intel_gtt.c
drivers/gpu/drm/i915/gt/intel_gtt.h
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_lrc_reg.h
drivers/gpu/drm/i915/gt/intel_migrate.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/intel_migrate.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/intel_migrate_types.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/intel_mocs.c
drivers/gpu/drm/i915/gt/intel_rc6.c
drivers/gpu/drm/i915/gt/intel_region_lmem.c
drivers/gpu/drm/i915/gt/intel_renderstate.h
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/intel_ring.h
drivers/gpu/drm/i915/gt/intel_ring_submission.c
drivers/gpu/drm/i915/gt/intel_rps.c
drivers/gpu/drm/i915/gt/intel_rps.h
drivers/gpu/drm/i915/gt/intel_sseu.c
drivers/gpu/drm/i915/gt/intel_sseu.h
drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/gt/intel_workarounds_types.h
drivers/gpu/drm/i915/gt/mock_engine.c
drivers/gpu/drm/i915/gt/selftest_context.c
drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.h
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
drivers/gpu/drm/i915/gt/selftest_execlists.c
drivers/gpu/drm/i915/gt/selftest_hangcheck.c
drivers/gpu/drm/i915/gt/selftest_lrc.c
drivers/gpu/drm/i915/gt/selftest_migrate.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/selftest_mocs.c
drivers/gpu/drm/i915/gt/selftest_reset.c
drivers/gpu/drm/i915/gt/selftest_slpc.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/selftest_timeline.c
drivers/gpu/drm/i915/gt/selftest_workarounds.c
drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h
drivers/gpu/drm/i915/gt/uc/intel_guc.c
drivers/gpu/drm/i915/gt/uc/intel_guc.h
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
drivers/gpu/drm/i915/gt/uc/intel_guc_ads.h
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
drivers/gpu/drm/i915/gt/uc/intel_guc_log.h
drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_guc_rc.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h [new file with mode: 0644]
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
drivers/gpu/drm/i915/gt/uc/intel_uc.c
drivers/gpu/drm/i915/gt/uc/intel_uc.h
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_active.h
drivers/gpu/drm/i915/i915_buddy.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_buddy.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_cmd_parser.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_debugfs_params.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem.h
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_ww.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_gem_ww.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_getparam.c
drivers/gpu/drm/i915/i915_globals.c [deleted file]
drivers/gpu/drm/i915/i915_globals.h [deleted file]
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_module.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_pci.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_perf.c
drivers/gpu/drm/i915/i915_perf.h
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_pmu.h
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/i915_scatterlist.c
drivers/gpu/drm/i915/i915_scatterlist.h
drivers/gpu/drm/i915/i915_scheduler.c
drivers/gpu/drm/i915/i915_scheduler.h
drivers/gpu/drm/i915/i915_scheduler_types.h
drivers/gpu/drm/i915/i915_sw_fence_work.c
drivers/gpu/drm/i915/i915_sw_fence_work.h
drivers/gpu/drm/i915/i915_sysfs.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/i915_ttm_buddy_manager.c [new file with mode: 0644]
drivers/gpu/drm/i915/i915_ttm_buddy_manager.h [new file with mode: 0644]
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/intel_device_info.c
drivers/gpu/drm/i915/intel_device_info.h
drivers/gpu/drm/i915/intel_dram.c
drivers/gpu/drm/i915/intel_memory_region.c
drivers/gpu/drm/i915/intel_memory_region.h
drivers/gpu/drm/i915/intel_pch.c
drivers/gpu/drm/i915/intel_pch.h
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_region_ttm.c
drivers/gpu/drm/i915/intel_region_ttm.h
drivers/gpu/drm/i915/intel_sideband.c
drivers/gpu/drm/i915/intel_sideband.h
drivers/gpu/drm/i915/intel_step.c
drivers/gpu/drm/i915/intel_step.h
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/i915/intel_uncore.h
drivers/gpu/drm/i915/intel_wopcm.c
drivers/gpu/drm/i915/selftests/i915_buddy.c [new file with mode: 0644]
drivers/gpu/drm/i915/selftests/i915_live_selftests.h
drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
drivers/gpu/drm/i915/selftests/i915_request.c
drivers/gpu/drm/i915/selftests/i915_selftest.c
drivers/gpu/drm/i915/selftests/igt_flush_test.c
drivers/gpu/drm/i915/selftests/igt_live_test.c
drivers/gpu/drm/i915/selftests/intel_memory_region.c
drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c [new file with mode: 0644]
drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h [new file with mode: 0644]
drivers/gpu/drm/i915/selftests/intel_uncore.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c
drivers/gpu/drm/i915/selftests/mock_gtt.c
drivers/gpu/drm/i915/selftests/mock_region.c
drivers/gpu/drm/radeon/atombios.h
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/r100.c
drivers/gpu/drm/radeon/r300_reg.h
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_fb.c
drivers/gpu/drm/radeon/radeon_fence.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/rs600.c
drivers/gpu/drm/radeon/rv770.c
drivers/gpu/drm/xlnx/Kconfig
drivers/gpu/drm/xlnx/zynqmp_disp.c
drivers/gpu/drm/xlnx/zynqmp_dp.c
drivers/hid/hid-core.c
drivers/hid/intel-ish-hid/ishtp/bus.c
drivers/hv/vmbus_drv.c
drivers/hwtracing/intel_th/core.c
drivers/i2c/i2c-core-base.c
drivers/i3c/master.c
drivers/input/gameport/gameport.c
drivers/input/serio/serio.c
drivers/ipack/ipack.c
drivers/macintosh/macio_asic.c
drivers/mcb/mcb-core.c
drivers/media/pci/bt8xx/bttv-gpio.c
drivers/memstick/core/memstick.c
drivers/mfd/mcp-core.c
drivers/misc/mei/bus.c
drivers/misc/tifm_core.c
drivers/mmc/core/bus.c
drivers/mmc/core/sdio_bus.c
drivers/net/netdevsim/bus.c
drivers/ntb/core.c
drivers/ntb/ntb_transport.c
drivers/nubus/bus.c
drivers/nvdimm/bus.c
drivers/pci/endpoint/pci-epf-core.c
drivers/pci/pci-driver.c
drivers/pcmcia/ds.c
drivers/platform/surface/aggregator/bus.c
drivers/platform/x86/wmi.c
drivers/pnp/driver.c
drivers/rapidio/rio-driver.c
drivers/rpmsg/rpmsg_core.c
drivers/s390/block/scm_drv.c
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/chsc_sch.c
drivers/s390/cio/css.c
drivers/s390/cio/css.h
drivers/s390/cio/device.c
drivers/s390/cio/eadm_sch.c
drivers/s390/cio/scm.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/crypto/ap_bus.c
drivers/scsi/scsi_debug.c
drivers/sh/superhyway/superhyway.c
drivers/siox/siox-core.c
drivers/slimbus/core.c
drivers/soc/qcom/apr.c
drivers/spi/spi.c
drivers/spmi/spmi.c
drivers/ssb/main.c
drivers/staging/fieldbus/anybuss/host.c
drivers/staging/greybus/gbphy.c
drivers/target/loopback/tcm_loop.c
drivers/thunderbolt/domain.c
drivers/tty/serdev/core.c
drivers/usb/common/ulpi.c
drivers/usb/serial/bus.c
drivers/usb/typec/bus.c
drivers/vdpa/vdpa.c
drivers/vfio/mdev/mdev_driver.c
drivers/virtio/virtio.c
drivers/vlynq/vlynq.c
drivers/vme/vme.c
drivers/xen/xenbus/xenbus.h
drivers/xen/xenbus/xenbus_probe.c
drivers/zorro/zorro-driver.c
include/linux/device/bus.h
include/linux/pci-epf.h
include/uapi/drm/i915_drm.h
include/uapi/linux/kfd_ioctl.h
sound/ac97/bus.c
sound/aoa/soundbus/core.c

index 42ce019..204ebda 100644 (file)
@@ -422,9 +422,16 @@ Batchbuffer Parsing
 User Batchbuffer Execution
 --------------------------
 
+.. kernel-doc:: drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+
 .. kernel-doc:: drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
    :doc: User command execution
 
+Scheduling
+----------
+.. kernel-doc:: drivers/gpu/drm/i915/i915_scheduler_types.h
+   :functions: i915_sched_engine
+
 Logical Rings, Logical Ring Contexts and Execlists
 --------------------------------------------------
 
@@ -518,6 +525,14 @@ GuC-based command submission
 .. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
    :doc: GuC-based command submission
 
+GuC ABI
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h
+.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
+.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
+.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+
 HuC
 ---
 .. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_huc.c
diff --git a/Documentation/gpu/rfc/i915_parallel_execbuf.h b/Documentation/gpu/rfc/i915_parallel_execbuf.h
new file mode 100644 (file)
index 0000000..8cbe2c4
--- /dev/null
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
+
+/**
+ * struct drm_i915_context_engines_parallel_submit - Configure engine for
+ * parallel submission.
+ *
+ * Sets up a slot in the context engine map to allow multiple BBs to be
+ * submitted in a single execbuf IOCTL. Those BBs will then be scheduled to run
+ * on the GPU in parallel. Multiple hardware contexts are created internally in
+ * the i915 to run these BBs. Once a slot is configured for N BBs, only N BBs
+ * can be submitted in each execbuf IOCTL, and this is implicit behavior, e.g.
+ * the user doesn't tell the execbuf IOCTL there are N BBs; the execbuf IOCTL
+ * knows how many BBs there are based on the slot's configuration. The N BBs
+ * are the last N buffer objects, or the first N if I915_EXEC_BATCH_FIRST is
+ * set.
+ *
+ * The default placement behavior is to create implicit bonds between each
+ * context if each context maps to more than 1 physical engine (e.g. the
+ * context is a virtual engine). Also, we only allow contexts of the same
+ * engine class, and these contexts must be in logically contiguous order.
+ * Examples of the placement behavior are described below. Lastly, the default
+ * is to not allow BBs to be preempted mid-BB; rather, coordinated preemption
+ * is inserted on all hardware contexts between each set of BBs. Flags may be
+ * added in the future to change both of these default behaviors.
+ *
+ * Returns -EINVAL if the hardware context placement configuration is invalid
+ * or if the placement configuration isn't supported on the platform /
+ * submission interface.
+ * Returns -ENODEV if the extension isn't supported on the platform /
+ * submission interface.
+ *
+ * .. code-block:: none
+ *
+ *     Example 1 pseudo code:
+ *     CS[X] = generic engine of same class, logical instance X
+ *     INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
+ *     set_engines(INVALID)
+ *     set_parallel(engine_index=0, width=2, num_siblings=1,
+ *                  engines=CS[0],CS[1])
+ *
+ *     Results in the following valid placement:
+ *     CS[0], CS[1]
+ *
+ *     Example 2 pseudo code:
+ *     CS[X] = generic engine of same class, logical instance X
+ *     INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
+ *     set_engines(INVALID)
+ *     set_parallel(engine_index=0, width=2, num_siblings=2,
+ *                  engines=CS[0],CS[2],CS[1],CS[3])
+ *
+ *     Results in the following valid placements:
+ *     CS[0], CS[1]
+ *     CS[2], CS[3]
+ *
+ *     This can also be thought of as 2 virtual engines described by a 2-D
+ *     array in the engines field, with bonds placed between each index of the
+ *     virtual engines, e.g. CS[0] is bonded to CS[1] and CS[2] is bonded to
+ *     CS[3].
+ *     VE[0] = CS[0], CS[2]
+ *     VE[1] = CS[1], CS[3]
+ *
+ *     Example 3 pseudo code:
+ *     CS[X] = generic engine of same class, logical instance X
+ *     INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
+ *     set_engines(INVALID)
+ *     set_parallel(engine_index=0, width=2, num_siblings=2,
+ *                  engines=CS[0],CS[1],CS[1],CS[3])
+ *
+ *     Results in the following valid and invalid placements:
+ *     CS[0], CS[1]
+ *     CS[1], CS[3] - Not logically contiguous, returns -EINVAL
+ */
+struct drm_i915_context_engines_parallel_submit {
+       /**
+        * @base: base user extension.
+        */
+       struct i915_user_extension base;
+
+       /**
+        * @engine_index: slot for parallel engine
+        */
+       __u16 engine_index;
+
+       /**
+        * @width: number of contexts per parallel engine
+        */
+       __u16 width;
+
+       /**
+        * @num_siblings: number of siblings per context
+        */
+       __u16 num_siblings;
+
+       /**
+        * @mbz16: reserved for future use; must be zero
+        */
+       __u16 mbz16;
+
+       /**
+        * @flags: all undefined flags must be zero; no flags are currently defined
+        */
+       __u64 flags;
+
+       /**
+        * @mbz64: reserved for future use; must be zero
+        */
+       __u64 mbz64[3];
+
+       /**
+        * @engines: 2-d array of engine instances to configure parallel engine
+        *
+        * length = width (i) * num_siblings (j)
+        * index = j + i * num_siblings
+        */
+       struct i915_engine_class_instance engines[0];
+
+} __packed;
+
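
For orientation, a minimal userspace-side sketch of how the RFC extension above
could be filled in for Example 1 (width=2, num_siblings=1). This is not part of
the patch: the struct definition comes from the RFC header above, the engine
class/instance values are placeholders, and the helper/struct names plus the
context-setup plumbing (chaining into I915_CONTEXT_PARAM_ENGINES) are assumed
here for illustration only.

    #include <string.h>
    #include <drm/i915_drm.h>  /* i915_user_extension, i915_engine_class_instance */
    /* drm_i915_context_engines_parallel_submit as defined in the RFC header above */

    struct example_parallel_cfg {
            struct drm_i915_context_engines_parallel_submit ext;
            /* width * num_siblings entries, index = j + i * num_siblings */
            struct i915_engine_class_instance engines[2];
    };

    static void example_fill_parallel_ext(struct example_parallel_cfg *cfg)
    {
            memset(cfg, 0, sizeof(*cfg));   /* clears the mbz/reserved fields */
            cfg->ext.base.name = I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT;
            cfg->ext.engine_index = 0;      /* slot 0 of the engine map */
            cfg->ext.width = 2;             /* 2 BBs per execbuf */
            cfg->ext.num_siblings = 1;      /* 1 placement per BB */
            /* i = BB index, j = sibling index; class/instances are placeholders */
            cfg->engines[0].engine_class = I915_ENGINE_CLASS_VIDEO;
            cfg->engines[0].engine_instance = 0;
            cfg->engines[1].engine_class = I915_ENGINE_CLASS_VIDEO;
            cfg->engines[1].engine_instance = 1;
    }
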
diff --git a/Documentation/gpu/rfc/i915_scheduler.rst b/Documentation/gpu/rfc/i915_scheduler.rst
new file mode 100644 (file)
index 0000000..cbda750
--- /dev/null
@@ -0,0 +1,148 @@
+=========================================
+I915 GuC Submission/DRM Scheduler Section
+=========================================
+
+Upstream plan
+=============
+For upstream the overall plan for landing GuC submission and integrating the
+i915 with the DRM scheduler is:
+
+* Merge basic GuC submission
+       * Basic submission support for all gen11+ platforms
+       * Not enabled by default on any current platforms but can be enabled via
+         modparam enable_guc
+       * Lots of rework will need to be done to integrate with the DRM
+         scheduler, so there is no need to nitpick everything in the code; it
+         just needs to be functional, with no major coding style / layering
+         errors, and must not regress execlists
+       * Update IGTs / selftests as needed to work with GuC submission
+       * Enable CI on supported platforms for a baseline
+       * Rework / get CI healthy for GuC submission in place as needed
+* Merge new parallel submission uAPI
+       * Bonding uAPI completely incompatible with GuC submission, plus it has
+         severe design issues in general, which is why we want to retire it no
+         matter what
+       * New uAPI adds I915_CONTEXT_ENGINES_EXT_PARALLEL context setup step
+         which configures a slot with N contexts
+       * After I915_CONTEXT_ENGINES_EXT_PARALLEL a user can submit N batches to
+         a slot in a single execbuf IOCTL and the batches run on the GPU in
+         parallel
+       * Initially only for GuC submission but execlists can be supported if
+         needed
+* Convert the i915 to use the DRM scheduler
+       * GuC submission backend fully integrated with DRM scheduler
+               * All request queues removed from backend (e.g. all backpressure
+                 handled in DRM scheduler)
+               * Resets / cancels hook in DRM scheduler
+               * Watchdog hooks into DRM scheduler
+               * Lots of complexity of the GuC backend can be pulled out once
+                 integrated with DRM scheduler (e.g. the state machine gets
+                 simpler, locking gets simpler, etc...)
+       * Execlists backend will do the minimum required to hook into the DRM
+         scheduler
+               * Legacy interface
+               * Features like timeslicing / preemption / virtual engines would
+                 be difficult to integrate with the DRM scheduler and these
+                 features are not required for GuC submission as the GuC does
+                 these things for us
+               * ROI low on fully integrating into DRM scheduler
+               * Fully integrating would add lots of complexity to DRM
+                 scheduler
+       * Port i915 priority inheritance / boosting feature in DRM scheduler
+               * Used for i915 page flip, may be useful to other DRM drivers as
+                 well
+               * Will be an optional feature in the DRM scheduler
+       * Remove in-order completion assumptions from DRM scheduler
+               * Even when using the DRM scheduler the backends will handle
+                 preemption, timeslicing, etc... so it is possible for jobs to
+                 finish out of order
+       * Pull out i915 priority levels and use DRM priority levels
+       * Optimize DRM scheduler as needed
+
+TODOs for GuC submission upstream
+=================================
+
+* Need an update to GuC firmware / i915 to enable error state capture
+* Open source tool to decode GuC logs
+* Public GuC spec
+
+New uAPI for basic GuC submission
+=================================
+No major changes are required to the uAPI for basic GuC submission. The only
+change is a new scheduler attribute: I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP.
+This attribute indicates the 2k i915 user priority levels are statically mapped
+into 3 levels as follows:
+
+* -1k to -1 Low priority
+* 0 Medium priority
+* 1 to 1k High priority
+
+This is needed because the GuC only has 4 priority bands. The highest priority
+band is reserved for the kernel. This aligns with the DRM scheduler priority
+levels too.
+
+Spec references:
+----------------
+* https://www.khronos.org/registry/EGL/extensions/IMG/EGL_IMG_context_priority.txt
+* https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap5.html#devsandqueues-priority
+* https://spec.oneapi.com/level-zero/latest/core/api.html#ze-command-queue-priority-t
+
+New parallel submission uAPI
+============================
+The existing bonding uAPI is completely broken with GuC submission because
+whether a submission is a single context submit or parallel submit isn't known
+until execbuf time, when activated via the I915_SUBMIT_FENCE. To submit multiple
+contexts in parallel with the GuC, the context must be explicitly registered with
+N contexts and all N contexts must be submitted in a single command to the GuC.
+The GuC interfaces do not support dynamically changing between N contexts as the
+bonding uAPI does. Hence the need for a new parallel submission interface. Also
+the legacy bonding uAPI is quite confusing and not intuitive at all. Furthermore
+I915_SUBMIT_FENCE is by design a future fence, so not really something we should
+continue to support.
+
+The new parallel submission uAPI consists of 3 parts:
+
+* Export engines logical mapping
+* A 'set_parallel' extension to configure contexts for parallel
+  submission
+* Extend execbuf2 IOCTL to support submitting N BBs in a single IOCTL
+
+Export engines logical mapping
+------------------------------
+Certain use cases require BBs to be placed on engine instances in logical order
+(e.g. split-frame on gen11+). The logical mapping of engine instances can change
+based on fusing. Rather than making UMDs be aware of fusing, simply expose the
+logical mapping with the existing query engine info IOCTL. Also the GuC
+submission interface currently only supports submitting multiple contexts to
+engines in logical order which is a new requirement compared to execlists.
+Lastly, all current platforms have at most 2 engine instances and the logical
+order is the same as uAPI order. This will change on platforms with more than 2
+engine instances.
+
+A single bit will be added to drm_i915_engine_info.flags indicating that the
+logical instance has been returned and a new field,
+drm_i915_engine_info.logical_instance, returns the logical instance.
+
+A 'set_parallel' extension to configure contexts for parallel submission
+------------------------------------------------------------------------
+The 'set_parallel' extension configures a slot for parallel submission of N BBs.
+It is a setup step that must be called before using any of the contexts. See
+I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE or I915_CONTEXT_ENGINES_EXT_BOND for
+similar existing examples. Once a slot is configured for parallel submission,
+the execbuf2 IOCTL can be called, submitting N BBs in a single IOCTL. Initially
+this only supports GuC submission. Execlists support can be added later if
+needed.
+
+Add I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT and
+drm_i915_context_engines_parallel_submit to the uAPI to implement this
+extension.
+
+.. kernel-doc:: Documentation/gpu/rfc/i915_parallel_execbuf.h
+        :functions: drm_i915_context_engines_parallel_submit
+
+Extend execbuf2 IOCTL to support submitting N BBs in a single IOCTL
+-------------------------------------------------------------------
+Contexts that have been configured with the 'set_parallel' extension can only
+submit N BBs in a single execbuf2 IOCTL. The BBs are either the last N objects
+in the drm_i915_gem_exec_object2 list or the first N if I915_EXEC_BATCH_FIRST is
+set. The number of BBs is implicit based on the slot submitted and how it has
+been configured by 'set_parallel' or other extensions. No uAPI changes are
+required to the execbuf2 IOCTL.
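
As a quick illustration of the static priority mapping described in the
scheduler RST above (the 2k i915 user priority levels collapsed into three
bands when I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP is reported), a hedged
sketch; the band boundaries are taken from the text, but the helper and enum
names are invented for illustration and are not part of the patch:

    /* Hypothetical helper: maps an i915 user priority (-1k..1k) to the
     * three statically mapped bands documented above. */
    enum example_band { EXAMPLE_PRIO_LOW, EXAMPLE_PRIO_MEDIUM, EXAMPLE_PRIO_HIGH };

    static enum example_band example_map_priority(int prio)
    {
            if (prio < 0)
                    return EXAMPLE_PRIO_LOW;        /* -1k to -1 */
            if (prio == 0)
                    return EXAMPLE_PRIO_MEDIUM;     /* default */
            return EXAMPLE_PRIO_HIGH;               /* 1 to 1k */
    }
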
index 0567044..91e93a7 100644 (file)
@@ -19,3 +19,7 @@ host such documentation:
 .. toctree::
 
     i915_gem_lmem.rst
+
+.. toctree::
+
+    i915_scheduler.rst
index e45f4e4..24d21ba 100644 (file)
@@ -834,14 +834,13 @@ static int locomo_bus_probe(struct device *dev)
        return ret;
 }
 
-static int locomo_bus_remove(struct device *dev)
+static void locomo_bus_remove(struct device *dev)
 {
        struct locomo_dev *ldev = LOCOMO_DEV(dev);
        struct locomo_driver *drv = LOCOMO_DRV(dev->driver);
 
        if (drv->remove)
                drv->remove(ldev);
-       return 0;
 }
 
 struct bus_type locomo_bus_type = {
index ff5e0d0..092a2eb 100644 (file)
@@ -1364,15 +1364,13 @@ static int sa1111_bus_probe(struct device *dev)
        return ret;
 }
 
-static int sa1111_bus_remove(struct device *dev)
+static void sa1111_bus_remove(struct device *dev)
 {
        struct sa1111_dev *sadev = to_sa1111_device(dev);
        struct sa1111_driver *drv = SA1111_DRV(dev->driver);
 
        if (drv->remove)
                drv->remove(sadev);
-
-       return 0;
 }
 
 struct bus_type sa1111_bus_type = {
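
The locomo and sa1111 hunks above, and the long run of bus hunks that follow,
all apply the same mechanical conversion: struct bus_type::remove now returns
void instead of int, so each per-bus wrapper simply stops returning 0. A
minimal sketch of the resulting shape for a hypothetical "foo" bus (the foo_*
names and helpers are invented for illustration):

    static void foo_bus_remove(struct device *dev)
    {
            struct foo_driver *drv = to_foo_driver(dev->driver);

            /* Failure can no longer be reported to the driver core;
             * any error must be handled or logged locally. */
            if (drv->remove)
                    drv->remove(to_foo_device(dev));
    }
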
index 827b50f..53813f9 100644 (file)
@@ -1052,7 +1052,7 @@ static int ecard_drv_probe(struct device *dev)
        return ret;
 }
 
-static int ecard_drv_remove(struct device *dev)
+static void ecard_drv_remove(struct device *dev)
 {
        struct expansion_card *ec = ECARD_DEV(dev);
        struct ecard_driver *drv = ECARD_DRV(dev->driver);
@@ -1067,8 +1067,6 @@ static int ecard_drv_remove(struct device *dev)
        ec->ops = &ecard_default_ops;
        barrier();
        ec->irq_data = NULL;
-
-       return 0;
 }
 
 /*
index de0768a..dfc52f6 100644 (file)
@@ -143,14 +143,13 @@ static int gio_device_probe(struct device *dev)
        return error;
 }
 
-static int gio_device_remove(struct device *dev)
+static void gio_device_remove(struct device *dev)
 {
        struct gio_device *gio_dev = to_gio_device(dev);
        struct gio_driver *drv = to_gio_driver(dev->driver);
 
        if (dev->driver && drv->remove)
                drv->remove(gio_dev);
-       return 0;
 }
 
 static void gio_device_shutdown(struct device *dev)
index 80fa065..776d624 100644 (file)
@@ -133,14 +133,13 @@ static int parisc_driver_probe(struct device *dev)
        return rc;
 }
 
-static int __exit parisc_driver_remove(struct device *dev)
+static void __exit parisc_driver_remove(struct device *dev)
 {
        struct parisc_device *pa_dev = to_parisc_device(dev);
        struct parisc_driver *pa_drv = to_parisc_driver(dev->driver);
+
        if (pa_drv->remove)
                pa_drv->remove(pa_dev);
-
-       return 0;
 }
        
 
index 1a56658..cc5774c 100644 (file)
@@ -381,7 +381,7 @@ static int ps3_system_bus_probe(struct device *_dev)
        return result;
 }
 
-static int ps3_system_bus_remove(struct device *_dev)
+static void ps3_system_bus_remove(struct device *_dev)
 {
        struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
        struct ps3_system_bus_driver *drv;
@@ -399,7 +399,6 @@ static int ps3_system_bus_remove(struct device *_dev)
                        __func__, __LINE__, drv->core.name);
 
        pr_debug(" <- %s:%d: %s\n", __func__, __LINE__, dev_name(&dev->core));
-       return 0;
 }
 
 static void ps3_system_bus_shutdown(struct device *_dev)
index c6c79ef..7ee3ed7 100644 (file)
@@ -366,14 +366,13 @@ static int ibmebus_bus_device_probe(struct device *dev)
        return error;
 }
 
-static int ibmebus_bus_device_remove(struct device *dev)
+static void ibmebus_bus_device_remove(struct device *dev)
 {
        struct platform_device *of_dev = to_platform_device(dev);
        struct platform_driver *drv = to_platform_driver(dev->driver);
 
        if (dev->driver && drv->remove)
                drv->remove(of_dev);
-       return 0;
 }
 
 static void ibmebus_bus_device_shutdown(struct device *dev)
index e00f372..58283ce 100644 (file)
@@ -1257,7 +1257,7 @@ static int vio_bus_probe(struct device *dev)
 }
 
 /* convert from struct device to struct vio_dev and pass to driver. */
-static int vio_bus_remove(struct device *dev)
+static void vio_bus_remove(struct device *dev)
 {
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
@@ -1276,7 +1276,6 @@ static int vio_bus_remove(struct device *dev)
                vio_cmo_bus_remove(viodev);
 
        put_device(devptr);
-       return 0;
 }
 
 static void vio_bus_shutdown(struct device *dev)
index bb63b2a..445fe4c 100644 (file)
@@ -105,7 +105,7 @@ enum scm_event {SCM_CHANGE, SCM_AVAIL};
 struct scm_driver {
        struct device_driver drv;
        int (*probe) (struct scm_device *scmdev);
-       int (*remove) (struct scm_device *scmdev);
+       void (*remove) (struct scm_device *scmdev);
        void (*notify) (struct scm_device *scmdev, enum scm_event event);
        void (*handler) (struct scm_device *scmdev, void *data,
                        blk_status_t error);
index 348a886..01122a2 100644 (file)
@@ -93,7 +93,7 @@ static int vio_device_probe(struct device *dev)
        return drv->probe(vdev, id);
 }
 
-static int vio_device_remove(struct device *dev)
+static void vio_device_remove(struct device *dev)
 {
        struct vio_dev *vdev = to_vio_dev(dev);
        struct vio_driver *drv = to_vio_driver(dev->driver);
@@ -107,8 +107,6 @@ static int vio_device_remove(struct device *dev)
 
                drv->remove(vdev);
        }
-
-       return 0;
 }
 
 static ssize_t devspec_show(struct device *dev,
index f854bcb..b941555 100644 (file)
@@ -1019,7 +1019,7 @@ static int acpi_device_probe(struct device *dev)
        return 0;
 }
 
-static int acpi_device_remove(struct device *dev)
+static void acpi_device_remove(struct device *dev)
 {
        struct acpi_device *acpi_dev = to_acpi_device(dev);
        struct acpi_driver *acpi_drv = acpi_dev->driver;
@@ -1034,7 +1034,6 @@ static int acpi_device_remove(struct device *dev)
        acpi_dev->driver_data = NULL;
 
        put_device(dev);
-       return 0;
 }
 
 struct bus_type acpi_bus_type = {
index 939ca22..9620411 100644 (file)
@@ -219,7 +219,7 @@ static int amba_probe(struct device *dev)
        return ret;
 }
 
-static int amba_remove(struct device *dev)
+static void amba_remove(struct device *dev)
 {
        struct amba_device *pcdev = to_amba_device(dev);
        struct amba_driver *drv = to_amba_driver(dev->driver);
@@ -236,8 +236,6 @@ static int amba_remove(struct device *dev)
 
        amba_put_disable_pclk(pcdev);
        dev_pm_domain_detach(dev, true);
-
-       return 0;
 }
 
 static void amba_shutdown(struct device *dev)
index 6a30264..9230c94 100644 (file)
@@ -79,7 +79,7 @@ static int auxiliary_bus_probe(struct device *dev)
        return ret;
 }
 
-static int auxiliary_bus_remove(struct device *dev)
+static void auxiliary_bus_remove(struct device *dev)
 {
        struct auxiliary_driver *auxdrv = to_auxiliary_drv(dev->driver);
        struct auxiliary_device *auxdev = to_auxiliary_dev(dev);
@@ -87,8 +87,6 @@ static int auxiliary_bus_remove(struct device *dev)
        if (auxdrv->remove)
                auxdrv->remove(auxdev);
        dev_pm_domain_detach(dev, true);
-
-       return 0;
 }
 
 static void auxiliary_bus_shutdown(struct device *dev)
index aa47376..55e3ee2 100644 (file)
@@ -46,14 +46,12 @@ static int isa_bus_probe(struct device *dev)
        return 0;
 }
 
-static int isa_bus_remove(struct device *dev)
+static void isa_bus_remove(struct device *dev)
 {
        struct isa_driver *isa_driver = dev->platform_data;
 
        if (isa_driver && isa_driver->remove)
                isa_driver->remove(dev, to_isa_dev(dev)->id);
-
-       return 0;
 }
 
 static void isa_bus_shutdown(struct device *dev)
index 8640578..a94b7f4 100644 (file)
@@ -1438,7 +1438,7 @@ out:
        return ret;
 }
 
-static int platform_remove(struct device *_dev)
+static void platform_remove(struct device *_dev)
 {
        struct platform_driver *drv = to_platform_driver(_dev->driver);
        struct platform_device *dev = to_platform_device(_dev);
@@ -1450,8 +1450,6 @@ static int platform_remove(struct device *_dev)
                        dev_warn(_dev, "remove callback returned a non-zero value. This will be ignored.\n");
        }
        dev_pm_domain_detach(_dev, true);
-
-       return 0;
 }
 
 static void platform_shutdown(struct device *_dev)
index 6535614..e076630 100644 (file)
@@ -27,7 +27,7 @@ static DEFINE_MUTEX(bcma_buses_mutex);
 
 static int bcma_bus_match(struct device *dev, struct device_driver *drv);
 static int bcma_device_probe(struct device *dev);
-static int bcma_device_remove(struct device *dev);
+static void bcma_device_remove(struct device *dev);
 static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
 
 static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -614,7 +614,7 @@ static int bcma_device_probe(struct device *dev)
        return err;
 }
 
-static int bcma_device_remove(struct device *dev)
+static void bcma_device_remove(struct device *dev)
 {
        struct bcma_device *core = container_of(dev, struct bcma_device, dev);
        struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
@@ -623,8 +623,6 @@ static int bcma_device_remove(struct device *dev)
        if (adrv->remove)
                adrv->remove(core);
        put_device(dev);
-
-       return 0;
 }
 
 static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
index d46db13..6f225dd 100644 (file)
@@ -169,13 +169,11 @@ static int sunxi_rsb_device_probe(struct device *dev)
        return drv->probe(rdev);
 }
 
-static int sunxi_rsb_device_remove(struct device *dev)
+static void sunxi_rsb_device_remove(struct device *dev)
 {
        const struct sunxi_rsb_driver *drv = to_sunxi_rsb_driver(dev->driver);
 
        drv->remove(to_sunxi_rsb_device(dev));
-
-       return 0;
 }
 
 static struct bus_type sunxi_rsb_bus = {
index a2e4d54..2b90b7c 100644 (file)
@@ -1034,13 +1034,12 @@ static int cxl_bus_probe(struct device *dev)
        return to_cxl_drv(dev->driver)->probe(dev);
 }
 
-static int cxl_bus_remove(struct device *dev)
+static void cxl_bus_remove(struct device *dev)
 {
        struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
 
        if (cxl_drv->remove)
                cxl_drv->remove(dev);
-       return 0;
 }
 
 struct bus_type cxl_bus_type = {
index 5aee26e..6cc4da4 100644 (file)
@@ -172,15 +172,13 @@ static int dax_bus_probe(struct device *dev)
        return 0;
 }
 
-static int dax_bus_remove(struct device *dev)
+static void dax_bus_remove(struct device *dev)
 {
        struct dax_device_driver *dax_drv = to_dax_drv(dev->driver);
        struct dev_dax *dev_dax = to_dev_dax(dev);
 
        if (dax_drv->remove)
                dax_drv->remove(dev_dax);
-
-       return 0;
 }
 
 static struct bus_type dax_bus_type = {
index 0460d58..5a017c6 100644 (file)
@@ -260,7 +260,7 @@ static void disable_wq(struct idxd_wq *wq)
        dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
 }
 
-static int idxd_config_bus_remove(struct device *dev)
+static void idxd_config_bus_remove(struct device *dev)
 {
        int rc;
 
@@ -305,8 +305,6 @@ static int idxd_config_bus_remove(struct device *dev)
                        dev_info(dev, "Device %s disabled\n", dev_name(dev));
 
        }
-
-       return 0;
 }
 
 static void idxd_config_bus_shutdown(struct device *dev)
index 6821698..90ed8fd 100644 (file)
@@ -187,14 +187,12 @@ static int fw_unit_probe(struct device *dev)
        return driver->probe(fw_unit(dev), unit_match(dev, dev->driver));
 }
 
-static int fw_unit_remove(struct device *dev)
+static void fw_unit_remove(struct device *dev)
 {
        struct fw_driver *driver =
                        container_of(dev->driver, struct fw_driver, driver);
 
        driver->remove(fw_unit(dev));
-
-       return 0;
 }
 
 static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
index 6c7e249..f6fe723 100644 (file)
@@ -111,15 +111,13 @@ static int scmi_dev_probe(struct device *dev)
        return scmi_drv->probe(scmi_dev);
 }
 
-static int scmi_dev_remove(struct device *dev)
+static void scmi_dev_remove(struct device *dev)
 {
        struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver);
        struct scmi_device *scmi_dev = to_scmi_dev(dev);
 
        if (scmi_drv->remove)
                scmi_drv->remove(scmi_dev);
-
-       return 0;
 }
 
 static struct bus_type scmi_bus_type = {
index dc83ea1..c52bcaa 100644 (file)
@@ -44,15 +44,13 @@ static int coreboot_bus_probe(struct device *dev)
        return ret;
 }
 
-static int coreboot_bus_remove(struct device *dev)
+static void coreboot_bus_remove(struct device *dev)
 {
        struct coreboot_device *device = CB_DEV(dev);
        struct coreboot_driver *driver = CB_DRV(dev->driver);
 
        if (driver->remove)
                driver->remove(device);
-
-       return 0;
 }
 
 static struct bus_type coreboot_bus_type = {
index 511b20f..1ae6779 100644 (file)
@@ -284,15 +284,13 @@ static int dfl_bus_probe(struct device *dev)
        return ddrv->probe(ddev);
 }
 
-static int dfl_bus_remove(struct device *dev)
+static void dfl_bus_remove(struct device *dev)
 {
        struct dfl_driver *ddrv = to_dfl_drv(dev->driver);
        struct dfl_device *ddev = to_dfl_dev(dev);
 
        if (ddrv->remove)
                ddrv->remove(ddev);
-
-       return 0;
 }
 
 static int dfl_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
index f089794..0d814c9 100644 (file)
@@ -120,7 +120,7 @@ amdgpu-y += \
 amdgpu-y += \
        dce_v10_0.o \
        dce_v11_0.o \
-       dce_virtual.o
+       amdgpu_vkms.o
 
 # add GFX block
 amdgpu-y += \
index d10baa3..96e895d 100644 (file)
@@ -916,6 +916,7 @@ struct amdgpu_device {
 
        /* display */
        bool                            enable_virtual_display;
+       struct amdgpu_vkms_output       *amdgpu_vkms_output;
        struct amdgpu_mode_info         mode_info;
        /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
        struct work_struct              hotplug_work;
index 58520b0..260ba01 100644 (file)
@@ -1040,7 +1040,7 @@ void amdgpu_acpi_detect(void)
  */
 bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
 {
-#if defined(CONFIG_AMD_PMC) || defined(CONFIG_AMD_PMC_MODULE)
+#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_PM_SLEEP)
        if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
                if (adev->flags & AMD_IS_APU)
                        return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
index 0462d4a..ec028cf 100644 (file)
@@ -271,7 +271,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
                uint64_t *size);
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
+               struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed);
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
                struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
 int amdgpu_amdkfd_gpuvm_sync_memory(
index c4b3eee..2d6b2d7 100644 (file)
@@ -1057,7 +1057,8 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
 
 static int update_gpuvm_pte(struct kgd_mem *mem,
                            struct kfd_mem_attachment *entry,
-                           struct amdgpu_sync *sync)
+                           struct amdgpu_sync *sync,
+                           bool *table_freed)
 {
        struct amdgpu_bo_va *bo_va = entry->bo_va;
        struct amdgpu_device *adev = entry->adev;
@@ -1068,7 +1069,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
                return ret;
 
        /* Update the page tables  */
-       ret = amdgpu_vm_bo_update(adev, bo_va, false);
+       ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
        if (ret) {
                pr_err("amdgpu_vm_bo_update failed\n");
                return ret;
@@ -1080,7 +1081,8 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
 static int map_bo_to_gpuvm(struct kgd_mem *mem,
                           struct kfd_mem_attachment *entry,
                           struct amdgpu_sync *sync,
-                          bool no_update_pte)
+                          bool no_update_pte,
+                          bool *table_freed)
 {
        int ret;
 
@@ -1097,7 +1099,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
        if (no_update_pte)
                return 0;
 
-       ret = update_gpuvm_pte(mem, entry, sync);
+       ret = update_gpuvm_pte(mem, entry, sync, table_freed);
        if (ret) {
                pr_err("update_gpuvm_pte() failed\n");
                goto update_gpuvm_pte_failed;
@@ -1605,7 +1607,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 }
 
 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-               struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
+               struct kgd_dev *kgd, struct kgd_mem *mem,
+               void *drm_priv, bool *table_freed)
 {
        struct amdgpu_device *adev = get_amdgpu_device(kgd);
        struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
@@ -1693,7 +1696,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                         entry->va, entry->va + bo_size, entry);
 
                ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
-                                     is_invalid_userptr);
+                                     is_invalid_userptr, table_freed);
                if (ret) {
                        pr_err("Failed to map bo to gpuvm\n");
                        goto out_unreserve;
@@ -1717,6 +1720,12 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
                                true);
        ret = unreserve_bo_and_vms(&ctx, false, false);
 
+       /* Only apply the "no TLB flush" optimization on Aldebaran, to
+        * work around regressions on other ASICs.
+        */
+       if (table_freed && (adev->asic_type != CHIP_ALDEBARAN))
+               *table_freed = true;
+
        goto out;
 
 out_unreserve:
@@ -2143,7 +2152,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
                                continue;
 
                        kfd_mem_dmaunmap_attachment(mem, attachment);
-                       ret = update_gpuvm_pte(mem, attachment, &sync);
+                       ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
                        if (ret) {
                                pr_err("%s: update PTE failed\n", __func__);
                                /* make sure this gets validated again */
@@ -2349,7 +2358,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
                                continue;
 
                        kfd_mem_dmaunmap_attachment(mem, attachment);
-                       ret = update_gpuvm_pte(mem, attachment, &sync_obj);
+                       ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
                        if (ret) {
                                pr_debug("Memory eviction: update PTE failed. Try again\n");
                                goto validate_map_fail;
index 3b5d131..8f53837 100644 (file)
@@ -468,6 +468,46 @@ bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *ade
        return (fw_cap & ATOM_FIRMWARE_CAP_DYNAMIC_BOOT_CFG_ENABLE) ? true : false;
 }
 
+/*
+ * Helper function to query RAS EEPROM address
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Return true if vbios supports ras rom address reporting
+ */
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address)
+{
+       struct amdgpu_mode_info *mode_info = &adev->mode_info;
+       int index;
+       u16 data_offset, size;
+       union firmware_info *firmware_info;
+       u8 frev, crev;
+
+       if (i2c_address == NULL)
+               return false;
+
+       *i2c_address = 0;
+
+       index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+                       firmwareinfo);
+
+       if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
+                               index, &size, &frev, &crev, &data_offset)) {
+               /* support firmware_info 3.4 + */
+               if ((frev == 3 && crev >=4) || (frev > 3)) {
+                       firmware_info = (union firmware_info *)
+                               (mode_info->atom_context->bios + data_offset);
+                       *i2c_address = firmware_info->v34.ras_rom_i2c_slave_addr;
+               }
+       }
+
+       if (*i2c_address != 0)
+               return true;
+
+       return false;
+}
+
+
 union smu_info {
        struct atom_smu_info_v3_1 v31;
 };
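
For reference, a minimal caller sketch for the new
amdgpu_atomfirmware_ras_rom_addr() helper added above. The surrounding RAS
EEPROM init code is not part of this hunk; the variable name and the log
message are illustrative only.

    uint8_t ras_rom_i2c_addr;

    /* The helper returns false and leaves the address at 0 when the
     * vbios does not report a RAS ROM I2C slave address. */
    if (amdgpu_atomfirmware_ras_rom_addr(adev, &ras_rom_i2c_addr))
            dev_info(adev->dev, "RAS EEPROM I2C address: 0x%x\n",
                     ras_rom_i2c_addr);
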
index 1bbbb19..751248b 100644 (file)
@@ -36,6 +36,7 @@ int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_ras_rom_addr(struct amdgpu_device *adev, uint8_t* i2c_address);
 bool amdgpu_atomfirmware_mem_training_supported(struct amdgpu_device *adev);
 bool amdgpu_atomfirmware_dynamic_boot_config_supported(struct amdgpu_device *adev);
 int amdgpu_atomfirmware_get_fw_reserved_fb_size(struct amdgpu_device *adev);
index a152363..913f9ea 100644 (file)
@@ -799,7 +799,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
        if (r)
                return r;
 
-       r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
+       r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
        if (r)
                return r;
 
@@ -810,7 +810,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
        if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
                bo_va = fpriv->csa_va;
                BUG_ON(!bo_va);
-               r = amdgpu_vm_bo_update(adev, bo_va, false);
+               r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
                if (r)
                        return r;
 
@@ -829,7 +829,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
                if (bo_va == NULL)
                        continue;
 
-               r = amdgpu_vm_bo_update(adev, bo_va, false);
+               r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
                if (r)
                        return r;
 
index 6ca9fe5..d7cc45e 100644 (file)
@@ -3647,9 +3647,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 fence_driver_init:
        /* Fence driver */
-       r = amdgpu_fence_driver_init(adev);
+       r = amdgpu_fence_driver_sw_init(adev);
        if (r) {
-               dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
+               dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
                amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
                goto failed;
        }
@@ -3989,7 +3989,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
        }
        amdgpu_fence_driver_hw_init(adev);
 
-
        r = amdgpu_device_ip_late_init(adev);
        if (r)
                return r;
index 2b0b0e8..b664029 100644 (file)
@@ -1215,6 +1215,13 @@ static const struct pci_device_id pciidlist[] = {
        /* CYAN_SKILLFISH */
        {0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
 
+       /* BEIGE_GOBY */
+       {0x1002, 0x7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+       {0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
+
        {0, 0, 0}
 };
 
@@ -1231,7 +1238,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
        int ret, retry = 0;
        bool supports_atomic = false;
 
-       if (!amdgpu_virtual_display &&
+       if (amdgpu_virtual_display ||
            amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
                supports_atomic = true;
 
@@ -1566,6 +1573,8 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
                pci_ignore_hotplug(pdev);
                pci_set_power_state(pdev, PCI_D3cold);
                drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+       } else if (amdgpu_device_supports_boco(drm_dev)) {
+               /* nothing to do */
        } else if (amdgpu_device_supports_baco(drm_dev)) {
                amdgpu_device_baco_enter(drm_dev);
        }
index 09b0486..5a143ca 100644 (file)
@@ -344,7 +344,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
        }
 
        /* disable all the possible outputs/crtcs before entering KMS mode */
-       if (!amdgpu_device_has_dc_support(adev))
+       if (!amdgpu_device_has_dc_support(adev) && !amdgpu_virtual_display)
                drm_helper_disable_unused_functions(adev_to_drm(adev));
 
        drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);
index 0b1c485..6ed5366 100644 (file)
@@ -498,7 +498,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 }
 
 /**
- * amdgpu_fence_driver_init - init the fence driver
+ * amdgpu_fence_driver_sw_init - init the fence driver
  * for all possible rings.
  *
  * @adev: amdgpu device pointer
@@ -509,13 +509,13 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
  * amdgpu_fence_driver_start_ring().
  * Returns 0 for success.
  */
-int amdgpu_fence_driver_init(struct amdgpu_device *adev)
+int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
 {
        return 0;
 }
 
 /**
- * amdgpu_fence_driver_fini - tear down the fence driver
+ * amdgpu_fence_driver_hw_fini - tear down the fence driver
  * for all possible rings.
  *
  * @adev: amdgpu device pointer
@@ -531,8 +531,7 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
 
                if (!ring || !ring->fence_drv.initialized)
                        continue;
-               if (!ring->no_scheduler)
-                       drm_sched_fini(&ring->sched);
+
                /* You can't wait for HW to signal if it's gone */
                if (!drm_dev_is_unplugged(&adev->ddev))
                        r = amdgpu_fence_wait_empty(ring);
@@ -560,6 +559,9 @@ void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
                if (!ring || !ring->fence_drv.initialized)
                        continue;
 
+               if (!ring->no_scheduler)
+                       drm_sched_fini(&ring->sched);
+
                for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
                        dma_fence_put(ring->fence_drv.fences[j]);
                kfree(ring->fence_drv.fences);
index 611fd10..c6f2fb9 100644 (file)
@@ -621,7 +621,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 
        if (operation == AMDGPU_VA_OP_MAP ||
            operation == AMDGPU_VA_OP_REPLACE) {
-               r = amdgpu_vm_bo_update(adev, bo_va, false);
+               r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
                if (r)
                        goto error;
        }
index 10b0461..9dc2d6d 100644 (file)
@@ -41,8 +41,6 @@
 #include "amdgpu_securedisplay.h"
 #include "amdgpu_atomfirmware.h"
 
-#include <drm/drm_drv.h>
-
 static int psp_sysfs_init(struct amdgpu_device *adev);
 static void psp_sysfs_fini(struct amdgpu_device *adev);
 
@@ -253,6 +251,12 @@ static int psp_sw_init(void *handle)
        struct psp_runtime_boot_cfg_entry boot_cfg_entry;
        struct psp_memory_training_context *mem_training_ctx = &psp->mem_train_ctx;
 
+       psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+       if (!psp->cmd) {
+               DRM_ERROR("Failed to allocate memory for command buffer!\n");
+               return -ENOMEM;
+       }
+
        if (!amdgpu_sriov_vf(adev)) {
                ret = psp_init_microcode(psp);
                if (ret) {
@@ -315,25 +319,30 @@ static int psp_sw_init(void *handle)
 static int psp_sw_fini(void *handle)
 {
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       struct psp_context *psp = &adev->psp;
+       struct psp_gfx_cmd_resp *cmd = psp->cmd;
 
-       psp_memory_training_fini(&adev->psp);
-       if (adev->psp.sos_fw) {
-               release_firmware(adev->psp.sos_fw);
-               adev->psp.sos_fw = NULL;
+       psp_memory_training_fini(psp);
+       if (psp->sos_fw) {
+               release_firmware(psp->sos_fw);
+               psp->sos_fw = NULL;
        }
-       if (adev->psp.asd_fw) {
-               release_firmware(adev->psp.asd_fw);
-               adev->psp.asd_fw = NULL;
+       if (psp->asd_fw) {
+               release_firmware(psp->asd_fw);
+               psp->asd_fw = NULL;
        }
-       if (adev->psp.ta_fw) {
-               release_firmware(adev->psp.ta_fw);
-               adev->psp.ta_fw = NULL;
+       if (psp->ta_fw) {
+               release_firmware(psp->ta_fw);
+               psp->ta_fw = NULL;
        }
 
        if (adev->asic_type == CHIP_NAVI10 ||
            adev->asic_type == CHIP_SIENNA_CICHLID)
                psp_sysfs_fini(adev);
 
+       kfree(cmd);
+       psp->cmd = NULL;
+
        return 0;
 }
 
@@ -417,8 +426,6 @@ psp_cmd_submit_buf(struct psp_context *psp,
        if (!drm_dev_enter(&psp->adev->ddev, &idx))
                return 0;
 
-       mutex_lock(&psp->mutex);
-
        memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
 
        memcpy(psp->cmd_buf_mem, cmd, sizeof(struct psp_gfx_cmd_resp));
@@ -478,11 +485,26 @@ psp_cmd_submit_buf(struct psp_context *psp,
        }
 
 exit:
-       mutex_unlock(&psp->mutex);
        drm_dev_exit(idx);
        return ret;
 }
 
+static struct psp_gfx_cmd_resp *acquire_psp_cmd_buf(struct psp_context *psp)
+{
+       struct psp_gfx_cmd_resp *cmd = psp->cmd;
+
+       mutex_lock(&psp->mutex);
+
+       memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+
+       return cmd;
+}
+
+void release_psp_cmd_buf(struct psp_context *psp)
+{
+       mutex_unlock(&psp->mutex);
+}
+
 static void psp_prep_tmr_cmd_buf(struct psp_context *psp,
                                 struct psp_gfx_cmd_resp *cmd,
                                 uint64_t tmr_mc, struct amdgpu_bo *tmr_bo)
@@ -517,11 +539,8 @@ static int psp_load_toc(struct psp_context *psp,
                        uint32_t *tmr_size)
 {
        int ret;
-       struct psp_gfx_cmd_resp *cmd;
+       struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
        /* Copy toc to psp firmware private buffer */
        psp_copy_fw(psp, psp->toc.start_addr, psp->toc.size_bytes);
 
@@ -531,7 +550,9 @@ static int psp_load_toc(struct psp_context *psp,
                                 psp->fence_buf_mc_addr);
        if (!ret)
                *tmr_size = psp->cmd_buf_mem->resp.tmr_size;
-       kfree(cmd);
+
+       release_psp_cmd_buf(psp);
+
        return ret;
 }
 
@@ -596,9 +617,7 @@ static int psp_tmr_load(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp))
                return 0;
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_tmr_cmd_buf(psp, cmd, psp->tmr_mc_addr, psp->tmr_bo);
        DRM_INFO("reserve 0x%lx from 0x%llx for PSP TMR\n",
@@ -607,13 +626,13 @@ static int psp_tmr_load(struct psp_context *psp)
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
 
 static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
-                                       struct psp_gfx_cmd_resp *cmd)
+                                       struct psp_gfx_cmd_resp *cmd)
 {
        if (amdgpu_sriov_vf(psp->adev))
                cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
@@ -624,11 +643,7 @@ static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
 static int psp_tmr_unload(struct psp_context *psp)
 {
        int ret;
-       struct psp_gfx_cmd_resp *cmd;
-
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_tmr_unload_cmd_buf(psp, cmd);
        DRM_INFO("free PSP TMR buffer\n");
@@ -636,7 +651,7 @@ static int psp_tmr_unload(struct psp_context *psp)
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -670,9 +685,7 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       cmd = acquire_psp_cmd_buf(psp);
 
        cmd->cmd_id = GFX_CMD_ID_GET_FW_ATTESTATION;
 
@@ -684,7 +697,7 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
                              ((uint64_t)cmd->resp.uresp.fwar_db_info.fwar_db_addr_hi << 32);
        }
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -692,13 +705,13 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp,
 static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
 {
        struct psp_context *psp = &adev->psp;
-       struct psp_gfx_cmd_resp *cmd = psp->cmd;
+       struct psp_gfx_cmd_resp *cmd;
        int ret;
 
        if (amdgpu_sriov_vf(adev))
                return 0;
 
-       memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+       cmd = acquire_psp_cmd_buf(psp);
 
        cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
        cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_GET;
@@ -709,47 +722,59 @@ static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg)
                        (cmd->resp.uresp.boot_cfg.boot_cfg & BOOT_CONFIG_GECC) ? 1 : 0;
        }
 
+       release_psp_cmd_buf(psp);
+
        return ret;
 }
 
 static int psp_boot_config_set(struct amdgpu_device *adev, uint32_t boot_cfg)
 {
+       int ret;
        struct psp_context *psp = &adev->psp;
-       struct psp_gfx_cmd_resp *cmd = psp->cmd;
+       struct psp_gfx_cmd_resp *cmd;
 
        if (amdgpu_sriov_vf(adev))
                return 0;
 
-       memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
+       cmd = acquire_psp_cmd_buf(psp);
 
        cmd->cmd_id = GFX_CMD_ID_BOOT_CFG;
        cmd->cmd.boot_cfg.sub_cmd = BOOTCFG_CMD_SET;
        cmd->cmd.boot_cfg.boot_config = boot_cfg;
        cmd->cmd.boot_cfg.boot_config_valid = boot_cfg;
 
-       return psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+       ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+       release_psp_cmd_buf(psp);
+
+       return ret;
 }
 
 static int psp_rl_load(struct amdgpu_device *adev)
 {
+       int ret;
        struct psp_context *psp = &adev->psp;
-       struct psp_gfx_cmd_resp *cmd = psp->cmd;
+       struct psp_gfx_cmd_resp *cmd;
 
        if (!is_psp_fw_valid(psp->rl))
                return 0;
 
+       cmd = acquire_psp_cmd_buf(psp);
+
        memset(psp->fw_pri_buf, 0, PSP_1_MEG);
        memcpy(psp->fw_pri_buf, psp->rl.start_addr, psp->rl.size_bytes);
 
-       memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
-
        cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
        cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(psp->fw_pri_mc_addr);
        cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(psp->fw_pri_mc_addr);
        cmd->cmd.cmd_load_ip_fw.fw_size = psp->rl.size_bytes;
        cmd->cmd.cmd_load_ip_fw.fw_type = GFX_FW_TYPE_REG_LIST;
 
-       return psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+       ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+       release_psp_cmd_buf(psp);
+
+       return ret;
 }
 
 static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
@@ -777,9 +802,7 @@ static int psp_asd_load(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev) || !psp->asd_ucode_size)
                return 0;
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       cmd = acquire_psp_cmd_buf(psp);
 
        psp_copy_fw(psp, psp->asd_start_addr, psp->asd_ucode_size);
 
@@ -793,7 +816,7 @@ static int psp_asd_load(struct psp_context *psp)
                psp->asd_context.session_id = cmd->resp.session_id;
        }
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -816,9 +839,7 @@ static int psp_asd_unload(struct psp_context *psp)
        if (!psp->asd_context.asd_initialized)
                return 0;
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);
 
@@ -827,7 +848,7 @@ static int psp_asd_unload(struct psp_context *psp)
        if (!ret)
                psp->asd_context.asd_initialized = false;
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -843,22 +864,21 @@ static void psp_prep_reg_prog_cmd_buf(struct psp_gfx_cmd_resp *cmd,
 int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
                uint32_t value)
 {
-       struct psp_gfx_cmd_resp *cmd = NULL;
+       struct psp_gfx_cmd_resp *cmd;
        int ret = 0;
 
        if (reg >= PSP_REG_LAST)
                return -EINVAL;
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_reg_prog_cmd_buf(cmd, reg, value);
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
        if (ret)
                DRM_ERROR("PSP failed to program reg id %d", reg);
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
+
        return ret;
 }
 
@@ -909,18 +929,14 @@ static int psp_ta_invoke(struct psp_context *psp,
                  uint32_t session_id)
 {
        int ret;
-       struct psp_gfx_cmd_resp *cmd;
-
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -934,9 +950,7 @@ static int psp_xgmi_load(struct psp_context *psp)
         * TODO: bypass the loading in sriov for now
         */
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       cmd = acquire_psp_cmd_buf(psp);
 
        psp_copy_fw(psp, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size);
 
@@ -954,7 +968,7 @@ static int psp_xgmi_load(struct psp_context *psp)
                psp->xgmi_context.session_id = cmd->resp.session_id;
        }
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -974,16 +988,14 @@ static int psp_xgmi_unload(struct psp_context *psp)
         * TODO: bypass the unloading in sriov for now
         */
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -1213,10 +1225,6 @@ static int psp_ras_load(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
-
        psp_copy_fw(psp, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
 
        ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
@@ -1226,6 +1234,8 @@ static int psp_ras_load(struct psp_context *psp)
        else
                ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
 
+       cmd = acquire_psp_cmd_buf(psp);
+
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
                                 psp->ta_ras_ucode_size,
@@ -1244,11 +1254,11 @@ static int psp_ras_load(struct psp_context *psp)
                        dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
        }
 
+       release_psp_cmd_buf(psp);
+
        if (ret || ras_cmd->ras_status)
                amdgpu_ras_fini(psp->adev);
 
-       kfree(cmd);
-
        return ret;
 }
 
@@ -1263,16 +1273,14 @@ static int psp_ras_unload(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                        psp->fence_buf_mc_addr);
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -1522,13 +1530,11 @@ static int psp_hdcp_load(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
-
        psp_copy_fw(psp, psp->ta_hdcp_start_addr,
                    psp->ta_hdcp_ucode_size);
 
+       cmd = acquire_psp_cmd_buf(psp);
+
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
                                 psp->ta_hdcp_ucode_size,
@@ -1543,7 +1549,7 @@ static int psp_hdcp_load(struct psp_context *psp)
                mutex_init(&psp->hdcp_context.mutex);
        }
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -1587,15 +1593,13 @@ static int psp_hdcp_unload(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -1673,12 +1677,10 @@ static int psp_dtm_load(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
-
        psp_copy_fw(psp, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size);
 
+       cmd = acquire_psp_cmd_buf(psp);
+
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
                                 psp->ta_dtm_ucode_size,
@@ -1693,7 +1695,7 @@ static int psp_dtm_load(struct psp_context *psp)
                mutex_init(&psp->dtm_context.mutex);
        }
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -1738,15 +1740,13 @@ static int psp_dtm_unload(struct psp_context *psp)
        if (amdgpu_sriov_vf(psp->adev))
                return 0;
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -1818,12 +1818,10 @@ static int psp_rap_load(struct psp_context *psp)
        int ret;
        struct psp_gfx_cmd_resp *cmd;
 
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
-
        psp_copy_fw(psp, psp->ta_rap_start_addr, psp->ta_rap_ucode_size);
 
+       cmd = acquire_psp_cmd_buf(psp);
+
        psp_prep_ta_load_cmd_buf(cmd,
                                 psp->fw_pri_mc_addr,
                                 psp->ta_rap_ucode_size,
@@ -1838,7 +1836,7 @@ static int psp_rap_load(struct psp_context *psp)
                mutex_init(&psp->rap_context.mutex);
        }
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -1846,17 +1844,13 @@ static int psp_rap_load(struct psp_context *psp)
 static int psp_rap_unload(struct psp_context *psp)
 {
        int ret;
-       struct psp_gfx_cmd_resp *cmd;
-
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -1982,11 +1976,7 @@ static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
 static int psp_securedisplay_load(struct psp_context *psp)
 {
        int ret;
-       struct psp_gfx_cmd_resp *cmd;
-
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
        memset(psp->fw_pri_buf, 0, PSP_1_MEG);
        memcpy(psp->fw_pri_buf, psp->ta_securedisplay_start_addr, psp->ta_securedisplay_ucode_size);
@@ -1999,32 +1989,27 @@ static int psp_securedisplay_load(struct psp_context *psp)
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
-       if (ret)
-               goto failed;
+       if (!ret) {
+               psp->securedisplay_context.securedisplay_initialized = true;
+               psp->securedisplay_context.session_id = cmd->resp.session_id;
+               mutex_init(&psp->securedisplay_context.mutex);
+       }
 
-       psp->securedisplay_context.securedisplay_initialized = true;
-       psp->securedisplay_context.session_id = cmd->resp.session_id;
-       mutex_init(&psp->securedisplay_context.mutex);
+       release_psp_cmd_buf(psp);
 
-failed:
-       kfree(cmd);
        return ret;
 }
 
 static int psp_securedisplay_unload(struct psp_context *psp)
 {
        int ret;
-       struct psp_gfx_cmd_resp *cmd;
-
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
        psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.session_id);
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
 
-       kfree(cmd);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -2400,8 +2385,6 @@ static int psp_prep_load_ip_fw_cmd_buf(struct amdgpu_firmware_info *ucode,
        int ret;
        uint64_t fw_mem_mc_addr = ucode->mc_addr;
 
-       memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
-
        cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
        cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
        cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
@@ -2418,13 +2401,15 @@ static int psp_execute_non_psp_fw_load(struct psp_context *psp,
                                  struct amdgpu_firmware_info *ucode)
 {
        int ret = 0;
+       struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
-       ret = psp_prep_load_ip_fw_cmd_buf(ucode, psp->cmd);
-       if (ret)
-               return ret;
+       ret = psp_prep_load_ip_fw_cmd_buf(ucode, cmd);
+       if (!ret) {
+               ret = psp_cmd_submit_buf(psp, ucode, cmd,
+                                        psp->fence_buf_mc_addr);
+       }
 
-       ret = psp_cmd_submit_buf(psp, ucode, psp->cmd,
-                                psp->fence_buf_mc_addr);
+       release_psp_cmd_buf(psp);
 
        return ret;
 }
@@ -2579,10 +2564,6 @@ static int psp_load_fw(struct amdgpu_device *adev)
                goto skip_memalloc;
        }
 
-       psp->cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!psp->cmd)
-               return -ENOMEM;
-
        if (amdgpu_sriov_vf(adev)) {
                ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG,
                                                AMDGPU_GEM_DOMAIN_VRAM,
@@ -2736,9 +2717,6 @@ static int psp_hw_fini(void *handle)
        amdgpu_bo_free_kernel(&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
                              (void **)&psp->cmd_buf_mem);
 
-       kfree(psp->cmd);
-       psp->cmd = NULL;
-
        return 0;
 }
 
@@ -2902,17 +2880,15 @@ int psp_gpu_reset(struct amdgpu_device *adev)
 int psp_rlc_autoload_start(struct psp_context *psp)
 {
        int ret;
-       struct psp_gfx_cmd_resp *cmd;
-
-       cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
-       if (!cmd)
-               return -ENOMEM;
+       struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
 
        cmd->cmd_id = GFX_CMD_ID_AUTOLOAD_RLC;
 
        ret = psp_cmd_submit_buf(psp, NULL, cmd,
                                 psp->fence_buf_mc_addr);
-       kfree(cmd);
+
+       release_psp_cmd_buf(psp);
+
        return ret;
 }
 
index 4435b18..6b16455 100644 (file)
@@ -54,9 +54,9 @@ enum psp_bootloader_cmd {
        PSP_BL__LOAD_SYSDRV             = 0x10000,
        PSP_BL__LOAD_SOSDRV             = 0x20000,
        PSP_BL__LOAD_KEY_DATABASE       = 0x80000,
-       PSP_BL__LOAD_SOCDRV             = 0x90000,
-       PSP_BL__LOAD_INTFDRV            = 0xA0000,
-       PSP_BL__LOAD_DBGDRV             = 0xB0000,
+       PSP_BL__LOAD_SOCDRV             = 0xB0000,
+       PSP_BL__LOAD_INTFDRV            = 0xC0000,
+       PSP_BL__LOAD_DBGDRV             = 0xD0000,
        PSP_BL__DRAM_LONG_TRAIN         = 0x100000,
        PSP_BL__DRAM_SHORT_TRAIN        = 0x200000,
        PSP_BL__LOAD_TOS_SPL_TABLE      = 0x10000000,
index d2e5b25..1945902 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/bits.h>
 #include "atom.h"
 #include "amdgpu_eeprom.h"
+#include "amdgpu_atomfirmware.h"
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 
@@ -116,6 +117,22 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
        if (!control)
                return false;
 
+       control->i2c_address = 0;
+
+       if (amdgpu_atomfirmware_ras_rom_addr(adev, (uint8_t *)&control->i2c_address)) {
+               if (control->i2c_address == 0xA0)
+                       control->i2c_address = 0;
+               else if (control->i2c_address == 0xA8)
+                       control->i2c_address = 0x40000;
+               else {
+                       dev_warn(adev->dev, "RAS EEPROM I2C address not supported");
+                       return false;
+               }
+
+               return true;
+       }
+
        switch (adev->asic_type) {
        case CHIP_VEGA20:
                control->i2c_address = EEPROM_I2C_MADDR_VEGA20;
index 59e0fef..acfa207 100644 (file)
@@ -54,11 +54,12 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 {
        struct drm_mm_node *node;
 
-       if (!res) {
+       if (!res || res->mem_type == TTM_PL_SYSTEM) {
                cur->start = start;
                cur->size = size;
                cur->remaining = size;
                cur->node = NULL;
+               WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
                return;
        }
 
index 27adffa..9c11ced 100644 (file)
@@ -106,7 +106,6 @@ struct amdgpu_fence_driver {
        struct dma_fence                **fences;
 };
 
-int amdgpu_fence_driver_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring);
 
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
@@ -115,9 +114,10 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
                                   struct amdgpu_irq_src *irq_src,
                                   unsigned irq_type);
+void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev);
+int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev);
 void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev);
-void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev);
 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence,
                      unsigned flags);
 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
index 9fd5db5..38dade4 100644 (file)
@@ -1123,7 +1123,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
        /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
-       if (gtt && gtt->userptr) {
+       if (gtt->userptr) {
                ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
                if (!ttm->sg)
                        return -ENOMEM;
@@ -1148,7 +1148,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
        struct amdgpu_device *adev;
 
-       if (gtt && gtt->userptr) {
+       if (gtt->userptr) {
                amdgpu_ttm_tt_set_user_pages(ttm, NULL);
                kfree(ttm->sg);
                ttm->sg = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
new file mode 100644 (file)
index 0000000..ce982af
--- /dev/null
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "amdgpu.h"
+#ifdef CONFIG_DRM_AMDGPU_SI
+#include "dce_v6_0.h"
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+#include "dce_v8_0.h"
+#endif
+#include "dce_v10_0.h"
+#include "dce_v11_0.h"
+#include "ivsrcid/ivsrcid_vislands30.h"
+#include "amdgpu_vkms.h"
+#include "amdgpu_display.h"
+
+/**
+ * DOC: amdgpu_vkms
+ *
+ * The amdgpu vkms interface provides a virtual KMS interface for several use
+ * cases: devices without display hardware, platforms where the actual display
+ * hardware is not useful (e.g., servers), SR-IOV virtual functions, device
+ * emulation/simulation, and device bring-up prior to the display hardware
+ * being usable. We previously emulated a legacy KMS interface, but there was a
+ * desire to move to the atomic KMS interface. The vkms driver did everything we
+ * needed, but we wanted native KMS support in the driver, without buffer
+ * sharing, and the ability to support an instance of VKMS per device. We first
+ * looked at splitting vkms into a stub driver and a helper module that other
+ * drivers could use to implement a virtual display, but this strategy ended up
+ * being messy due to the driver-specific callbacks needed for buffer
+ * management. Ultimately, it proved easier to import the vkms code as it
+ * mostly used core drm helpers anyway.
+ */
+
+static const u32 amdgpu_vkms_formats[] = {
+       DRM_FORMAT_XRGB8888,
+};
+
+static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
+{
+       struct amdgpu_vkms_output *output = container_of(timer,
+                                                        struct amdgpu_vkms_output,
+                                                        vblank_hrtimer);
+       struct drm_crtc *crtc = &output->crtc;
+       u64 ret_overrun;
+       bool ret;
+
+       ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
+                                         output->period_ns);
+       WARN_ON(ret_overrun != 1);
+
+       ret = drm_crtc_handle_vblank(crtc);
+       if (!ret)
+               DRM_ERROR("amdgpu_vkms failure on handling vblank");
+
+       return HRTIMER_RESTART;
+}
+
+static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc)
+{
+       struct drm_device *dev = crtc->dev;
+       unsigned int pipe = drm_crtc_index(crtc);
+       struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+       struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
+
+       drm_calc_timestamping_constants(crtc, &crtc->mode);
+
+       hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       out->vblank_hrtimer.function = &amdgpu_vkms_vblank_simulate;
+       out->period_ns = ktime_set(0, vblank->framedur_ns);
+       hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);
+
+       return 0;
+}
+
+static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
+{
+       struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
+
+       hrtimer_cancel(&out->vblank_hrtimer);
+}
+
+static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+                                            int *max_error,
+                                            ktime_t *vblank_time,
+                                            bool in_vblank_irq)
+{
+       struct drm_device *dev = crtc->dev;
+       unsigned int pipe = crtc->index;
+       struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
+       struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
+
+       if (!READ_ONCE(vblank->enabled)) {
+               *vblank_time = ktime_get();
+               return true;
+       }
+
+       *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);
+
+       if (WARN_ON(*vblank_time == vblank->time))
+               return true;
+
+       /*
+        * To prevent races we roll the hrtimer forward before we do any
+        * interrupt processing - this is how real hw works (the interrupt is
+        * only generated after all the vblank registers are updated) and what
+        * the vblank core expects. Therefore we need to always correct the
+        * timestamp by one frame.
+        */
+       *vblank_time -= output->period_ns;
+
+       return true;
+}
+
+static const struct drm_crtc_funcs amdgpu_vkms_crtc_funcs = {
+       .set_config             = drm_atomic_helper_set_config,
+       .destroy                = drm_crtc_cleanup,
+       .page_flip              = drm_atomic_helper_page_flip,
+       .reset                  = drm_atomic_helper_crtc_reset,
+       .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+       .atomic_destroy_state   = drm_atomic_helper_crtc_destroy_state,
+       .enable_vblank          = amdgpu_vkms_enable_vblank,
+       .disable_vblank         = amdgpu_vkms_disable_vblank,
+       .get_vblank_timestamp   = amdgpu_vkms_get_vblank_timestamp,
+};
+
+static void amdgpu_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
+                                          struct drm_atomic_state *state)
+{
+       drm_crtc_vblank_on(crtc);
+}
+
+static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
+                                           struct drm_atomic_state *state)
+{
+       drm_crtc_vblank_off(crtc);
+}
+
+static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
+                                         struct drm_atomic_state *state)
+{
+       if (crtc->state->event) {
+               spin_lock(&crtc->dev->event_lock);
+
+               if (drm_crtc_vblank_get(crtc) != 0)
+                       drm_crtc_send_vblank_event(crtc, crtc->state->event);
+               else
+                       drm_crtc_arm_vblank_event(crtc, crtc->state->event);
+
+               spin_unlock(&crtc->dev->event_lock);
+
+               crtc->state->event = NULL;
+       }
+}
+
+static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = {
+       .atomic_flush   = amdgpu_vkms_crtc_atomic_flush,
+       .atomic_enable  = amdgpu_vkms_crtc_atomic_enable,
+       .atomic_disable = amdgpu_vkms_crtc_atomic_disable,
+};
+
+static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
+                         struct drm_plane *primary, struct drm_plane *cursor)
+{
+       int ret;
+
+       ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
+                                       &amdgpu_vkms_crtc_funcs, NULL);
+       if (ret) {
+               DRM_ERROR("Failed to init CRTC\n");
+               return ret;
+       }
+
+       drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs);
+
+       return ret;
+}
+
+static const struct drm_connector_funcs amdgpu_vkms_connector_funcs = {
+       .fill_modes = drm_helper_probe_single_connector_modes,
+       .destroy = drm_connector_cleanup,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
+{
+       struct drm_device *dev = connector->dev;
+       struct drm_display_mode *mode = NULL;
+       unsigned i;
+       static const struct mode_size {
+               int w;
+               int h;
+       } common_modes[] = {
+               { 640,  480},
+               { 720,  480},
+               { 800,  600},
+               { 848,  480},
+               {1024,  768},
+               {1152,  768},
+               {1280,  720},
+               {1280,  800},
+               {1280,  854},
+               {1280,  960},
+               {1280, 1024},
+               {1440,  900},
+               {1400, 1050},
+               {1680, 1050},
+               {1600, 1200},
+               {1920, 1080},
+               {1920, 1200},
+               {2560, 1440},
+               {4096, 3112},
+               {3656, 2664},
+               {3840, 2160},
+               {4096, 2160},
+       };
+
+       for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
+               mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
+               drm_mode_probed_add(connector, mode);
+       }
+
+       drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
+
+       return ARRAY_SIZE(common_modes);
+}
+
+static const struct drm_connector_helper_funcs amdgpu_vkms_conn_helper_funcs = {
+       .get_modes    = amdgpu_vkms_conn_get_modes,
+};
+
+static const struct drm_plane_funcs amdgpu_vkms_plane_funcs = {
+       .update_plane           = drm_atomic_helper_update_plane,
+       .disable_plane          = drm_atomic_helper_disable_plane,
+       .destroy                = drm_plane_cleanup,
+       .reset                  = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state   = drm_atomic_helper_plane_destroy_state,
+};
+
+static void amdgpu_vkms_plane_atomic_update(struct drm_plane *plane,
+                                           struct drm_atomic_state *old_state)
+{
+       return;
+}
+
+static int amdgpu_vkms_plane_atomic_check(struct drm_plane *plane,
+                                         struct drm_atomic_state *state)
+{
+       struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
+                                                                                plane);
+       struct drm_crtc_state *crtc_state;
+       int ret;
+
+       if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
+               return 0;
+
+       crtc_state = drm_atomic_get_crtc_state(state,
+                                              new_plane_state->crtc);
+       if (IS_ERR(crtc_state))
+               return PTR_ERR(crtc_state);
+
+       ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
+                                                 DRM_PLANE_HELPER_NO_SCALING,
+                                                 DRM_PLANE_HELPER_NO_SCALING,
+                                                 false, true);
+       if (ret != 0)
+               return ret;
+
+       /* for now primary plane must be visible and full screen */
+       if (!new_plane_state->visible)
+               return -EINVAL;
+
+       return 0;
+}
+
+static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
+                                 struct drm_plane_state *new_state)
+{
+       struct amdgpu_framebuffer *afb;
+       struct drm_gem_object *obj;
+       struct amdgpu_device *adev;
+       struct amdgpu_bo *rbo;
+       struct list_head list;
+       struct ttm_validate_buffer tv;
+       struct ww_acquire_ctx ticket;
+       uint32_t domain;
+       int r;
+
+       if (!new_state->fb) {
+               DRM_DEBUG_KMS("No FB bound\n");
+               return 0;
+       }
+       afb = to_amdgpu_framebuffer(new_state->fb);
+       obj = new_state->fb->obj[0];
+       rbo = gem_to_amdgpu_bo(obj);
+       adev = amdgpu_ttm_adev(rbo->tbo.bdev);
+       INIT_LIST_HEAD(&list);
+
+       tv.bo = &rbo->tbo;
+       tv.num_shared = 1;
+       list_add(&tv.head, &list);
+
+       r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
+       if (r) {
+               dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
+               return r;
+       }
+
+       if (plane->type != DRM_PLANE_TYPE_CURSOR)
+               domain = amdgpu_display_supported_domains(adev, rbo->flags);
+       else
+               domain = AMDGPU_GEM_DOMAIN_VRAM;
+
+       r = amdgpu_bo_pin(rbo, domain);
+       if (unlikely(r != 0)) {
+               if (r != -ERESTARTSYS)
+                       DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
+               ttm_eu_backoff_reservation(&ticket, &list);
+               return r;
+       }
+
+       r = amdgpu_ttm_alloc_gart(&rbo->tbo);
+       if (unlikely(r != 0)) {
+               amdgpu_bo_unpin(rbo);
+               ttm_eu_backoff_reservation(&ticket, &list);
+               DRM_ERROR("%p bind failed\n", rbo);
+               return r;
+       }
+
+       ttm_eu_backoff_reservation(&ticket, &list);
+
+       afb->address = amdgpu_bo_gpu_offset(rbo);
+
+       amdgpu_bo_ref(rbo);
+
+       return 0;
+}
+
+static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
+                                  struct drm_plane_state *old_state)
+{
+       struct amdgpu_bo *rbo;
+       int r;
+
+       if (!old_state->fb)
+               return;
+
+       rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
+       r = amdgpu_bo_reserve(rbo, false);
+       if (unlikely(r)) {
+               DRM_ERROR("failed to reserve rbo before unpin\n");
+               return;
+       }
+
+       amdgpu_bo_unpin(rbo);
+       amdgpu_bo_unreserve(rbo);
+       amdgpu_bo_unref(&rbo);
+}
+
+static const struct drm_plane_helper_funcs amdgpu_vkms_primary_helper_funcs = {
+       .atomic_update          = amdgpu_vkms_plane_atomic_update,
+       .atomic_check           = amdgpu_vkms_plane_atomic_check,
+       .prepare_fb             = amdgpu_vkms_prepare_fb,
+       .cleanup_fb             = amdgpu_vkms_cleanup_fb,
+};
+
+static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
+                                               enum drm_plane_type type,
+                                               int index)
+{
+       struct drm_plane *plane;
+       int ret;
+
+       plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+       if (!plane)
+               return ERR_PTR(-ENOMEM);
+
+       ret = drm_universal_plane_init(dev, plane, 1 << index,
+                                      &amdgpu_vkms_plane_funcs,
+                                      amdgpu_vkms_formats,
+                                      ARRAY_SIZE(amdgpu_vkms_formats),
+                                      NULL, type, NULL);
+       if (ret) {
+               kfree(plane);
+               return ERR_PTR(ret);
+       }
+
+       drm_plane_helper_add(plane, &amdgpu_vkms_primary_helper_funcs);
+
+       return plane;
+}
+
+int amdgpu_vkms_output_init(struct drm_device *dev,
+                           struct amdgpu_vkms_output *output, int index)
+{
+       struct drm_connector *connector = &output->connector;
+       struct drm_encoder *encoder = &output->encoder;
+       struct drm_crtc *crtc = &output->crtc;
+       struct drm_plane *primary, *cursor = NULL;
+       int ret;
+
+       primary = amdgpu_vkms_plane_init(dev, DRM_PLANE_TYPE_PRIMARY, index);
+       if (IS_ERR(primary))
+               return PTR_ERR(primary);
+
+       ret = amdgpu_vkms_crtc_init(dev, crtc, primary, cursor);
+       if (ret)
+               goto err_crtc;
+
+       ret = drm_connector_init(dev, connector, &amdgpu_vkms_connector_funcs,
+                                DRM_MODE_CONNECTOR_VIRTUAL);
+       if (ret) {
+               DRM_ERROR("Failed to init connector\n");
+               goto err_connector;
+       }
+
+       drm_connector_helper_add(connector, &amdgpu_vkms_conn_helper_funcs);
+
+       ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
+       if (ret) {
+               DRM_ERROR("Failed to init encoder\n");
+               goto err_encoder;
+       }
+       encoder->possible_crtcs = 1 << index;
+
+       ret = drm_connector_attach_encoder(connector, encoder);
+       if (ret) {
+               DRM_ERROR("Failed to attach connector to encoder\n");
+               goto err_attach;
+       }
+
+       drm_mode_config_reset(dev);
+
+       return 0;
+
+err_attach:
+       drm_encoder_cleanup(encoder);
+
+err_encoder:
+       drm_connector_cleanup(connector);
+
+err_connector:
+       drm_crtc_cleanup(crtc);
+
+err_crtc:
+       drm_plane_cleanup(primary);
+
+       return ret;
+}
+
+const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = {
+       .fb_create = amdgpu_display_user_framebuffer_create,
+       .atomic_check = drm_atomic_helper_check,
+       .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int amdgpu_vkms_sw_init(void *handle)
+{
+       int r, i;
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       adev_to_drm(adev)->max_vblank_count = 0;
+
+       adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs;
+
+       adev_to_drm(adev)->mode_config.max_width = XRES_MAX;
+       adev_to_drm(adev)->mode_config.max_height = YRES_MAX;
+
+       adev_to_drm(adev)->mode_config.preferred_depth = 24;
+       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
+
+       adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
+
+       r = amdgpu_display_modeset_create_props(adev);
+       if (r)
+               return r;
+
+       adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc, sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
+       if (!adev->amdgpu_vkms_output)
+               return -ENOMEM;
+
+       /* allocate crtcs, encoders, connectors */
+       for (i = 0; i < adev->mode_info.num_crtc; i++) {
+               r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i);
+               if (r)
+                       return r;
+       }
+
+       drm_kms_helper_poll_init(adev_to_drm(adev));
+
+       adev->mode_info.mode_config_initialized = true;
+       return 0;
+}
+
+static int amdgpu_vkms_sw_fini(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int i = 0;
+
+       for (i = 0; i < adev->mode_info.num_crtc; i++)
+               if (adev->mode_info.crtcs[i])
+                       hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
+
+       kfree(adev->mode_info.bios_hardcoded_edid);
+       kfree(adev->amdgpu_vkms_output);
+
+       drm_kms_helper_poll_fini(adev_to_drm(adev));
+
+       adev->mode_info.mode_config_initialized = false;
+       return 0;
+}
+
+static int amdgpu_vkms_hw_init(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+       switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+       case CHIP_TAHITI:
+       case CHIP_PITCAIRN:
+       case CHIP_VERDE:
+       case CHIP_OLAND:
+               dce_v6_0_disable_dce(adev);
+               break;
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+       case CHIP_BONAIRE:
+       case CHIP_HAWAII:
+       case CHIP_KAVERI:
+       case CHIP_KABINI:
+       case CHIP_MULLINS:
+               dce_v8_0_disable_dce(adev);
+               break;
+#endif
+       case CHIP_FIJI:
+       case CHIP_TONGA:
+               dce_v10_0_disable_dce(adev);
+               break;
+       case CHIP_CARRIZO:
+       case CHIP_STONEY:
+       case CHIP_POLARIS10:
+       case CHIP_POLARIS11:
+       case CHIP_VEGAM:
+               dce_v11_0_disable_dce(adev);
+               break;
+       case CHIP_TOPAZ:
+#ifdef CONFIG_DRM_AMDGPU_SI
+       case CHIP_HAINAN:
+#endif
+               /* no DCE */
+               break;
+       default:
+               break;
+       }
+       return 0;
+}
+
+static int amdgpu_vkms_hw_fini(void *handle)
+{
+       return 0;
+}
+
+static int amdgpu_vkms_suspend(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = drm_mode_config_helper_suspend(adev_to_drm(adev));
+       if (r)
+               return r;
+       return amdgpu_vkms_hw_fini(handle);
+}
+
+static int amdgpu_vkms_resume(void *handle)
+{
+       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+       int r;
+
+       r = amdgpu_vkms_hw_init(handle);
+       if (r)
+               return r;
+       return drm_mode_config_helper_resume(adev_to_drm(adev));
+}
+
+static bool amdgpu_vkms_is_idle(void *handle)
+{
+       return true;
+}
+
+static int amdgpu_vkms_wait_for_idle(void *handle)
+{
+       return 0;
+}
+
+static int amdgpu_vkms_soft_reset(void *handle)
+{
+       return 0;
+}
+
+static int amdgpu_vkms_set_clockgating_state(void *handle,
+                                         enum amd_clockgating_state state)
+{
+       return 0;
+}
+
+static int amdgpu_vkms_set_powergating_state(void *handle,
+                                         enum amd_powergating_state state)
+{
+       return 0;
+}
+
+static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
+       .name = "amdgpu_vkms",
+       .early_init = NULL,
+       .late_init = NULL,
+       .sw_init = amdgpu_vkms_sw_init,
+       .sw_fini = amdgpu_vkms_sw_fini,
+       .hw_init = amdgpu_vkms_hw_init,
+       .hw_fini = amdgpu_vkms_hw_fini,
+       .suspend = amdgpu_vkms_suspend,
+       .resume = amdgpu_vkms_resume,
+       .is_idle = amdgpu_vkms_is_idle,
+       .wait_for_idle = amdgpu_vkms_wait_for_idle,
+       .soft_reset = amdgpu_vkms_soft_reset,
+       .set_clockgating_state = amdgpu_vkms_set_clockgating_state,
+       .set_powergating_state = amdgpu_vkms_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version amdgpu_vkms_ip_block =
+{
+       .type = AMD_IP_BLOCK_TYPE_DCE,
+       .major = 1,
+       .minor = 0,
+       .rev = 0,
+       .funcs = &amdgpu_vkms_ip_funcs,
+};
+
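
Because amdgpu_vkms.c has no scanout hardware, vblanks are synthesized: an hrtimer fires once per frame (vblank->framedur_ns), is rolled forward by one period with hrtimer_forward_now(), and only then is drm_crtc_handle_vblank() called, so get_vblank_timestamp() reports the timer's next expiry minus one period. The short user-space sketch below illustrates only that bookkeeping; struct fake_vblank and its helpers are hypothetical names, not driver code.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct fake_vblank {
	uint64_t period_ns;   /* frame duration, ~16.67 ms at 60 Hz */
	uint64_t next_expiry; /* absolute time the timer fires next */
};

/* Advance the timer past "now", as hrtimer_forward_now() would. */
static unsigned int forward_timer(struct fake_vblank *vb, uint64_t now)
{
	unsigned int overruns = 0;

	while (vb->next_expiry <= now) {
		vb->next_expiry += vb->period_ns;
		overruns++;
	}
	return overruns;
}

/* The timer was already forwarded when the handler ran, so the vblank
 * that userspace asks about happened one period before the next expiry. */
static uint64_t vblank_timestamp(const struct fake_vblank *vb)
{
	return vb->next_expiry - vb->period_ns;
}

int main(void)
{
	struct fake_vblank vb = {
		.period_ns = NSEC_PER_SEC / 60,
		.next_expiry = NSEC_PER_SEC / 60,
	};
	/* Pretend the handler runs just after the third vblank. */
	uint64_t now = 3 * (NSEC_PER_SEC / 60) + 1000;
	unsigned int overruns = forward_timer(&vb, now);

	printf("overruns=%u timestamp=%llu ns\n",
	       overruns, (unsigned long long)vblank_timestamp(&vb));
	return 0;
}
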
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h
new file mode 100644 (file)
index 0000000..97f1b79
--- /dev/null
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _AMDGPU_VKMS_H_
+#define _AMDGPU_VKMS_H_
+
+#define XRES_DEF  1024
+#define YRES_DEF   768
+
+#define XRES_MAX  16384
+#define YRES_MAX  16384
+
+#define drm_crtc_to_amdgpu_vkms_output(target) \
+       container_of(target, struct amdgpu_vkms_output, crtc)
+
+extern const struct amdgpu_ip_block_version amdgpu_vkms_ip_block;
+
+struct amdgpu_vkms_output {
+       struct drm_crtc crtc;
+       struct drm_encoder encoder;
+       struct drm_connector connector;
+       struct hrtimer vblank_hrtimer;
+       ktime_t period_ns;
+       struct drm_pending_vblank_event *event;
+};
+
+#endif /* _AMDGPU_VKMS_H_ */
index 6362e86..2a88ed5 100644 (file)
@@ -1798,7 +1798,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
        r = vm->update_funcs->commit(&params, fence);
 
        if (table_freed)
-               *table_freed = params.table_freed;
+               *table_freed = *table_freed || params.table_freed;
 
 error_unlock:
        amdgpu_vm_eviction_unlock(vm);
@@ -1856,6 +1856,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
  * @adev: amdgpu_device pointer
  * @bo_va: requested BO and VM object
  * @clear: if true clear the entries
+ * @table_freed: return true if page table is freed
  *
  * Fill in the page table entries for @bo_va.
  *
@@ -1863,7 +1864,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
  * 0 for success, -EINVAL for failure.
  */
 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
-                       bool clear)
+                       bool clear, bool *table_freed)
 {
        struct amdgpu_bo *bo = bo_va->base.bo;
        struct amdgpu_vm *vm = bo_va->base.vm;
@@ -1942,7 +1943,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
                                                resv, mapping->start,
                                                mapping->last, update_flags,
                                                mapping->offset, mem,
-                                               pages_addr, last_update, NULL);
+                                               pages_addr, last_update, table_freed);
                if (r)
                        return r;
        }
@@ -2194,7 +2195,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
 
        list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
                /* Per VM BOs never need to bo cleared in the page tables */
-               r = amdgpu_vm_bo_update(adev, bo_va, false);
+               r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
                if (r)
                        return r;
        }
@@ -2213,7 +2214,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
                else
                        clear = true;
 
-               r = amdgpu_vm_bo_update(adev, bo_va, clear);
+               r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL);
                if (r)
                        return r;
 
index ee0028e..80cc9ab 100644 (file)
@@ -408,7 +408,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
                                struct dma_fence **fence, bool *free_table);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                        struct amdgpu_bo_va *bo_va,
-                       bool clear);
+                       bool clear, bool *table_freed);
 bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
                             struct amdgpu_bo *bo, bool evicted);
index c0fcc41..54f28c0 100644 (file)
@@ -70,7 +70,7 @@
 
 #include "amdgpu_dm.h"
 #include "amdgpu_amdkfd.h"
-#include "dce_virtual.h"
+#include "amdgpu_vkms.h"
 
 static const struct amdgpu_video_codec_info cik_video_codecs_encode_array[] =
 {
@@ -2259,7 +2259,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
                amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
                if (adev->enable_virtual_display)
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -2277,7 +2277,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
                amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
                if (adev->enable_virtual_display)
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -2295,7 +2295,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
                amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
                if (adev->enable_virtual_display)
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -2315,7 +2315,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
                amdgpu_device_ip_block_add(adev, &kv_smu_ip_block);
                if (adev->enable_virtual_display)
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
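All of the *_set_ip_blocks() hunks in this series make the same substitution: the virtual-display path now registers amdgpu_vkms_ip_block instead of dce_virtual_ip_block, while the DC fallback is untouched. A condensed sketch of that shared pattern (illustrative only, not part of the patch):

/* Illustrative sketch of the display IP block selection repeated per ASIC */
static void example_add_display_ip_block(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display)
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
	else if (amdgpu_device_has_dc_support(adev))
		amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
}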
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
deleted file mode 100644
index 7e0d8c0..0000000
+++ /dev/null
@@ -1,780 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drm_vblank.h>
-
-#include "amdgpu.h"
-#include "amdgpu_pm.h"
-#include "amdgpu_i2c.h"
-#include "atom.h"
-#include "amdgpu_pll.h"
-#include "amdgpu_connectors.h"
-#ifdef CONFIG_DRM_AMDGPU_SI
-#include "dce_v6_0.h"
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
-#include "dce_v8_0.h"
-#endif
-#include "dce_v10_0.h"
-#include "dce_v11_0.h"
-#include "dce_virtual.h"
-#include "ivsrcid/ivsrcid_vislands30.h"
-#include "amdgpu_display.h"
-
-#define DCE_VIRTUAL_VBLANK_PERIOD 16666666
-
-
-static void dce_virtual_set_display_funcs(struct amdgpu_device *adev);
-static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev);
-static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
-                                             int index);
-static int dce_virtual_pageflip(struct amdgpu_device *adev,
-                               unsigned crtc_id);
-static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer);
-static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
-                                                       int crtc,
-                                                       enum amdgpu_interrupt_state state);
-
-static u32 dce_virtual_vblank_get_counter(struct amdgpu_device *adev, int crtc)
-{
-       return 0;
-}
-
-static void dce_virtual_page_flip(struct amdgpu_device *adev,
-                             int crtc_id, u64 crtc_base, bool async)
-{
-       return;
-}
-
-static int dce_virtual_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
-                                       u32 *vbl, u32 *position)
-{
-       *vbl = 0;
-       *position = 0;
-
-       return -EINVAL;
-}
-
-static bool dce_virtual_hpd_sense(struct amdgpu_device *adev,
-                              enum amdgpu_hpd_id hpd)
-{
-       return true;
-}
-
-static void dce_virtual_hpd_set_polarity(struct amdgpu_device *adev,
-                                     enum amdgpu_hpd_id hpd)
-{
-       return;
-}
-
-static u32 dce_virtual_hpd_get_gpio_reg(struct amdgpu_device *adev)
-{
-       return 0;
-}
-
-/**
- * dce_virtual_bandwidth_update - program display watermarks
- *
- * @adev: amdgpu_device pointer
- *
- * Calculate and program the display watermarks and line
- * buffer allocation (CIK).
- */
-static void dce_virtual_bandwidth_update(struct amdgpu_device *adev)
-{
-       return;
-}
-
-static int dce_virtual_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
-                                     u16 *green, u16 *blue, uint32_t size,
-                                     struct drm_modeset_acquire_ctx *ctx)
-{
-       return 0;
-}
-
-static void dce_virtual_crtc_destroy(struct drm_crtc *crtc)
-{
-       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
-       drm_crtc_cleanup(crtc);
-       kfree(amdgpu_crtc);
-}
-
-static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
-       .cursor_set2 = NULL,
-       .cursor_move = NULL,
-       .gamma_set = dce_virtual_crtc_gamma_set,
-       .set_config = amdgpu_display_crtc_set_config,
-       .destroy = dce_virtual_crtc_destroy,
-       .page_flip_target = amdgpu_display_crtc_page_flip_target,
-       .get_vblank_counter = amdgpu_get_vblank_counter_kms,
-       .enable_vblank = amdgpu_enable_vblank_kms,
-       .disable_vblank = amdgpu_disable_vblank_kms,
-       .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-};
-
-static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
-       struct drm_device *dev = crtc->dev;
-       struct amdgpu_device *adev = drm_to_adev(dev);
-       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       unsigned type;
-
-       switch (mode) {
-       case DRM_MODE_DPMS_ON:
-               amdgpu_crtc->enabled = true;
-               /* Make sure VBLANK interrupts are still enabled */
-               type = amdgpu_display_crtc_idx_to_irq_type(adev,
-                                               amdgpu_crtc->crtc_id);
-               amdgpu_irq_update(adev, &adev->crtc_irq, type);
-               drm_crtc_vblank_on(crtc);
-               break;
-       case DRM_MODE_DPMS_STANDBY:
-       case DRM_MODE_DPMS_SUSPEND:
-       case DRM_MODE_DPMS_OFF:
-               drm_crtc_vblank_off(crtc);
-               amdgpu_crtc->enabled = false;
-               break;
-       }
-}
-
-
-static void dce_virtual_crtc_prepare(struct drm_crtc *crtc)
-{
-       dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void dce_virtual_crtc_commit(struct drm_crtc *crtc)
-{
-       dce_virtual_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-static void dce_virtual_crtc_disable(struct drm_crtc *crtc)
-{
-       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-       struct drm_device *dev = crtc->dev;
-
-       if (dev->num_crtcs)
-               drm_crtc_vblank_off(crtc);
-
-       amdgpu_crtc->enabled = false;
-       amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
-       amdgpu_crtc->encoder = NULL;
-       amdgpu_crtc->connector = NULL;
-}
-
-static int dce_virtual_crtc_mode_set(struct drm_crtc *crtc,
-                                 struct drm_display_mode *mode,
-                                 struct drm_display_mode *adjusted_mode,
-                                 int x, int y, struct drm_framebuffer *old_fb)
-{
-       struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
-       /* update the hw version fpr dpm */
-       amdgpu_crtc->hw_mode = *adjusted_mode;
-
-       return 0;
-}
-
-static bool dce_virtual_crtc_mode_fixup(struct drm_crtc *crtc,
-                                    const struct drm_display_mode *mode,
-                                    struct drm_display_mode *adjusted_mode)
-{
-       return true;
-}
-
-
-static int dce_virtual_crtc_set_base(struct drm_crtc *crtc, int x, int y,
-                                 struct drm_framebuffer *old_fb)
-{
-       return 0;
-}
-
-static int dce_virtual_crtc_set_base_atomic(struct drm_crtc *crtc,
-                                        struct drm_framebuffer *fb,
-                                        int x, int y, enum mode_set_atomic state)
-{
-       return 0;
-}
-
-static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
-       .dpms = dce_virtual_crtc_dpms,
-       .mode_fixup = dce_virtual_crtc_mode_fixup,
-       .mode_set = dce_virtual_crtc_mode_set,
-       .mode_set_base = dce_virtual_crtc_set_base,
-       .mode_set_base_atomic = dce_virtual_crtc_set_base_atomic,
-       .prepare = dce_virtual_crtc_prepare,
-       .commit = dce_virtual_crtc_commit,
-       .disable = dce_virtual_crtc_disable,
-       .get_scanout_position = amdgpu_crtc_get_scanout_position,
-};
-
-static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
-{
-       struct amdgpu_crtc *amdgpu_crtc;
-
-       amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) +
-                             (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
-       if (amdgpu_crtc == NULL)
-               return -ENOMEM;
-
-       drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_virtual_crtc_funcs);
-
-       drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256);
-       amdgpu_crtc->crtc_id = index;
-       adev->mode_info.crtcs[index] = amdgpu_crtc;
-
-       amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
-       amdgpu_crtc->encoder = NULL;
-       amdgpu_crtc->connector = NULL;
-       amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;
-       drm_crtc_helper_add(&amdgpu_crtc->base, &dce_virtual_crtc_helper_funcs);
-
-       hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-       hrtimer_set_expires(&amdgpu_crtc->vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD);
-       amdgpu_crtc->vblank_timer.function = dce_virtual_vblank_timer_handle;
-       hrtimer_start(&amdgpu_crtc->vblank_timer,
-                     DCE_VIRTUAL_VBLANK_PERIOD, HRTIMER_MODE_REL);
-       return 0;
-}
-
-static int dce_virtual_early_init(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       dce_virtual_set_display_funcs(adev);
-       dce_virtual_set_irq_funcs(adev);
-
-       adev->mode_info.num_hpd = 1;
-       adev->mode_info.num_dig = 1;
-       return 0;
-}
-
-static struct drm_encoder *
-dce_virtual_encoder(struct drm_connector *connector)
-{
-       struct drm_encoder *encoder;
-
-       drm_connector_for_each_possible_encoder(connector, encoder) {
-               if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
-                       return encoder;
-       }
-
-       /* pick the first one */
-       drm_connector_for_each_possible_encoder(connector, encoder)
-               return encoder;
-
-       return NULL;
-}
-
-static int dce_virtual_get_modes(struct drm_connector *connector)
-{
-       struct drm_device *dev = connector->dev;
-       struct drm_display_mode *mode = NULL;
-       unsigned i;
-       static const struct mode_size {
-               int w;
-               int h;
-       } common_modes[] = {
-               { 640,  480},
-               { 720,  480},
-               { 800,  600},
-               { 848,  480},
-               {1024,  768},
-               {1152,  768},
-               {1280,  720},
-               {1280,  800},
-               {1280,  854},
-               {1280,  960},
-               {1280, 1024},
-               {1440,  900},
-               {1400, 1050},
-               {1680, 1050},
-               {1600, 1200},
-               {1920, 1080},
-               {1920, 1200},
-               {2560, 1440},
-               {4096, 3112},
-               {3656, 2664},
-               {3840, 2160},
-               {4096, 2160},
-       };
-
-       for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
-               mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
-               drm_mode_probed_add(connector, mode);
-       }
-
-       return 0;
-}
-
-static enum drm_mode_status dce_virtual_mode_valid(struct drm_connector *connector,
-                                 struct drm_display_mode *mode)
-{
-       return MODE_OK;
-}
-
-static int
-dce_virtual_dpms(struct drm_connector *connector, int mode)
-{
-       return 0;
-}
-
-static int
-dce_virtual_set_property(struct drm_connector *connector,
-                        struct drm_property *property,
-                        uint64_t val)
-{
-       return 0;
-}
-
-static void dce_virtual_destroy(struct drm_connector *connector)
-{
-       drm_connector_unregister(connector);
-       drm_connector_cleanup(connector);
-       kfree(connector);
-}
-
-static void dce_virtual_force(struct drm_connector *connector)
-{
-       return;
-}
-
-static const struct drm_connector_helper_funcs dce_virtual_connector_helper_funcs = {
-       .get_modes = dce_virtual_get_modes,
-       .mode_valid = dce_virtual_mode_valid,
-       .best_encoder = dce_virtual_encoder,
-};
-
-static const struct drm_connector_funcs dce_virtual_connector_funcs = {
-       .dpms = dce_virtual_dpms,
-       .fill_modes = drm_helper_probe_single_connector_modes,
-       .set_property = dce_virtual_set_property,
-       .destroy = dce_virtual_destroy,
-       .force = dce_virtual_force,
-};
-
-static int dce_virtual_sw_init(void *handle)
-{
-       int r, i;
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER, &adev->crtc_irq);
-       if (r)
-               return r;
-
-       adev_to_drm(adev)->max_vblank_count = 0;
-
-       adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs;
-
-       adev_to_drm(adev)->mode_config.max_width = 16384;
-       adev_to_drm(adev)->mode_config.max_height = 16384;
-
-       adev_to_drm(adev)->mode_config.preferred_depth = 24;
-       adev_to_drm(adev)->mode_config.prefer_shadow = 1;
-
-       adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
-
-       r = amdgpu_display_modeset_create_props(adev);
-       if (r)
-               return r;
-
-       adev_to_drm(adev)->mode_config.max_width = 16384;
-       adev_to_drm(adev)->mode_config.max_height = 16384;
-
-       /* allocate crtcs, encoders, connectors */
-       for (i = 0; i < adev->mode_info.num_crtc; i++) {
-               r = dce_virtual_crtc_init(adev, i);
-               if (r)
-                       return r;
-               r = dce_virtual_connector_encoder_init(adev, i);
-               if (r)
-                       return r;
-       }
-
-       drm_kms_helper_poll_init(adev_to_drm(adev));
-
-       adev->mode_info.mode_config_initialized = true;
-       return 0;
-}
-
-static int dce_virtual_sw_fini(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       int i = 0;
-
-       for (i = 0; i < adev->mode_info.num_crtc; i++)
-               if (adev->mode_info.crtcs[i])
-                       hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
-
-       kfree(adev->mode_info.bios_hardcoded_edid);
-
-       drm_kms_helper_poll_fini(adev_to_drm(adev));
-
-       drm_mode_config_cleanup(adev_to_drm(adev));
-       /* clear crtcs pointer to avoid dce irq finish routine access freed data */
-       memset(adev->mode_info.crtcs, 0, sizeof(adev->mode_info.crtcs[0]) * AMDGPU_MAX_CRTCS);
-       adev->mode_info.mode_config_initialized = false;
-       return 0;
-}
-
-static int dce_virtual_hw_init(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-       switch (adev->asic_type) {
-#ifdef CONFIG_DRM_AMDGPU_SI
-       case CHIP_TAHITI:
-       case CHIP_PITCAIRN:
-       case CHIP_VERDE:
-       case CHIP_OLAND:
-               dce_v6_0_disable_dce(adev);
-               break;
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
-       case CHIP_BONAIRE:
-       case CHIP_HAWAII:
-       case CHIP_KAVERI:
-       case CHIP_KABINI:
-       case CHIP_MULLINS:
-               dce_v8_0_disable_dce(adev);
-               break;
-#endif
-       case CHIP_FIJI:
-       case CHIP_TONGA:
-               dce_v10_0_disable_dce(adev);
-               break;
-       case CHIP_CARRIZO:
-       case CHIP_STONEY:
-       case CHIP_POLARIS10:
-       case CHIP_POLARIS11:
-       case CHIP_VEGAM:
-               dce_v11_0_disable_dce(adev);
-               break;
-       case CHIP_TOPAZ:
-#ifdef CONFIG_DRM_AMDGPU_SI
-       case CHIP_HAINAN:
-#endif
-               /* no DCE */
-               break;
-       default:
-               break;
-       }
-       return 0;
-}
-
-static int dce_virtual_hw_fini(void *handle)
-{
-       return 0;
-}
-
-static int dce_virtual_suspend(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       int r;
-
-       r = amdgpu_display_suspend_helper(adev);
-       if (r)
-               return r;
-       return dce_virtual_hw_fini(handle);
-}
-
-static int dce_virtual_resume(void *handle)
-{
-       struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-       int r;
-
-       r = dce_virtual_hw_init(handle);
-       if (r)
-               return r;
-       return amdgpu_display_resume_helper(adev);
-}
-
-static bool dce_virtual_is_idle(void *handle)
-{
-       return true;
-}
-
-static int dce_virtual_wait_for_idle(void *handle)
-{
-       return 0;
-}
-
-static int dce_virtual_soft_reset(void *handle)
-{
-       return 0;
-}
-
-static int dce_virtual_set_clockgating_state(void *handle,
-                                         enum amd_clockgating_state state)
-{
-       return 0;
-}
-
-static int dce_virtual_set_powergating_state(void *handle,
-                                         enum amd_powergating_state state)
-{
-       return 0;
-}
-
-static const struct amd_ip_funcs dce_virtual_ip_funcs = {
-       .name = "dce_virtual",
-       .early_init = dce_virtual_early_init,
-       .late_init = NULL,
-       .sw_init = dce_virtual_sw_init,
-       .sw_fini = dce_virtual_sw_fini,
-       .hw_init = dce_virtual_hw_init,
-       .hw_fini = dce_virtual_hw_fini,
-       .suspend = dce_virtual_suspend,
-       .resume = dce_virtual_resume,
-       .is_idle = dce_virtual_is_idle,
-       .wait_for_idle = dce_virtual_wait_for_idle,
-       .soft_reset = dce_virtual_soft_reset,
-       .set_clockgating_state = dce_virtual_set_clockgating_state,
-       .set_powergating_state = dce_virtual_set_powergating_state,
-};
-
-/* these are handled by the primary encoders */
-static void dce_virtual_encoder_prepare(struct drm_encoder *encoder)
-{
-       return;
-}
-
-static void dce_virtual_encoder_commit(struct drm_encoder *encoder)
-{
-       return;
-}
-
-static void
-dce_virtual_encoder_mode_set(struct drm_encoder *encoder,
-                            struct drm_display_mode *mode,
-                            struct drm_display_mode *adjusted_mode)
-{
-       return;
-}
-
-static void dce_virtual_encoder_disable(struct drm_encoder *encoder)
-{
-       return;
-}
-
-static void
-dce_virtual_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-       return;
-}
-
-static bool dce_virtual_encoder_mode_fixup(struct drm_encoder *encoder,
-                                   const struct drm_display_mode *mode,
-                                   struct drm_display_mode *adjusted_mode)
-{
-       return true;
-}
-
-static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs = {
-       .dpms = dce_virtual_encoder_dpms,
-       .mode_fixup = dce_virtual_encoder_mode_fixup,
-       .prepare = dce_virtual_encoder_prepare,
-       .mode_set = dce_virtual_encoder_mode_set,
-       .commit = dce_virtual_encoder_commit,
-       .disable = dce_virtual_encoder_disable,
-};
-
-static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
-{
-       drm_encoder_cleanup(encoder);
-       kfree(encoder);
-}
-
-static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
-       .destroy = dce_virtual_encoder_destroy,
-};
-
-static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
-                                             int index)
-{
-       struct drm_encoder *encoder;
-       struct drm_connector *connector;
-
-       /* add a new encoder */
-       encoder = kzalloc(sizeof(struct drm_encoder), GFP_KERNEL);
-       if (!encoder)
-               return -ENOMEM;
-       encoder->possible_crtcs = 1 << index;
-       drm_encoder_init(adev_to_drm(adev), encoder, &dce_virtual_encoder_funcs,
-                        DRM_MODE_ENCODER_VIRTUAL, NULL);
-       drm_encoder_helper_add(encoder, &dce_virtual_encoder_helper_funcs);
-
-       connector = kzalloc(sizeof(struct drm_connector), GFP_KERNEL);
-       if (!connector) {
-               kfree(encoder);
-               return -ENOMEM;
-       }
-
-       /* add a new connector */
-       drm_connector_init(adev_to_drm(adev), connector, &dce_virtual_connector_funcs,
-                          DRM_MODE_CONNECTOR_VIRTUAL);
-       drm_connector_helper_add(connector, &dce_virtual_connector_helper_funcs);
-       connector->display_info.subpixel_order = SubPixelHorizontalRGB;
-       connector->interlace_allowed = false;
-       connector->doublescan_allowed = false;
-
-       /* link them */
-       drm_connector_attach_encoder(connector, encoder);
-
-       return 0;
-}
-
-static const struct amdgpu_display_funcs dce_virtual_display_funcs = {
-       .bandwidth_update = &dce_virtual_bandwidth_update,
-       .vblank_get_counter = &dce_virtual_vblank_get_counter,
-       .backlight_set_level = NULL,
-       .backlight_get_level = NULL,
-       .hpd_sense = &dce_virtual_hpd_sense,
-       .hpd_set_polarity = &dce_virtual_hpd_set_polarity,
-       .hpd_get_gpio_reg = &dce_virtual_hpd_get_gpio_reg,
-       .page_flip = &dce_virtual_page_flip,
-       .page_flip_get_scanoutpos = &dce_virtual_crtc_get_scanoutpos,
-       .add_encoder = NULL,
-       .add_connector = NULL,
-};
-
-static void dce_virtual_set_display_funcs(struct amdgpu_device *adev)
-{
-       adev->mode_info.funcs = &dce_virtual_display_funcs;
-}
-
-static int dce_virtual_pageflip(struct amdgpu_device *adev,
-                               unsigned crtc_id)
-{
-       unsigned long flags;
-       struct amdgpu_crtc *amdgpu_crtc;
-       struct amdgpu_flip_work *works;
-
-       amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
-
-       if (crtc_id >= adev->mode_info.num_crtc) {
-               DRM_ERROR("invalid pageflip crtc %d\n", crtc_id);
-               return -EINVAL;
-       }
-
-       /* IRQ could occur when in initial stage */
-       if (amdgpu_crtc == NULL)
-               return 0;
-
-       spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
-       works = amdgpu_crtc->pflip_works;
-       if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
-               DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != "
-                       "AMDGPU_FLIP_SUBMITTED(%d)\n",
-                       amdgpu_crtc->pflip_status,
-                       AMDGPU_FLIP_SUBMITTED);
-               spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
-               return 0;
-       }
-
-       /* page flip completed. clean up */
-       amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
-       amdgpu_crtc->pflip_works = NULL;
-
-       /* wakeup usersapce */
-       if (works->event)
-               drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event);
-
-       spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
-
-       drm_crtc_vblank_put(&amdgpu_crtc->base);
-       amdgpu_bo_unref(&works->old_abo);
-       kfree(works->shared);
-       kfree(works);
-
-       return 0;
-}
-
-static enum hrtimer_restart dce_virtual_vblank_timer_handle(struct hrtimer *vblank_timer)
-{
-       struct amdgpu_crtc *amdgpu_crtc = container_of(vblank_timer,
-                                                      struct amdgpu_crtc, vblank_timer);
-       struct drm_device *ddev = amdgpu_crtc->base.dev;
-       struct amdgpu_device *adev = drm_to_adev(ddev);
-       struct amdgpu_irq_src *source = adev->irq.client[AMDGPU_IRQ_CLIENTID_LEGACY].sources
-               [VISLANDS30_IV_SRCID_SMU_DISP_TIMER2_TRIGGER];
-       int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev,
-                                               amdgpu_crtc->crtc_id);
-
-       if (amdgpu_irq_enabled(adev, source, irq_type)) {
-               drm_handle_vblank(ddev, amdgpu_crtc->crtc_id);
-               dce_virtual_pageflip(adev, amdgpu_crtc->crtc_id);
-       }
-       hrtimer_start(vblank_timer, DCE_VIRTUAL_VBLANK_PERIOD,
-                     HRTIMER_MODE_REL);
-
-       return HRTIMER_NORESTART;
-}
-
-static void dce_virtual_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev,
-                                                       int crtc,
-                                                       enum amdgpu_interrupt_state state)
-{
-       if (crtc >= adev->mode_info.num_crtc || !adev->mode_info.crtcs[crtc]) {
-               DRM_DEBUG("invalid crtc %d\n", crtc);
-               return;
-       }
-
-       adev->mode_info.crtcs[crtc]->vsync_timer_enabled = state;
-       DRM_DEBUG("[FM]set crtc %d vblank interrupt state %d\n", crtc, state);
-}
-
-
-static int dce_virtual_set_crtc_irq_state(struct amdgpu_device *adev,
-                                         struct amdgpu_irq_src *source,
-                                         unsigned type,
-                                         enum amdgpu_interrupt_state state)
-{
-       if (type > AMDGPU_CRTC_IRQ_VBLANK6)
-               return -EINVAL;
-
-       dce_virtual_set_crtc_vblank_interrupt_state(adev, type, state);
-
-       return 0;
-}
-
-static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = {
-       .set = dce_virtual_set_crtc_irq_state,
-       .process = NULL,
-};
-
-static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev)
-{
-       adev->crtc_irq.num_types = adev->mode_info.num_crtc;
-       adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs;
-}
-
-const struct amdgpu_ip_block_version dce_virtual_ip_block =
-{
-       .type = AMD_IP_BLOCK_TYPE_DCE,
-       .major = 1,
-       .minor = 0,
-       .rev = 0,
-       .funcs = &dce_virtual_ip_funcs,
-};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.h b/drivers/gpu/drm/amd/amdgpu/dce_virtual.h
deleted file mode 100644
index ed42201..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __DCE_VIRTUAL_H__
-#define __DCE_VIRTUAL_H__
-
-extern const struct amdgpu_ip_block_version dce_virtual_ip_block;
-
-#endif
-
index 24b0195..16dbe59 100644
@@ -4621,6 +4621,7 @@ static void gfx_v10_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd,
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1);
        dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0);
+       dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE);
 }
 
 static void gfx_v10_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
index 3a8d52a..6a8dade 100644
@@ -3027,6 +3027,7 @@ static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+       dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
 }
 
 static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
index 685212c..37b4a3d 100644
@@ -4198,6 +4198,7 @@ static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+       dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
 }
 
 static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
index c26e060..e0302c2 100644
@@ -5279,6 +5279,7 @@ static void gfx_v8_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+       dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
 }
 
 static void gfx_v8_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
index 03acc77..603c259 100644
@@ -1295,6 +1295,16 @@ static bool is_raven_kicker(struct amdgpu_device *adev)
                return false;
 }
 
+static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
+{
+       if ((adev->asic_type == CHIP_RENOIR) &&
+           (adev->gfx.me_fw_version >= 0x000000a5) &&
+           (adev->gfx.me_feature_version >= 52))
+               return true;
+       else
+               return false;
+}
+
 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
 {
        if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
@@ -2080,6 +2090,7 @@ static void gfx_v9_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, u
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
        dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
+       dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_MODE);
 }
 
 static void gfx_v9_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
@@ -3675,7 +3686,16 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
        if (ring->use_doorbell) {
                WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
                                        (adev->doorbell_index.kiq * 2) << 2);
-               WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+               /* If GC has entered CGPG, ringing a doorbell beyond the first
+                * page doesn't wake up GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER
+                * to work around this issue. This change has to align with a
+                * firmware update.
+                */
+               if (check_if_enlarge_doorbell_range(adev))
+                       WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
+                                       (adev->doorbell.size - 4));
+               else
+                       WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
                                        (adev->doorbell_index.userqueue_end * 2) << 2);
        }
 
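The hunk above widens CP_MEC_DOORBELL_RANGE_UPPER on Renoir with new enough ME firmware so that a doorbell write beyond the first page still wakes GC out of CGPG. The decision in isolation (illustrative only, not part of the patch):

/* Illustrative sketch: pick the upper MEC doorbell range value */
static u32 example_mec_doorbell_range_upper(struct amdgpu_device *adev)
{
	if (check_if_enlarge_doorbell_range(adev))
		/* cover the whole doorbell aperture */
		return adev->doorbell.size - 4;

	/* default: stop at the end of the user queue doorbells */
	return (adev->doorbell_index.userqueue_end * 2) << 2;
}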
index 7cf653f..097230b 100644
@@ -1171,8 +1171,8 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
                break;
        case CHIP_ALDEBARAN:
                adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM;
-               adev->umc.channel_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
-               adev->umc.umc_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
+               adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
+               adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
                adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET;
                if (!adev->gmc.xgmi.connected_to_cpu)
                        adev->umc.ras_funcs = &umc_v6_7_ras_funcs;
index 436fb13..ff80786 100644
@@ -58,7 +58,7 @@
 #include "jpeg_v2_0.h"
 #include "vcn_v3_0.h"
 #include "jpeg_v3_0.h"
-#include "dce_virtual.h"
+#include "amdgpu_vkms.h"
 #include "mes_v10_1.h"
 #include "mxgpu_nv.h"
 #include "smuio_v11_0.h"
@@ -721,7 +721,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
                    !amdgpu_sriov_vf(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -749,7 +749,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
                if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -779,7 +779,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
                    is_support_sw_smu(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -802,7 +802,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
                    is_support_sw_smu(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -823,7 +823,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
                        amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
                amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -843,7 +843,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
                    is_support_sw_smu(adev))
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                 else if (amdgpu_device_has_dc_support(adev))
                         amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -865,7 +865,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -883,11 +883,11 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
                        amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
                amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -905,7 +905,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
                        amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
                }
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
                amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
                break;
index 7cbc2bb..e6d2f74 100644
@@ -44,7 +44,7 @@
 #include "dce_v6_0.h"
 #include "si.h"
 #include "uvd_v3_1.h"
-#include "dce_virtual.h"
+#include "amdgpu_vkms.h"
 #include "gca/gfx_6_0_d.h"
 #include "oss/oss_1_0_d.h"
 #include "oss/oss_1_0_sh_mask.h"
@@ -2759,7 +2759,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
                amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
                if (adev->enable_virtual_display)
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -2777,7 +2777,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
                amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
                if (adev->enable_virtual_display)
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -2795,7 +2795,7 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &si_dma_ip_block);
                amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
                if (adev->enable_virtual_display)
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
                break;
        default:
                BUG();
index a5e085e..f7b56a7 100644
@@ -74,7 +74,7 @@
 #include "smuio_v9_0.h"
 #include "smuio_v11_0.h"
 #include "smuio_v13_0.h"
-#include "dce_virtual.h"
+#include "amdgpu_vkms.h"
 #include "mxgpu_ai.h"
 #include "amdgpu_ras.h"
 #include "amdgpu_xgmi.h"
@@ -843,7 +843,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
                        amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
                }
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -863,7 +863,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
                amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -885,7 +885,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
                }
 
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
                amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
                amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
@@ -909,7 +909,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
                amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                 else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
index 7da1211..bb30336 100644
 
 const uint32_t
        umc_v6_7_channel_idx_tbl_second[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM] = {
-               {28, 12, 6, 22},        {19, 3, 9, 25},
-               {20, 4, 30, 14},        {11, 27, 1, 17},
-               {24, 8, 2, 18},         {15, 31, 5, 21},
-               {16, 0, 26, 10},        {7, 23, 29, 13}
+               {28, 20, 24, 16, 12, 4, 8, 0},
+               {6, 30, 2, 26, 22, 14, 18, 10},
+               {19, 11, 15, 7, 3, 27, 31, 23},
+               {9, 1, 5, 29, 25, 17, 21, 13}
 };
 const uint32_t
        umc_v6_7_channel_idx_tbl_first[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM] = {
-               {19, 3, 9, 25},         {28, 12, 6, 22},
-               {11, 27, 1, 17},        {20, 4, 30, 14},
-               {15, 31, 5, 21},        {24, 8, 2, 18},
-               {7, 23, 29, 13},        {16, 0, 26, 10}
+               {19, 11, 15, 7, 3, 27, 31, 23},
+               {9, 1, 5, 29, 25, 17, 21, 13},
+               {28, 20, 24, 16, 12, 4, 8, 0},
+               {6, 30, 2, 26, 22, 14, 18, 10},
 };
 
 static inline uint32_t get_umc_v6_7_reg_offset(struct amdgpu_device *adev,
index 81b8f18..57f2557 100644
@@ -36,9 +36,9 @@
 #define UMC_V6_7_INST_DIST     0x40000
 
 /* number of umc channel instance with memory map register access */
-#define UMC_V6_7_CHANNEL_INSTANCE_NUM          4
+#define UMC_V6_7_UMC_INSTANCE_NUM              4
 /* number of umc instance with memory map register access */
-#define UMC_V6_7_UMC_INSTANCE_NUM              8
+#define UMC_V6_7_CHANNEL_INSTANCE_NUM          8
 /* total channel instances in one umc block */
 #define UMC_V6_7_TOTAL_CHANNEL_NUM     (UMC_V6_7_CHANNEL_INSTANCE_NUM * UMC_V6_7_UMC_INSTANCE_NUM)
 /* UMC register per channel offset */
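The macro swap above makes UMC_V6_7_UMC_INSTANCE_NUM the number of UMC instances (4) and UMC_V6_7_CHANNEL_INSTANCE_NUM the channels per instance (8), matching the reshaped channel index tables, which are laid out as [umc_inst][channel_inst]. A lookup sketch under that layout (illustrative only, not part of the patch):

/* Illustrative sketch: index the corrected channel index table */
static uint32_t example_channel_index(uint32_t umc_inst, uint32_t channel_inst)
{
	/* 4 UMC instances x 8 channels per instance = 32 channels total */
	BUILD_BUG_ON(UMC_V6_7_TOTAL_CHANNEL_NUM !=
		     UMC_V6_7_UMC_INSTANCE_NUM * UMC_V6_7_CHANNEL_INSTANCE_NUM);

	return umc_v6_7_channel_idx_tbl_first[umc_inst][channel_inst];
}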
index 2f01756..3d18aab 100644
@@ -90,9 +90,7 @@ static int vcn_v3_0_early_init(void *handle)
        int i;
 
        if (amdgpu_sriov_vf(adev)) {
-               for (i = 0; i < VCN_INSTANCES_SIENNA_CICHLID; i++)
-                       if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i))
-                               adev->vcn.num_vcn_inst++;
+               adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
                adev->vcn.harvest_config = 0;
                adev->vcn.num_enc_rings = 1;
 
@@ -153,8 +151,7 @@ static int vcn_v3_0_sw_init(void *handle)
                adev->firmware.fw_size +=
                        ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
 
-               if ((adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) ||
-                   (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)) {
+               if (adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) {
                        adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
                        adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
                        adev->firmware.fw_size +=
@@ -328,18 +325,28 @@ static int vcn_v3_0_hw_init(void *handle)
                                continue;
 
                        ring = &adev->vcn.inst[i].ring_dec;
-                       ring->wptr = 0;
-                       ring->wptr_old = 0;
-                       vcn_v3_0_dec_ring_set_wptr(ring);
-                       ring->sched.ready = true;
-
-                       for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
-                               ring = &adev->vcn.inst[i].ring_enc[j];
+                       if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i)) {
+                               ring->sched.ready = false;
+                               dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
+                       } else {
                                ring->wptr = 0;
                                ring->wptr_old = 0;
-                               vcn_v3_0_enc_ring_set_wptr(ring);
+                               vcn_v3_0_dec_ring_set_wptr(ring);
                                ring->sched.ready = true;
                        }
+
+                       for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+                               ring = &adev->vcn.inst[i].ring_enc[j];
+                               if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
+                                       ring->sched.ready = false;
+                                       dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
+                               } else {
+                                       ring->wptr = 0;
+                                       ring->wptr_old = 0;
+                                       vcn_v3_0_enc_ring_set_wptr(ring);
+                                       ring->sched.ready = true;
+                               }
+                       }
                }
        } else {
                for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
index 3d21c07..fe9a7cc 100644
@@ -77,7 +77,7 @@
 #if defined(CONFIG_DRM_AMD_ACP)
 #include "amdgpu_acp.h"
 #endif
-#include "dce_virtual.h"
+#include "amdgpu_vkms.h"
 #include "mxgpu_vi.h"
 #include "amdgpu_dm.h"
 
@@ -2102,7 +2102,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &sdma_v2_4_ip_block);
                amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
                if (adev->enable_virtual_display)
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
                break;
        case CHIP_FIJI:
                amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
@@ -2112,7 +2112,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
                amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -2132,7 +2132,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
                amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
                if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -2155,7 +2155,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &sdma_v3_1_ip_block);
                amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
                if (adev->enable_virtual_display)
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -2173,7 +2173,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
                amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
                if (adev->enable_virtual_display)
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
@@ -2194,7 +2194,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
                amdgpu_device_ip_block_add(adev, &sdma_v3_0_ip_block);
                amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
                if (adev->enable_virtual_display)
-                       amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+                       amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
                else if (amdgpu_device_has_dc_support(adev))
                        amdgpu_device_ip_block_add(adev, &dm_ip_block);
index e48acdd..86afd37 100644
@@ -1393,6 +1393,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
        long err = 0;
        int i;
        uint32_t *devices_arr = NULL;
+       bool table_freed = false;
 
        dev = kfd_device_by_id(GET_GPU_ID(args->handle));
        if (!dev)
@@ -1450,7 +1451,8 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
                        goto get_mem_obj_from_handle_failed;
                }
                err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-                       peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv);
+                       peer->kgd, (struct kgd_mem *)mem,
+                       peer_pdd->drm_priv, &table_freed);
                if (err) {
                        pr_err("Failed to map to gpu %d/%d\n",
                               i, args->n_devices);
@@ -1468,16 +1470,17 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
        }
 
        /* Flush TLBs after waiting for the page table updates to complete */
-       for (i = 0; i < args->n_devices; i++) {
-               peer = kfd_device_by_id(devices_arr[i]);
-               if (WARN_ON_ONCE(!peer))
-                       continue;
-               peer_pdd = kfd_get_process_device_data(peer, p);
-               if (WARN_ON_ONCE(!peer_pdd))
-                       continue;
-               kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
+       if (table_freed) {
+               for (i = 0; i < args->n_devices; i++) {
+                       peer = kfd_device_by_id(devices_arr[i]);
+                       if (WARN_ON_ONCE(!peer))
+                               continue;
+                       peer_pdd = kfd_get_process_device_data(peer, p);
+                       if (WARN_ON_ONCE(!peer_pdd))
+                               continue;
+                       kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
+               }
        }
-
        kfree(devices_arr);
 
        return err;
@@ -1565,10 +1568,29 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
                }
                args->n_success = i+1;
        }
-       kfree(devices_arr);
-
        mutex_unlock(&p->mutex);
 
+       if (dev->device_info->asic_family == CHIP_ALDEBARAN) {
+               err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd,
+                               (struct kgd_mem *) mem, true);
+               if (err) {
+                       pr_debug("Sync memory failed, wait interrupted by user signal\n");
+                       goto sync_memory_failed;
+               }
+
+               /* Flush TLBs after waiting for the page table updates to complete */
+               for (i = 0; i < args->n_devices; i++) {
+                       peer = kfd_device_by_id(devices_arr[i]);
+                       if (WARN_ON_ONCE(!peer))
+                               continue;
+                       peer_pdd = kfd_get_process_device_data(peer, p);
+                       if (WARN_ON_ONCE(!peer_pdd))
+                               continue;
+                       kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
+               }
+       }
+       kfree(devices_arr);
+
        return 0;
 
 bind_process_to_device_failed:
@@ -1576,6 +1598,7 @@ get_mem_obj_from_handle_failed:
 unmap_memory_from_gpu_failed:
        mutex_unlock(&p->mutex);
 copy_from_user_failed:
+sync_memory_failed:
        kfree(devices_arr);
        return err;
 }
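The unmap path above now waits for the page table updates with amdgpu_amdkfd_gpuvm_sync_memory() and then issues a heavyweight TLB flush, but only on Aldebaran. The flush decision in isolation (illustrative only, not part of the patch):

/* Illustrative sketch: only Aldebaran needs an explicit heavyweight
 * TLB flush after unmapping memory from a GPU.
 */
static void example_flush_after_unmap(struct kfd_dev *dev,
				      struct kfd_process_device *pdd)
{
	if (dev->device_info->asic_family == CHIP_ALDEBARAN)
		kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
}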
index b551dd6..16a57b7 100644
@@ -91,6 +91,7 @@ static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
 static const struct kfd_device_info kaveri_device_info = {
        .asic_family = CHIP_KAVERI,
        .asic_name = "kaveri",
+       .gfx_target_version = 70000,
        .max_pasid_bits = 16,
        /* max num of queues for KV.TODO should be a dynamic value */
        .max_no_of_hqd  = 24,
@@ -110,6 +111,7 @@ static const struct kfd_device_info kaveri_device_info = {
 static const struct kfd_device_info carrizo_device_info = {
        .asic_family = CHIP_CARRIZO,
        .asic_name = "carrizo",
+       .gfx_target_version = 80001,
        .max_pasid_bits = 16,
        /* max num of queues for CZ.TODO should be a dynamic value */
        .max_no_of_hqd  = 24,
@@ -130,6 +132,7 @@ static const struct kfd_device_info carrizo_device_info = {
 static const struct kfd_device_info raven_device_info = {
        .asic_family = CHIP_RAVEN,
        .asic_name = "raven",
+       .gfx_target_version = 90002,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -148,6 +151,7 @@ static const struct kfd_device_info raven_device_info = {
 static const struct kfd_device_info hawaii_device_info = {
        .asic_family = CHIP_HAWAII,
        .asic_name = "hawaii",
+       .gfx_target_version = 70001,
        .max_pasid_bits = 16,
        /* max num of queues for KV.TODO should be a dynamic value */
        .max_no_of_hqd  = 24,
@@ -167,6 +171,7 @@ static const struct kfd_device_info hawaii_device_info = {
 static const struct kfd_device_info tonga_device_info = {
        .asic_family = CHIP_TONGA,
        .asic_name = "tonga",
+       .gfx_target_version = 80002,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
@@ -185,6 +190,7 @@ static const struct kfd_device_info tonga_device_info = {
 static const struct kfd_device_info fiji_device_info = {
        .asic_family = CHIP_FIJI,
        .asic_name = "fiji",
+       .gfx_target_version = 80003,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
@@ -203,6 +209,7 @@ static const struct kfd_device_info fiji_device_info = {
 static const struct kfd_device_info fiji_vf_device_info = {
        .asic_family = CHIP_FIJI,
        .asic_name = "fiji",
+       .gfx_target_version = 80003,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
@@ -222,6 +229,7 @@ static const struct kfd_device_info fiji_vf_device_info = {
 static const struct kfd_device_info polaris10_device_info = {
        .asic_family = CHIP_POLARIS10,
        .asic_name = "polaris10",
+       .gfx_target_version = 80003,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
@@ -240,6 +248,7 @@ static const struct kfd_device_info polaris10_device_info = {
 static const struct kfd_device_info polaris10_vf_device_info = {
        .asic_family = CHIP_POLARIS10,
        .asic_name = "polaris10",
+       .gfx_target_version = 80003,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
@@ -258,6 +267,7 @@ static const struct kfd_device_info polaris10_vf_device_info = {
 static const struct kfd_device_info polaris11_device_info = {
        .asic_family = CHIP_POLARIS11,
        .asic_name = "polaris11",
+       .gfx_target_version = 80003,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
@@ -276,6 +286,7 @@ static const struct kfd_device_info polaris11_device_info = {
 static const struct kfd_device_info polaris12_device_info = {
        .asic_family = CHIP_POLARIS12,
        .asic_name = "polaris12",
+       .gfx_target_version = 80003,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
@@ -294,6 +305,7 @@ static const struct kfd_device_info polaris12_device_info = {
 static const struct kfd_device_info vegam_device_info = {
        .asic_family = CHIP_VEGAM,
        .asic_name = "vegam",
+       .gfx_target_version = 80003,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 4,
@@ -312,6 +324,7 @@ static const struct kfd_device_info vegam_device_info = {
 static const struct kfd_device_info vega10_device_info = {
        .asic_family = CHIP_VEGA10,
        .asic_name = "vega10",
+       .gfx_target_version = 90000,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -330,6 +343,7 @@ static const struct kfd_device_info vega10_device_info = {
 static const struct kfd_device_info vega10_vf_device_info = {
        .asic_family = CHIP_VEGA10,
        .asic_name = "vega10",
+       .gfx_target_version = 90000,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -348,6 +362,7 @@ static const struct kfd_device_info vega10_vf_device_info = {
 static const struct kfd_device_info vega12_device_info = {
        .asic_family = CHIP_VEGA12,
        .asic_name = "vega12",
+       .gfx_target_version = 90004,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -366,6 +381,7 @@ static const struct kfd_device_info vega12_device_info = {
 static const struct kfd_device_info vega20_device_info = {
        .asic_family = CHIP_VEGA20,
        .asic_name = "vega20",
+       .gfx_target_version = 90006,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -384,6 +400,7 @@ static const struct kfd_device_info vega20_device_info = {
 static const struct kfd_device_info arcturus_device_info = {
        .asic_family = CHIP_ARCTURUS,
        .asic_name = "arcturus",
+       .gfx_target_version = 90008,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -402,6 +419,7 @@ static const struct kfd_device_info arcturus_device_info = {
 static const struct kfd_device_info aldebaran_device_info = {
        .asic_family = CHIP_ALDEBARAN,
        .asic_name = "aldebaran",
+       .gfx_target_version = 90010,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -420,6 +438,7 @@ static const struct kfd_device_info aldebaran_device_info = {
 static const struct kfd_device_info renoir_device_info = {
        .asic_family = CHIP_RENOIR,
        .asic_name = "renoir",
+       .gfx_target_version = 90002,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -438,6 +457,7 @@ static const struct kfd_device_info renoir_device_info = {
 static const struct kfd_device_info navi10_device_info = {
        .asic_family = CHIP_NAVI10,
        .asic_name = "navi10",
+       .gfx_target_version = 100100,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -456,6 +476,7 @@ static const struct kfd_device_info navi10_device_info = {
 static const struct kfd_device_info navi12_device_info = {
        .asic_family = CHIP_NAVI12,
        .asic_name = "navi12",
+       .gfx_target_version = 100101,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -474,6 +495,7 @@ static const struct kfd_device_info navi12_device_info = {
 static const struct kfd_device_info navi14_device_info = {
        .asic_family = CHIP_NAVI14,
        .asic_name = "navi14",
+       .gfx_target_version = 100102,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -492,6 +514,7 @@ static const struct kfd_device_info navi14_device_info = {
 static const struct kfd_device_info sienna_cichlid_device_info = {
        .asic_family = CHIP_SIENNA_CICHLID,
        .asic_name = "sienna_cichlid",
+       .gfx_target_version = 100300,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -510,6 +533,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = {
 static const struct kfd_device_info navy_flounder_device_info = {
        .asic_family = CHIP_NAVY_FLOUNDER,
        .asic_name = "navy_flounder",
+       .gfx_target_version = 100301,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -528,6 +552,7 @@ static const struct kfd_device_info navy_flounder_device_info = {
 static const struct kfd_device_info vangogh_device_info = {
        .asic_family = CHIP_VANGOGH,
        .asic_name = "vangogh",
+       .gfx_target_version = 100303,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -546,6 +571,7 @@ static const struct kfd_device_info vangogh_device_info = {
 static const struct kfd_device_info dimgrey_cavefish_device_info = {
        .asic_family = CHIP_DIMGREY_CAVEFISH,
        .asic_name = "dimgrey_cavefish",
+       .gfx_target_version = 100302,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -564,6 +590,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
 static const struct kfd_device_info beige_goby_device_info = {
        .asic_family = CHIP_BEIGE_GOBY,
        .asic_name = "beige_goby",
+       .gfx_target_version = 100304,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -582,6 +609,7 @@ static const struct kfd_device_info beige_goby_device_info = {
 static const struct kfd_device_info yellow_carp_device_info = {
        .asic_family = CHIP_YELLOW_CARP,
        .asic_name = "yellow_carp",
+       .gfx_target_version = 100305,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
@@ -600,6 +628,7 @@ static const struct kfd_device_info yellow_carp_device_info = {
 static const struct kfd_device_info cyan_skillfish_device_info = {
        .asic_family = CHIP_CYAN_SKILLFISH,
        .asic_name = "cyan_skillfish",
+       .gfx_target_version = 100103,
        .max_pasid_bits = 16,
        .max_no_of_hqd  = 24,
        .doorbell_size  = 8,
index 8a5dfda..ab83b0d 100644 (file)
@@ -196,6 +196,7 @@ struct kfd_event_interrupt_class {
 struct kfd_device_info {
        enum amd_asic_type asic_family;
        const char *asic_name;
+       uint32_t gfx_target_version;
        const struct kfd_event_interrupt_class *event_interrupt_class;
        unsigned int max_pasid_bits;
        unsigned int max_no_of_hqd;
index 8a2c6fc..21ec8a1 100644 (file)
@@ -714,7 +714,8 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
        if (err)
                goto err_alloc_mem;
 
-       err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv);
+       err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem,
+                       pdd->drm_priv, NULL);
        if (err)
                goto err_map_mem;
 
index c7b364e..7df69b7 100644 (file)
@@ -3019,7 +3019,8 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
        struct svm_range *prange;
        uint32_t prefetch_loc = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
        uint32_t location = KFD_IOCTL_SVM_LOCATION_UNDEFINED;
-       uint32_t flags = 0xffffffff;
+       uint32_t flags_and = 0xffffffff;
+       uint32_t flags_or = 0;
        int gpuidx;
        uint32_t i;
 
@@ -3046,12 +3047,12 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
                        get_accessible = true;
                        break;
                case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
+               case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
                        get_flags = true;
                        break;
                case KFD_IOCTL_SVM_ATTR_GRANULARITY:
                        get_granularity = true;
                        break;
-               case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
                case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
                case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
                        fallthrough;
@@ -3069,7 +3070,8 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
        if (!node) {
                pr_debug("range attrs not found return default values\n");
                svm_range_set_default_attributes(&location, &prefetch_loc,
-                                                &granularity, &flags);
+                                                &granularity, &flags_and);
+               flags_or = flags_and;
                if (p->xnack_enabled)
                        bitmap_copy(bitmap_access, svms->bitmap_supported,
                                    MAX_GPU_INSTANCE);
@@ -3115,8 +3117,10 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
                        bitmap_and(bitmap_aip, bitmap_aip,
                                   prange->bitmap_aip, MAX_GPU_INSTANCE);
                }
-               if (get_flags)
-                       flags &= prange->flags;
+               if (get_flags) {
+                       flags_and &= prange->flags;
+                       flags_or |= prange->flags;
+               }
 
                if (get_granularity && prange->granularity < granularity)
                        granularity = prange->granularity;
@@ -3150,7 +3154,10 @@ fill_values:
                                attrs[i].type = KFD_IOCTL_SVM_ATTR_NO_ACCESS;
                        break;
                case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
-                       attrs[i].value = flags;
+                       attrs[i].value = flags_and;
+                       break;
+               case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
+                       attrs[i].value = ~flags_or;
                        break;
                case KFD_IOCTL_SVM_ATTR_GRANULARITY:
                        attrs[i].value = (uint32_t)granularity;
index ef992dd..98cca5f 100644 (file)
@@ -478,6 +478,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                              dev->node_props.simd_per_cu);
        sysfs_show_32bit_prop(buffer, offs, "max_slots_scratch_cu",
                              dev->node_props.max_slots_scratch_cu);
+       sysfs_show_32bit_prop(buffer, offs, "gfx_target_version",
+                             dev->node_props.gfx_target_version);
        sysfs_show_32bit_prop(buffer, offs, "vendor_id",
                              dev->node_props.vendor_id);
        sysfs_show_32bit_prop(buffer, offs, "device_id",
@@ -1360,6 +1362,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
        dev->node_props.simd_arrays_per_engine =
                cu_info.num_shader_arrays_per_engine;
 
+       dev->node_props.gfx_target_version = gpu->device_info->gfx_target_version;
        dev->node_props.vendor_id = gpu->pdev->vendor;
        dev->node_props.device_id = gpu->pdev->device;
        dev->node_props.capability |=
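
The new gfx_target_version property is exported to user space through the KFD topology nodes. As a rough illustration only (the sysfs path below follows the usual /sys/class/kfd/kfd/topology/nodes/<N>/properties layout and is an assumption, not part of this diff; node 0 is used for brevity and all names in the snippet are hypothetical), a minimal reader could look like this:

/* Illustrative user-space reader for the new gfx_target_version property. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Assumed conventional KFD topology path; real systems may have several nodes. */
	const char *path = "/sys/class/kfd/kfd/topology/nodes/0/properties";
	char name[64];
	unsigned long long value;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}

	/* Each line of the properties file has the form "<name> <value>". */
	while (fscanf(f, "%63s %llu", name, &value) == 2) {
		if (!strcmp(name, "gfx_target_version"))
			printf("gfx_target_version: %llu\n", value);
	}

	fclose(f);
	return 0;
}
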
index 8b48c66..a8db017 100644 (file)
@@ -78,6 +78,7 @@ struct kfd_node_properties {
        uint32_t simd_per_cu;
        uint32_t max_slots_scratch_cu;
        uint32_t engine_id;
+       uint32_t gfx_target_version;
        uint32_t vendor_id;
        uint32_t device_id;
        uint32_t location_id;
index 91fb72c..718e123 100644 (file)
 
 AMDGPUDM = amdgpu_dm.o amdgpu_dm_irq.o amdgpu_dm_mst_types.o amdgpu_dm_color.o
 
+ifdef CONFIG_DRM_AMD_DC_DCN
+AMDGPUDM += dc_fpu.o
+endif
+
 ifneq ($(CONFIG_DRM_AMD_DC),)
 AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o
 endif
index 986c9d2..3e28f17 100644 (file)
@@ -1066,19 +1066,15 @@ static void event_mall_stutter(struct work_struct *work)
 
 static struct vblank_workqueue *vblank_create_workqueue(struct amdgpu_device *adev, struct dc *dc)
 {
-
-       int max_caps = dc->caps.max_links;
        struct vblank_workqueue *vblank_work;
-       int i = 0;
 
-       vblank_work = kcalloc(max_caps, sizeof(*vblank_work), GFP_KERNEL);
+       vblank_work = kzalloc(sizeof(*vblank_work), GFP_KERNEL);
        if (ZERO_OR_NULL_PTR(vblank_work)) {
                kfree(vblank_work);
                return NULL;
        }
 
-       for (i = 0; i < max_caps; i++)
-               INIT_WORK(&vblank_work[i].mall_work, event_mall_stutter);
+       INIT_WORK(&vblank_work->mall_work, event_mall_stutter);
 
        return vblank_work;
 }
@@ -1548,6 +1544,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
        }
 
        hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
+       adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
        if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
                adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
@@ -1561,7 +1558,6 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
                         adev->dm.dmcub_fw_version);
        }
 
-       adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
 
        adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
        dmub_srv = adev->dm.dmub_srv;
@@ -9620,7 +9616,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                } else if (amdgpu_freesync_vid_mode && aconnector &&
                           is_freesync_video_mode(&new_crtc_state->mode,
                                                  aconnector)) {
-                       set_freesync_fixed_config(dm_new_crtc_state);
+                       struct drm_display_mode *high_mode;
+
+                       high_mode = get_highest_refresh_rate_mode(aconnector, false);
+                       if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
+                               set_freesync_fixed_config(dm_new_crtc_state);
+                       }
                }
 
                ret = dm_atomic_get_state(state, &dm_state);
index 46a33f6..fdcaea2 100644 (file)
@@ -637,6 +637,30 @@ TRACE_EVENT(amdgpu_refresh_rate_track,
                  __entry->refresh_rate_ns)
 );
 
+TRACE_EVENT(dcn_fpu,
+           TP_PROTO(bool begin, const char *function, const int line, const int recursion_depth),
+           TP_ARGS(begin, function, line, recursion_depth),
+
+           TP_STRUCT__entry(
+                            __field(bool, begin)
+                            __field(const char *, function)
+                            __field(int, line)
+                            __field(int, recursion_depth)
+           ),
+           TP_fast_assign(
+                          __entry->begin = begin;
+                          __entry->function = function;
+                          __entry->line = line;
+                          __entry->recursion_depth = recursion_depth;
+           ),
+           TP_printk("%s: recursion_depth: %d: %s()+%d:",
+                     __entry->begin ? "begin" : "end",
+                     __entry->recursion_depth,
+                     __entry->function,
+                     __entry->line
+           )
+);
+
 #endif /* _AMDGPU_DM_TRACE_H_ */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
new file mode 100644 (file)
index 0000000..c9f47d1
--- /dev/null
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_trace.h"
+
+#if defined(CONFIG_X86)
+#include <asm/fpu/api.h>
+#elif defined(CONFIG_PPC64)
+#include <asm/switch_to.h>
+#include <asm/cputable.h>
+#endif
+
+/**
+ * DOC: DC FPU manipulation overview
+ *
+ * DC core uses FPU operations in multiple parts of the code, which requires a
+ * more specialized way to manage entering and leaving these areas. To fulfill
+ * this requirement, we created wrapper functions that encapsulate
+ * kernel_fpu_begin/end to better fit our needs in the display component. In
+ * summary, this file contains the functions related to FPU operation
+ * management.
+ */
+
+static DEFINE_PER_CPU(int, fpu_recursion_depth);
+
+/**
+ * dc_assert_fp_enabled - Check if FPU protection is enabled
+ *
+ * This function tells if the code is already under FPU protection or not. A
+ * function that works as an API for a set of FPU operations can use this
+ * function for checking if the caller invoked it after DC_FP_START(). For
+ * example, take a look at dcn2x.c file.
+ */
+inline void dc_assert_fp_enabled(void)
+{
+       int *pcpu, depth = 0;
+
+       pcpu = get_cpu_ptr(&fpu_recursion_depth);
+       depth = *pcpu;
+       put_cpu_ptr(&fpu_recursion_depth);
+
+       ASSERT(depth >= 1);
+}
+
+/**
+ * dc_fpu_begin - Enables FPU protection
+ * @function_name: A string containing the function name for debug purposes
+ *   (usually __func__)
+ *
+ * @line: A line number where DC_FP_START was invoked for debug purposes
+ *   (usually __LINE__)
+ *
+ * This function is responsible for managing the use of kernel_fpu_begin() with
+ * the advantage of providing an event trace for debugging.
+ *
+ * Note: Do not call this function directly; always use DC_FP_START().
+ */
+void dc_fpu_begin(const char *function_name, const int line)
+{
+       int *pcpu;
+
+       pcpu = get_cpu_ptr(&fpu_recursion_depth);
+       *pcpu += 1;
+
+       if (*pcpu == 1) {
+#if defined(CONFIG_X86)
+               kernel_fpu_begin();
+#elif defined(CONFIG_PPC64)
+               if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+                       preempt_disable();
+                       enable_kernel_vsx();
+               } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
+                       preempt_disable();
+                       enable_kernel_altivec();
+               } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
+                       preempt_disable();
+                       enable_kernel_fp();
+               }
+#endif
+       }
+
+       TRACE_DCN_FPU(true, function_name, line, *pcpu);
+       put_cpu_ptr(&fpu_recursion_depth);
+}
+
+/**
+ * dc_fpu_end - Disable FPU protection
+ * @function_name: A string containing the function name for debug purposes
+ * @line: A line number where DC_FP_END was invoked for debug purposes
+ *
+ * This function is responsible for managing the use of kernel_fpu_end() with
+ * the advantage of providing an event trace for debugging.
+ *
+ * Note: Do not call this function directly; always use DC_FP_END().
+ */
+void dc_fpu_end(const char *function_name, const int line)
+{
+       int *pcpu;
+
+       pcpu = get_cpu_ptr(&fpu_recursion_depth);
+       *pcpu -= 1;
+       if (*pcpu <= 0) {
+#if defined(CONFIG_X86)
+               kernel_fpu_end();
+#elif defined(CONFIG_PPC64)
+               if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+                       disable_kernel_vsx();
+                       preempt_enable();
+               } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
+                       disable_kernel_altivec();
+                       preempt_enable();
+               } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
+                       disable_kernel_fp();
+                       preempt_enable();
+               }
+#endif
+       }
+
+       TRACE_DCN_FPU(false, function_name, line, *pcpu);
+       put_cpu_ptr(&fpu_recursion_depth);
+}
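
To make the begin/end bookkeeping above easier to follow: only the outermost DC_FP_START()/DC_FP_END() pair actually switches the FPU state, nested calls just move the per-CPU counter, and dc_assert_fp_enabled() checks that the caller is inside such a pair. The stand-alone sketch below is an illustration, not part of the patch; printf stands in for kernel_fpu_begin()/kernel_fpu_end(), a plain global replaces the per-CPU variable, and every function name is hypothetical:

#include <assert.h>
#include <stdio.h>

/* Stand-in for the per-CPU fpu_recursion_depth counter. */
static int fpu_recursion_depth;

static void fp_begin(const char *func, int line)
{
	/* Only the outermost begin would switch FPU state in the kernel. */
	if (++fpu_recursion_depth == 1)
		printf("enter FPU mode (outermost begin)\n");
	printf("begin: depth=%d at %s:%d\n", fpu_recursion_depth, func, line);
}

static void fp_end(const char *func, int line)
{
	/* Only the matching outermost end would leave FPU mode. */
	if (--fpu_recursion_depth == 0)
		printf("leave FPU mode (outermost end)\n");
	printf("end: depth=%d at %s:%d\n", fpu_recursion_depth, func, line);
}

static void assert_fp_enabled(void)
{
	/* Mirrors dc_assert_fp_enabled(): must run inside a begin/end pair. */
	assert(fpu_recursion_depth >= 1);
}

static double helper_using_fp(double a, double b)
{
	assert_fp_enabled();
	return a * b + 0.5;
}

int main(void)
{
	fp_begin(__func__, __LINE__);
	fp_begin(__func__, __LINE__);   /* nested: counter moves, no mode switch */
	printf("result: %f\n", helper_using_fp(2.0, 3.0));
	fp_end(__func__, __LINE__);
	fp_end(__func__, __LINE__);
	return 0;
}
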
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h
new file mode 100644 (file)
index 0000000..b8275b3
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_FPU_H__
+#define __DC_FPU_H__
+
+void dc_assert_fp_enabled(void);
+void dc_fpu_begin(const char *function_name, const int line);
+void dc_fpu_end(const char *function_name, const int line);
+
+#endif /* __DC_FPU_H__ */
index c6f494f..6185f94 100644 (file)
@@ -66,9 +66,11 @@ int rn_get_active_display_cnt_wa(
        for (i = 0; i < context->stream_count; i++) {
                const struct dc_stream_state *stream = context->streams[i];
 
+               /* Extend the WA to DP for Linux*/
                if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
                                stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
-                               stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
+                               stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK ||
+                               stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
                        tmds_present = true;
        }
 
index f56e061..cd025c1 100644 (file)
@@ -3595,29 +3595,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
 bool dp_retrieve_lttpr_cap(struct dc_link *link)
 {
        uint8_t lttpr_dpcd_data[6];
-       bool vbios_lttpr_enable = false;
-       bool vbios_lttpr_interop = false;
-       struct dc_bios *bios = link->dc->ctx->dc_bios;
+       bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
+       bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
        enum dc_status status = DC_ERROR_UNEXPECTED;
        bool is_lttpr_present = false;
 
        memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
-       /* Query BIOS to determine if LTTPR functionality is forced on by system */
-       if (bios->funcs->get_lttpr_caps) {
-               enum bp_result bp_query_result;
-               uint8_t is_vbios_lttpr_enable = 0;
-
-               bp_query_result = bios->funcs->get_lttpr_caps(bios, &is_vbios_lttpr_enable);
-               vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
-       }
-
-       if (bios->funcs->get_lttpr_interop) {
-               enum bp_result bp_query_result;
-               uint8_t is_vbios_interop_enabled = 0;
-
-               bp_query_result = bios->funcs->get_lttpr_interop(bios, &is_vbios_interop_enabled);
-               vbios_lttpr_interop = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
-       }
 
        /*
         * Logic to determine LTTPR mode
index 1a89d56..de80a9e 100644 (file)
@@ -305,7 +305,7 @@ struct link_encoder *link_enc_cfg_get_next_avail_link_enc(
        const struct dc_state *state)
 {
        struct link_encoder *link_enc = NULL;
-       enum engine_id eng_id = ENGINE_ID_UNKNOWN;
+       enum engine_id eng_id;
 
        eng_id = find_first_avail_link_enc(dc->ctx, state);
        if (eng_id != ENGINE_ID_UNKNOWN)
index 1596f6b..a60396d 100644 (file)
@@ -1030,7 +1030,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
 
        /* Timing borders are part of vactive that we are also supposed to skip in addition
         * to any stream dst offset. Since dm logic assumes dst is in addressable
-        * space we need to add the the left and top borders to dst offsets temporarily.
+        * space we need to add the left and top borders to dst offsets temporarily.
         * TODO: fix in DM, stream dst is supposed to be in vactive
         */
        pipe_ctx->stream->dst.x += timing->h_border_left;
@@ -1051,6 +1051,11 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
        /* depends on scaling ratios and recout, does not calculate offset yet */
        calculate_viewport_size(pipe_ctx);
 
+       /* Stopgap for validation of ODM + MPO on one side of screen case */
+       if (pipe_ctx->plane_res.scl_data.viewport.height < 1 ||
+                       pipe_ctx->plane_res.scl_data.viewport.width < 1)
+               return false;
+
        /*
         * LB calculations depend on vp size, h/v_active and scaling ratios
         * Setting line buffer pixel depth to 24bpp yields banding
index a948f4f..62c222d 100644 (file)
@@ -45,7 +45,7 @@
 /* forward declaration */
 struct aux_payload;
 
-#define DC_VER "3.2.146"
+#define DC_VER "3.2.147"
 
 #define MAX_SURFACES 3
 #define MAX_PLANES 6
@@ -183,6 +183,8 @@ struct dc_caps {
        unsigned int cursor_cache_size;
        struct dc_plane_cap planes[MAX_PLANES];
        struct dc_color_caps color;
+       bool vbios_lttpr_aware;
+       bool vbios_lttpr_enable;
 };
 
 struct dc_bug_wa {
index d261535..c711797 100644 (file)
@@ -37,3 +37,6 @@
 
 #define TRACE_DCN_CLOCK_STATE(dcn_clocks) \
        trace_amdgpu_dm_dc_clocks_state(dcn_clocks)
+
+#define TRACE_DCN_FPU(begin, function, line, ref_count) \
+       trace_dcn_fpu(begin, function, line, ref_count)
index 7fa9fc6..f6e747f 100644 (file)
@@ -464,7 +464,7 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
 
        REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
                        MASTER_UPDATE_LOCK_DB_X,
-                       h_blank_start - 200 - 1,
+                       (h_blank_start - 200 - 1) / optc1->opp_count,
                        MASTER_UPDATE_LOCK_DB_Y,
                        v_blank_start - 1);
 }
index 0b1cd1d..e3e01b1 100644 (file)
@@ -35,6 +35,8 @@
 #include "include/irq_service_interface.h"
 #include "dcn20/dcn20_resource.h"
 
+#include "dml/dcn2x/dcn2x.h"
+
 #include "dcn10/dcn10_hubp.h"
 #include "dcn10/dcn10_ipp.h"
 #include "dcn20_hubbub.h"
@@ -1974,43 +1976,6 @@ void dcn20_split_stream_for_mpc(
        ASSERT(primary_pipe->plane_state);
 }
 
-void dcn20_populate_dml_writeback_from_context(
-               struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes)
-{
-       int pipe_cnt, i;
-
-       for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
-               struct dc_writeback_info *wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0];
-
-               if (!res_ctx->pipe_ctx[i].stream)
-                       continue;
-
-               /* Set writeback information */
-               pipes[pipe_cnt].dout.wb_enable = (wb_info->wb_enabled == true) ? 1 : 0;
-               pipes[pipe_cnt].dout.num_active_wb++;
-               pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height;
-               pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width;
-               pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width;
-               pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height;
-               pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1;
-               pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1;
-               pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
-               pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
-               pipes[pipe_cnt].dout.wb.wb_hratio = 1.0;
-               pipes[pipe_cnt].dout.wb.wb_vratio = 1.0;
-               if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) {
-                       if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
-                               pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8;
-                       else
-                               pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10;
-               } else
-                       pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32;
-
-               pipe_cnt++;
-       }
-
-}
-
 int dcn20_populate_dml_pipes_from_context(
                struct dc *dc,
                struct dc_state *context,
@@ -2392,7 +2357,9 @@ int dcn20_populate_dml_pipes_from_context(
        }
 
        /* populate writeback information */
+       DC_FP_START();
        dc->res_pool->funcs->populate_dml_writeback_from_context(dc, res_ctx, pipes);
+       DC_FP_END();
 
        return pipe_cnt;
 }
index c8f3127..6ec8ff4 100644 (file)
@@ -58,8 +58,6 @@ struct pipe_ctx *dcn20_acquire_idle_pipe_for_layer(
                struct dc_state *state,
                const struct resource_pool *pool,
                struct dc_stream_state *stream);
-void dcn20_populate_dml_writeback_from_context(
-               struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes);
 
 struct stream_encoder *dcn20_stream_encoder_create(
        enum engine_id eng_id,
index f27fc2a..fbbdf99 100644 (file)
@@ -35,6 +35,8 @@
 #include "include/irq_service_interface.h"
 #include "dcn20/dcn20_resource.h"
 
+#include "dml/dcn2x/dcn2x.h"
+
 #include "clk_mgr.h"
 #include "dcn10/dcn10_hubp.h"
 #include "dcn10/dcn10_ipp.h"
index 596c97d..253654d 100644 (file)
@@ -2617,6 +2617,26 @@ static bool dcn30_resource_construct(
        dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
        dc->caps.color.mpc.ocsc = 1;
 
+       /* read VBIOS LTTPR caps */
+       {
+               if (ctx->dc_bios->funcs->get_lttpr_caps) {
+                       enum bp_result bp_query_result;
+                       uint8_t is_vbios_lttpr_enable = 0;
+
+                       bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+                       dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+               }
+
+               if (ctx->dc_bios->funcs->get_lttpr_interop) {
+                       enum bp_result bp_query_result;
+                       uint8_t is_vbios_interop_enabled = 0;
+
+                       bp_query_result = ctx->dc_bios->funcs->get_lttpr_interop(ctx->dc_bios,
+                                       &is_vbios_interop_enabled);
+                       dc->caps.vbios_lttpr_aware = (bp_query_result == BP_RESULT_OK) && !!is_vbios_interop_enabled;
+               }
+       }
+
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
        else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
index 833ab13..dc7823d 100644 (file)
@@ -146,8 +146,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_03_soc = {
 
                .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */
                .num_states = 1,
-               .sr_exit_time_us = 26.5,
-               .sr_enter_plus_exit_time_us = 31,
+               .sr_exit_time_us = 35.5,
+               .sr_enter_plus_exit_time_us = 40,
                .urgent_latency_us = 4.0,
                .urgent_latency_pixel_data_only_us = 4.0,
                .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
index 38c010a..cd3248d 100644 (file)
@@ -1968,6 +1968,22 @@ static bool dcn31_resource_construct(
        dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
        dc->caps.color.mpc.ocsc = 1;
 
+       /* read VBIOS LTTPR caps */
+       {
+               if (ctx->dc_bios->funcs->get_lttpr_caps) {
+                       enum bp_result bp_query_result;
+                       uint8_t is_vbios_lttpr_enable = 0;
+
+                       bp_query_result = ctx->dc_bios->funcs->get_lttpr_caps(ctx->dc_bios, &is_vbios_lttpr_enable);
+                       dc->caps.vbios_lttpr_enable = (bp_query_result == BP_RESULT_OK) && !!is_vbios_lttpr_enable;
+               }
+
+               /* interop bit is implicit */
+               {
+                       dc->caps.vbios_lttpr_aware = true;
+               }
+       }
+
        if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
                dc->debug = debug_defaults_drv;
        else if (dc->ctx->dce_environment == DCE_ENV_FPGA_MAXIMUS) {
index 4586216..56055df 100644 (file)
@@ -58,6 +58,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
 
 ifdef CONFIG_DRM_AMD_DC_DCN
 CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
@@ -70,6 +72,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(fram
 CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
 CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_rcflags)
@@ -91,6 +94,7 @@ DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
 
 ifdef CONFIG_DRM_AMD_DC_DCN
 DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
+DML += dcn2x/dcn2x.o
 DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
 DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
 DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
index 799bae2..2091dd8 100644 (file)
@@ -488,7 +488,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
        log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element
 
        // each 64b meta request for dcn is 8x8 meta elements and
-       // a meta element covers one 256b block of the the data surface.
+       // a meta element covers one 256b block of the data surface.
        log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256
        log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
                        - log2_meta_req_height;
index 6a6d597..1a0c14e 100644 (file)
@@ -488,7 +488,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
        log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element
 
        // each 64b meta request for dcn is 8x8 meta elements and
-       // a meta element covers one 256b block of the the data surface.
+       // a meta element covers one 256b block of the data surface.
        log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256
        log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
                        - log2_meta_req_height;
index dc1c81a..287e310 100644 (file)
@@ -482,7 +482,7 @@ static void get_meta_and_pte_attr(
        log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element
 
        // each 64b meta request for dcn is 8x8 meta elements and
-       // a meta element covers one 256b block of the the data surface.
+       // a meta element covers one 256b block of the data surface.
        log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256
        log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
                        - log2_meta_req_height;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c b/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c
new file mode 100644 (file)
index 0000000..c585224
--- /dev/null
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "resource.h"
+
+#include "dcn2x.h"
+
+/**
+ * DOC: DCN2x FPU manipulation Overview
+ *
+ * The DCN architecture relies on FPU operations, which require special
+ * compilation flags and the use of kernel_fpu_begin/end functions; ideally, we
+ * want to avoid spreading FPU access across multiple files. With this idea in
+ * mind, this file aims to centralize all DCN20 and DCN2.1 (DCN2x) functions
+ * that require FPU access in a single place. Code in this file follows this
+ * code pattern:
+ *
+ * 1. Functions that use FPU operations should be isolated in static functions.
+ * 2. The FPU functions should have the noinline attribute to ensure anything
+ *    that deals with FP registers is contained within this call.
+ * 3. Any function that needs to be accessed outside this file requires a
+ *    public interface that does not use any FPU reference.
+ * 4. Developers **must not** use DC_FP_START/END in this file, but they need
+ *    to ensure that the caller invokes it before accessing any function
+ *    available in this file. For this reason, public functions in this file
+ *    must invoke dc_assert_fp_enabled();
+ *
+ * Let's expand on the idea behind this code pattern a bit more. To fully
+ * isolate FPU operations in a single place, we must avoid situations where
+ * compilers spill FP values to registers due to FP enable in a specific C
+ * file. Note that even if we isolate all FPU functions in a single file and
+ * call its interface from other files, the compiler might enable the use of
+ * FPU before we call DC_FP_START. Nevertheless, it is the programmer's
+ * responsibility to invoke DC_FP_START/END in the correct place. To highlight
+ * situations where developers forgot to use the FP protection before calling
+ * the DC FPU interface functions, we introduce a helper that checks if the
+ * function is invoked under FP protection. If not, it will trigger a kernel
+ * warning.
+ */
+
+void dcn20_populate_dml_writeback_from_context(struct dc *dc,
+                                              struct resource_context *res_ctx,
+                                              display_e2e_pipe_params_st *pipes)
+{
+       int pipe_cnt, i;
+
+       dc_assert_fp_enabled();
+
+       for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) {
+               struct dc_writeback_info *wb_info = &res_ctx->pipe_ctx[i].stream->writeback_info[0];
+
+               if (!res_ctx->pipe_ctx[i].stream)
+                       continue;
+
+               /* Set writeback information */
+               pipes[pipe_cnt].dout.wb_enable = (wb_info->wb_enabled == true) ? 1 : 0;
+               pipes[pipe_cnt].dout.num_active_wb++;
+               pipes[pipe_cnt].dout.wb.wb_src_height = wb_info->dwb_params.cnv_params.crop_height;
+               pipes[pipe_cnt].dout.wb.wb_src_width = wb_info->dwb_params.cnv_params.crop_width;
+               pipes[pipe_cnt].dout.wb.wb_dst_width = wb_info->dwb_params.dest_width;
+               pipes[pipe_cnt].dout.wb.wb_dst_height = wb_info->dwb_params.dest_height;
+               pipes[pipe_cnt].dout.wb.wb_htaps_luma = 1;
+               pipes[pipe_cnt].dout.wb.wb_vtaps_luma = 1;
+               pipes[pipe_cnt].dout.wb.wb_htaps_chroma = wb_info->dwb_params.scaler_taps.h_taps_c;
+               pipes[pipe_cnt].dout.wb.wb_vtaps_chroma = wb_info->dwb_params.scaler_taps.v_taps_c;
+               pipes[pipe_cnt].dout.wb.wb_hratio = 1.0;
+               pipes[pipe_cnt].dout.wb.wb_vratio = 1.0;
+               if (wb_info->dwb_params.out_format == dwb_scaler_mode_yuv420) {
+                       if (wb_info->dwb_params.output_depth == DWB_OUTPUT_PIXEL_DEPTH_8BPC)
+                               pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_8;
+                       else
+                               pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_420_10;
+               } else {
+                       pipes[pipe_cnt].dout.wb.wb_pixel_format = dm_444_32;
+               }
+
+               pipe_cnt++;
+       }
+}
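
The code-pattern rules from the DOC comment above (keep FP math in static noinline helpers, expose only non-FP public interfaces, and have those interfaces call dc_assert_fp_enabled() instead of toggling protection themselves) can be sketched outside the kernel as follows. This is an illustrative analogue under simplified assumptions: all function names are made up, and a global flag stands in for the per-CPU protection state managed by DC_FP_START()/DC_FP_END():

#include <assert.h>
#include <stdio.h>

/* Stand-in for the protection state toggled by DC_FP_START()/DC_FP_END(). */
static int fp_protected;

static void dc_fp_start(void) { fp_protected = 1; }   /* analogue of DC_FP_START() */
static void dc_fp_end(void)   { fp_protected = 0; }   /* analogue of DC_FP_END() */

static void assert_fp_enabled(void)
{
	/* Rule 4: public helpers only check protection, they never toggle it. */
	assert(fp_protected);
}

/* Rules 1 and 2: FP math isolated in a static noinline function. */
static __attribute__((noinline)) double compute_ratio_fp(double num, double den)
{
	return den != 0.0 ? num / den : 0.0;
}

/* Rule 3: the public interface exposes no FP values to its callers. */
int populate_ratio_scaled(int num, int den, int scale)
{
	assert_fp_enabled();
	return (int)(compute_ratio_fp(num, den) * scale);
}

int main(void)
{
	int v;

	dc_fp_start();                  /* the caller owns the protection window */
	v = populate_ratio_scaled(3, 4, 1000);
	dc_fp_end();

	printf("scaled ratio: %d\n", v);
	return 0;
}
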
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h b/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h
new file mode 100644 (file)
index 0000000..331547b
--- /dev/null
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN2X_H__
+#define __DCN2X_H__
+
+void dcn20_populate_dml_writeback_from_context(struct dc *dc,
+                                              struct resource_context *res_ctx,
+                                              display_e2e_pipe_params_st *pipes);
+
+#endif /* __DCN2X_H__ */
index 04601a7..0d934fa 100644 (file)
@@ -549,7 +549,7 @@ static void get_meta_and_pte_attr(struct display_mode_lib *mode_lib,
        log2_meta_req_bytes = 6; // meta request is 64b and is 8x8byte meta element
 
                                 // each 64b meta request for dcn is 8x8 meta elements and
-                                // a meta element covers one 256b block of the the data surface.
+                                // a meta element covers one 256b block of the data surface.
        log2_meta_req_height = log2_blk256_height + 3; // meta req is 8x8 byte, each byte represent 1 blk256
        log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
                - log2_meta_req_height;
index 3def093..c23905b 100644 (file)
@@ -563,7 +563,7 @@ static void get_meta_and_pte_attr(
        log2_meta_req_bytes = 6;        // meta request is 64b and is 8x8byte meta element
 
        // each 64b meta request for dcn is 8x8 meta elements and
-       // a meta element covers one 256b block of the the data surface.
+       // a meta element covers one 256b block of the data surface.
        log2_meta_req_height = log2_blk256_height + 3;  // meta req is 8x8 byte, each byte represent 1 blk256
        log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element - log2_meta_req_height;
        meta_req_width = 1 << log2_meta_req_width;
index 414da64..8f2b168 100644 (file)
@@ -456,7 +456,7 @@ static void dml1_rq_dlg_get_row_heights(
        log2_meta_req_bytes = 6; /* meta request is 64b and is 8x8byte meta element */
 
        /* each 64b meta request for dcn is 8x8 meta elements and
-        * a meta element covers one 256b block of the the data surface.
+        * a meta element covers one 256b block of the data surface.
         */
        log2_meta_req_height = log2_blk256_height + 3; /* meta req is 8x8 */
        log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
@@ -718,7 +718,7 @@ static void get_surf_rq_param(
        log2_meta_req_bytes = 6; /* meta request is 64b and is 8x8byte meta element */
 
        /* each 64b meta request for dcn is 8x8 meta elements and
-        * a meta element covers one 256b block of the the data surface.
+        * a meta element covers one 256b block of the data surface.
         */
        log2_meta_req_height = log2_blk256_height + 3; /* meta req is 8x8 byte, each byte represent 1 blk256 */
        log2_meta_req_width = log2_meta_req_bytes + 8 - log2_bytes_per_element
index 126c2f3..f50cae2 100644 (file)
 #define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
 
 #if defined(CONFIG_DRM_AMD_DC_DCN)
-#if defined(CONFIG_X86)
-#include <asm/fpu/api.h>
-#define DC_FP_START() kernel_fpu_begin()
-#define DC_FP_END() kernel_fpu_end()
-#elif defined(CONFIG_PPC64)
-#include <asm/switch_to.h>
-#include <asm/cputable.h>
-#define DC_FP_START() { \
-       if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \
-               preempt_disable(); \
-               enable_kernel_vsx(); \
-       } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \
-               preempt_disable(); \
-               enable_kernel_altivec(); \
-       } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \
-               preempt_disable(); \
-               enable_kernel_fp(); \
-       } \
-}
-#define DC_FP_END() { \
-       if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \
-               disable_kernel_vsx(); \
-               preempt_enable(); \
-       } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \
-               disable_kernel_altivec(); \
-               preempt_enable(); \
-       } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \
-               disable_kernel_fp(); \
-               preempt_enable(); \
-       } \
-}
-#endif
+#include "amdgpu_dm/dc_fpu.h"
+#define DC_FP_START() dc_fpu_begin(__func__, __LINE__)
+#define DC_FP_END() dc_fpu_end(__func__, __LINE__)
 #endif
 
 /*
index 02921ad..aa2707e 100644 (file)
 
 /* Firmware versioning. */
 #ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0xe599e0896
+#define DMUB_FW_VERSION_GIT_HASH 0x6d13d5e2c
 #define DMUB_FW_VERSION_MAJOR 0
 #define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 76
+#define DMUB_FW_VERSION_REVISION 77
 #define DMUB_FW_VERSION_TEST 0
 #define DMUB_FW_VERSION_VBIOS 0
 #define DMUB_FW_VERSION_HOTFIX 0
index 6144617..6820012 100644 (file)
@@ -270,11 +270,13 @@ void dmub_dcn31_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
 
 bool dmub_dcn31_is_hw_init(struct dmub_srv *dmub)
 {
-       uint32_t is_hw_init;
+       union dmub_fw_boot_status status;
+       uint32_t is_enable;
 
-       REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_hw_init);
+       status.all = REG_READ(DMCUB_SCRATCH0);
+       REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enable);
 
-       return is_hw_init != 0;
+       return is_enable != 0 && status.bits.dal_fw;
 }
 
 bool dmub_dcn31_is_supported(struct dmub_srv *dmub)
index a9ad00e..1a8a6a3 100644 (file)
 #define regCP_ME2_PIPE2_INT_STATUS_BASE_IDX                                                             0
 #define regCP_ME2_PIPE3_INT_STATUS                                                                      0x1094
 #define regCP_ME2_PIPE3_INT_STATUS_BASE_IDX                                                             0
+#define regCP_ME1_INT_STAT_DEBUG                                                                        0x1095
+#define regCP_ME1_INT_STAT_DEBUG_BASE_IDX                                                               0
+#define regCP_ME2_INT_STAT_DEBUG                                                                        0x1096
+#define regCP_ME2_INT_STAT_DEBUG_BASE_IDX                                                               0
 #define regCC_GC_EDC_CONFIG                                                                             0x1098
 #define regCC_GC_EDC_CONFIG_BASE_IDX                                                                    0
 #define regCP_ME1_PIPE_PRIORITY_CNTS                                                                    0x1099
index bc4d299..0492212 100644 (file)
 #define CP_ME1_PIPE3_INT_STATUS__GENERIC2_INT_STATUS_MASK                                                     0x20000000L
 #define CP_ME1_PIPE3_INT_STATUS__GENERIC1_INT_STATUS_MASK                                                     0x40000000L
 #define CP_ME1_PIPE3_INT_STATUS__GENERIC0_INT_STATUS_MASK                                                     0x80000000L
+//CP_ME1_INT_STAT_DEBUG
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT                                           0xc
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT                                            0xd
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT                                               0xe
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT                                                0xf
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT                                                        0x10
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT                                           0x11
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT                                                   0x17
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT                                               0x18
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT                                                 0x1a
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT                                         0x1b
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT                                                   0x1d
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT                                                   0x1e
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT                                                   0x1f
+#define CP_ME1_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK                                             0x00001000L
+#define CP_ME1_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK                                              0x00002000L
+#define CP_ME1_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK                                                 0x00004000L
+#define CP_ME1_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK                                                  0x00008000L
+#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK                                                          0x00010000L
+#define CP_ME1_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK                                             0x00020000L
+#define CP_ME1_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK                                                     0x00800000L
+#define CP_ME1_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK                                                 0x01000000L
+#define CP_ME1_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK                                                   0x04000000L
+#define CP_ME1_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK                                           0x08000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK                                                     0x20000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK                                                     0x40000000L
+#define CP_ME1_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK                                                     0x80000000L
+//CP_ME2_INT_STAT_DEBUG
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED__SHIFT                                           0xc
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED__SHIFT                                            0xd
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED__SHIFT                                               0xe
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS__SHIFT                                                0xf
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT                                                        0x10
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED__SHIFT                                           0x11
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED__SHIFT                                                   0x17
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED__SHIFT                                               0x18
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED__SHIFT                                                 0x1a
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED__SHIFT                                         0x1b
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED__SHIFT                                                   0x1d
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED__SHIFT                                                   0x1e
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED__SHIFT                                                   0x1f
+#define CP_ME2_INT_STAT_DEBUG__CMP_QUERY_STATUS_INT_ASSERTED_MASK                                             0x00001000L
+#define CP_ME2_INT_STAT_DEBUG__DEQUEUE_REQUEST_INT_ASSERTED_MASK                                              0x00002000L
+#define CP_ME2_INT_STAT_DEBUG__CP_ECC_ERROR_INT_ASSERTED_MASK                                                 0x00004000L
+#define CP_ME2_INT_STAT_DEBUG__SUA_VIOLATION_INT_STATUS_MASK                                                  0x00008000L
+#define CP_ME2_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK                                                          0x00010000L
+#define CP_ME2_INT_STAT_DEBUG__WRM_POLL_TIMEOUT_INT_ASSERTED_MASK                                             0x00020000L
+#define CP_ME2_INT_STAT_DEBUG__PRIV_REG_INT_ASSERTED_MASK                                                     0x00800000L
+#define CP_ME2_INT_STAT_DEBUG__OPCODE_ERROR_INT_ASSERTED_MASK                                                 0x01000000L
+#define CP_ME2_INT_STAT_DEBUG__TIME_STAMP_INT_ASSERTED_MASK                                                   0x04000000L
+#define CP_ME2_INT_STAT_DEBUG__RESERVED_BIT_ERROR_INT_ASSERTED_MASK                                           0x08000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC2_INT_ASSERTED_MASK                                                     0x20000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC1_INT_ASSERTED_MASK                                                     0x40000000L
+#define CP_ME2_INT_STAT_DEBUG__GENERIC0_INT_ASSERTED_MASK                                                     0x80000000L
 //CP_ME2_PIPE0_INT_STATUS
 #define CP_ME2_PIPE0_INT_STATUS__CMP_QUERY_STATUS_INT_STATUS__SHIFT                                           0xc
 #define CP_ME2_PIPE0_INT_STATUS__DEQUEUE_REQUEST_INT_STATUS__SHIFT                                            0xd
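
The CP_ME1_INT_STAT_DEBUG / CP_ME2_INT_STAT_DEBUG definitions added above follow the usual convention of these generated headers: one __SHIFT and one _MASK macro per field. As a quick illustration (a standalone userspace sketch, not part of the patch), a field is tested or extracted by masking and shifting:

#include <stdint.h>
#include <stdio.h>

/* Values copied from the definitions added above. */
#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT  0x10
#define CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK    0x00010000L

/* Extract a single-bit field: (reg & MASK) >> SHIFT. */
static uint32_t gpf_int_asserted(uint32_t reg)
{
	return (reg & CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED_MASK) >>
	       CP_ME1_INT_STAT_DEBUG__GPF_INT_ASSERTED__SHIFT;
}

int main(void)
{
	uint32_t reg = 0x00010000;	/* only the GPF bit set */

	printf("GPF asserted: %u\n", (unsigned int)gpf_int_asserted(reg));	/* prints 1 */
	return 0;
}
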
index 3811e58..4495545 100644 (file)
@@ -590,7 +590,7 @@ struct atom_firmware_info_v3_4 {
        uint8_t  board_i2c_feature_id;            // enum of atom_board_i2c_feature_id_def
        uint8_t  board_i2c_feature_gpio_id;       // i2c id find in gpio_lut data table gpio_id
        uint8_t  board_i2c_feature_slave_addr;
-       uint8_t  reserved3;
+       uint8_t  ras_rom_i2c_slave_addr;
        uint16_t bootup_mvddq_mv;
        uint16_t bootup_mvpp_mv;
        uint32_t zfbstartaddrin16mb;
index 1d3765b..6239c30 100644 (file)
@@ -282,6 +282,7 @@ enum smu_clk_type {
        __SMU_DUMMY_MAP(TDC),                                   \
        __SMU_DUMMY_MAP(THERMAL),                               \
        __SMU_DUMMY_MAP(GFX_PER_CU_CG),                         \
+       __SMU_DUMMY_MAP(DATA_CALCULATIONS),                     \
        __SMU_DUMMY_MAP(RM),                                    \
        __SMU_DUMMY_MAP(DS_DCEFCLK),                            \
        __SMU_DUMMY_MAP(ACDC),                                  \
index 3fea243..dc91eb6 100644 (file)
@@ -26,7 +26,7 @@
 #include "amdgpu_smu.h"
 
 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
-#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x03
+#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x07
 
 /* MP Apertures */
index 5627de7..c5e26d6 100644 (file)
@@ -111,7 +111,9 @@ typedef struct {
   uint32_t InWhisperMode        : 1;
   uint32_t spare0               : 1;
   uint32_t ZstateStatus         : 4;
-  uint32_t spare1               :12;
+  uint32_t spare1               : 4;
+  uint32_t DstateFun            : 4;
+  uint32_t DstateDev            : 4;
   // MP1_EXT_SCRATCH2
   uint32_t P2JobHandler         :24;
   uint32_t RsmuPmiP2FinishedCnt : 8;
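
The hunk above splits the 12-bit spare1 field of MP1_EXT_SCRATCH1 into spare1:4, DstateFun:4 and DstateDev:4, so the word keeps its width and the existing fields keep their positions. A small standalone sketch of that invariant; the 14-bit filler is a hypothetical stand-in for the members above the hunk, assumed to make the word up to 32 bits:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Old layout: only the fields visible in the hunk are modelled. */
struct scratch1_old {
	uint32_t fields_above  : 14;	/* hypothetical filler for unseen fields */
	uint32_t InWhisperMode : 1;
	uint32_t spare0        : 1;
	uint32_t ZstateStatus  : 4;
	uint32_t spare1        : 12;
};

/* New layout: spare1 shrinks from 12 to 4 bits and the freed 8 bits become
 * DstateFun and DstateDev, so the total width is unchanged. */
struct scratch1_new {
	uint32_t fields_above  : 14;
	uint32_t InWhisperMode : 1;
	uint32_t spare0        : 1;
	uint32_t ZstateStatus  : 4;
	uint32_t spare1        : 4;
	uint32_t DstateFun     : 4;
	uint32_t DstateDev     : 4;
};

int main(void)
{
	/* 12 == 4 + 4 + 4, so both layouts occupy the same number of bytes. */
	assert(sizeof(struct scratch1_old) == sizeof(struct scratch1_new));
	printf("both layouts: %zu bytes\n", sizeof(struct scratch1_new));
	return 0;
}
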
index 43c3f6e..7dd92bd 100644 (file)
@@ -1580,7 +1580,7 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
                }
 
                if (smu10_data->gfx_actual_soft_min_freq > smu10_data->gfx_actual_soft_max_freq) {
-                       pr_err("The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+                       pr_err("The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
                                        smu10_data->gfx_actual_soft_min_freq, smu10_data->gfx_actual_soft_max_freq);
                        return -EINVAL;
                }
index 90e40aa..261ef8c 100644 (file)
@@ -353,8 +353,7 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu)
        struct amdgpu_device *adev = smu->adev;
        uint32_t val;
 
-       if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO ||
-           powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO) {
+       if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO) {
                val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
                smu_baco->platform_support =
                        (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true :
index 18681dc..bcaaa08 100644 (file)
@@ -256,7 +256,7 @@ static int vangogh_tables_init(struct smu_context *smu)
        return 0;
 
 err3_out:
-       kfree(smu_table->clocks_table);
+       kfree(smu_table->watermarks_table);
 err2_out:
        kfree(smu_table->gpu_metrics_table);
 err1_out:
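
The err3_out label runs when the clocks-table allocation fails, so it has to free the previously allocated watermarks table; the old code freed the clocks table (not successfully allocated at that point) and leaked the watermarks table. A generic userspace sketch of the goto-unwind idiom this restores (names are illustrative, not the SMU code):

#include <stdio.h>
#include <stdlib.h>

/* Each error label frees exactly what was successfully allocated before the
 * failing step, in reverse order. */
struct tables {
	void *metrics;
	void *watermarks;
	void *clocks;
};

static int tables_init(struct tables *t)
{
	t->metrics = malloc(64);
	if (!t->metrics)
		goto err0;

	t->watermarks = malloc(64);
	if (!t->watermarks)
		goto err1;

	t->clocks = malloc(64);
	if (!t->clocks)
		goto err2;

	return 0;

err2:
	free(t->watermarks);	/* the fix: free the table that *was* allocated */
err1:
	free(t->metrics);
err0:
	return -1;
}

int main(void)
{
	struct tables t;

	if (tables_init(&t))
		return 1;
	printf("tables allocated\n");
	free(t.clocks);
	free(t.watermarks);
	free(t.metrics);
	return 0;
}
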
index 856eeaf..562783d 100644 (file)
@@ -150,7 +150,7 @@ static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
 };
 
 static const struct cmn2asic_mapping aldebaran_feature_mask_map[SMU_FEATURE_COUNT] = {
-       ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_PREFETCHER_BIT,               FEATURE_DATA_CALCULATIONS),
+       ALDEBARAN_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT,            FEATURE_DATA_CALCULATIONS),
        ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT,                   FEATURE_DPM_GFXCLK_BIT),
        ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT,                     FEATURE_DPM_UCLK_BIT),
        ALDEBARAN_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT,                   FEATURE_DPM_SOCCLK_BIT),
index e49a70f..298ea7a 100644 (file)
@@ -67,9 +67,8 @@ static int dp_aux_ep_probe(struct device *dev)
  *
  * Calls through to the endpoint driver remove.
  *
- * Return: 0 if no error or negative error code.
  */
-static int dp_aux_ep_remove(struct device *dev)
+static void dp_aux_ep_remove(struct device *dev)
 {
        struct dp_aux_ep_driver *aux_ep_drv = to_dp_aux_ep_drv(dev->driver);
        struct dp_aux_ep_device *aux_ep = to_dp_aux_ep_dev(dev);
@@ -77,8 +76,6 @@ static int dp_aux_ep_remove(struct device *dev)
        if (aux_ep_drv->remove)
                aux_ep_drv->remove(aux_ep);
        dev_pm_domain_detach(dev, true);
-
-       return 0;
 }
 
 /**
index 72a38f2..47e8453 100644 (file)
@@ -207,6 +207,8 @@ config DRM_I915_LOW_LEVEL_TRACEPOINTS
          This provides the ability to precisely monitor engine utilisation
          and also analyze the request dependency resolving timeline.
 
+         Recommended for driver developers only.
+
          If in doubt, say "N".
 
 config DRM_I915_DEBUG_VBLANK_EVADE
@@ -220,6 +222,8 @@ config DRM_I915_DEBUG_VBLANK_EVADE
          is exceeded, even if there isn't an actual risk of missing
          the vblank.
 
+         Recommended for driver developers only.
+
          If in doubt, say "N".
 
 config DRM_I915_DEBUG_RUNTIME_PM
@@ -232,4 +236,6 @@ config DRM_I915_DEBUG_RUNTIME_PM
          runtime PM functionality. This may introduce overhead during
          driver loading, suspend and resume operations.
 
+         Recommended for driver developers only.
+
          If in doubt, say "N"
index f57dfc7..642a5b5 100644 (file)
@@ -38,6 +38,7 @@ i915-y += i915_drv.o \
          i915_irq.o \
          i915_getparam.o \
          i915_mitigations.o \
+         i915_module.o \
          i915_params.o \
          i915_pci.o \
          i915_scatterlist.o \
@@ -89,7 +90,6 @@ gt-y += \
        gt/gen8_ppgtt.o \
        gt/intel_breadcrumbs.o \
        gt/intel_context.o \
-       gt/intel_context_param.o \
        gt/intel_context_sseu.o \
        gt/intel_engine_cs.o \
        gt/intel_engine_heartbeat.o \
@@ -108,6 +108,7 @@ gt-y += \
        gt/intel_gtt.o \
        gt/intel_llc.o \
        gt/intel_lrc.o \
+       gt/intel_migrate.o \
        gt/intel_mocs.o \
        gt/intel_ppgtt.o \
        gt/intel_rc6.o \
@@ -135,7 +136,6 @@ i915-y += $(gt-y)
 gem-y += \
        gem/i915_gem_busy.o \
        gem/i915_gem_clflush.o \
-       gem/i915_gem_client_blt.o \
        gem/i915_gem_context.o \
        gem/i915_gem_create.o \
        gem/i915_gem_dmabuf.o \
@@ -143,7 +143,6 @@ gem-y += \
        gem/i915_gem_execbuffer.o \
        gem/i915_gem_internal.o \
        gem/i915_gem_object.o \
-       gem/i915_gem_object_blt.o \
        gem/i915_gem_lmem.o \
        gem/i915_gem_mman.o \
        gem/i915_gem_pages.o \
@@ -162,15 +161,17 @@ gem-y += \
 i915-y += \
          $(gem-y) \
          i915_active.o \
+         i915_buddy.o \
          i915_cmd_parser.o \
          i915_gem_evict.o \
          i915_gem_gtt.o \
+         i915_gem_ww.o \
          i915_gem.o \
-         i915_globals.o \
          i915_query.o \
          i915_request.o \
          i915_scheduler.o \
          i915_trace_points.o \
+         i915_ttm_buddy_manager.o \
          i915_vma.o \
          intel_wopcm.o
 
@@ -185,6 +186,8 @@ i915-y += gt/uc/intel_uc.o \
          gt/uc/intel_guc_fw.o \
          gt/uc/intel_guc_log.o \
          gt/uc/intel_guc_log_debugfs.o \
+         gt/uc/intel_guc_rc.o \
+         gt/uc/intel_guc_slpc.o \
          gt/uc/intel_guc_submission.o \
          gt/uc/intel_huc.o \
          gt/uc/intel_huc_debugfs.o \
@@ -266,6 +269,7 @@ i915-y += \
        display/intel_pps.o \
        display/intel_qp_tables.o \
        display/intel_sdvo.o \
+       display/intel_snps_phy.o \
        display/intel_tv.o \
        display/intel_vdsc.o \
        display/intel_vrr.o \
@@ -277,7 +281,9 @@ i915-y += i915_perf.o
 # Post-mortem debug and GPU hang state capture
 i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
 i915-$(CONFIG_DRM_I915_SELFTEST) += \
+       gem/selftests/i915_gem_client_blt.o \
        gem/selftests/igt_gem_utils.o \
+       selftests/intel_scheduler_helpers.o \
        selftests/i915_random.o \
        selftests/i915_selftest.o \
        selftests/igt_atomic.o \
index 9643c45..b1439ba 100644 (file)
@@ -912,7 +912,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
        zpos = 0;
        drm_plane_create_zpos_immutable_property(&plane->base, zpos);
 
-       drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+       intel_plane_helper_add(plane);
 
        return plane;
 
index 833d0c1..7cfe91f 100644 (file)
@@ -19,6 +19,12 @@ static const guid_t intel_dsm_guid =
        GUID_INIT(0x7ed873d3, 0xc2d0, 0x4e4f,
                  0xa8, 0x54, 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c);
 
+#define INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED 0 /* No args */
+
+static const guid_t intel_dsm_guid2 =
+       GUID_INIT(0x3e5b41c6, 0xeb1d, 0x4260,
+                 0x9d, 0x15, 0xc7, 0x1f, 0xba, 0xda, 0xe4, 0x14);
+
 static char *intel_dsm_port_name(u8 id)
 {
        switch (id) {
@@ -176,6 +182,19 @@ void intel_unregister_dsm_handler(void)
 {
 }
 
+void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915)
+{
+       struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+       acpi_handle dhandle;
+
+       dhandle = ACPI_HANDLE(&pdev->dev);
+       if (!dhandle)
+               return;
+
+       acpi_evaluate_dsm(dhandle, &intel_dsm_guid2, INTEL_DSM_REVISION_ID,
+                         INTEL_DSM_FN_GET_BIOS_DATA_FUNCS_SUPPORTED, NULL);
+}
+
 /*
  * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
  * Attached to the Display Adapter).
index e8b0686..9f19740 100644 (file)
@@ -11,11 +11,14 @@ struct drm_i915_private;
 #ifdef CONFIG_ACPI
 void intel_register_dsm_handler(void);
 void intel_unregister_dsm_handler(void);
+void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915);
 void intel_acpi_device_id_update(struct drm_i915_private *i915);
 #else
 static inline void intel_register_dsm_handler(void) { return; }
 static inline void intel_unregister_dsm_handler(void) { return; }
 static inline
+void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915) { return; }
+static inline
 void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; }
 #endif /* CONFIG_ACPI */
 
index 36f52a1..47234d8 100644 (file)
@@ -601,7 +601,12 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
        return 0;
 }
 
-const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
+static const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
        .prepare_fb = intel_prepare_plane_fb,
        .cleanup_fb = intel_cleanup_plane_fb,
 };
+
+void intel_plane_helper_add(struct intel_plane *plane)
+{
+       drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+}
index dc4d05e..62e5a2a 100644 (file)
@@ -17,8 +17,6 @@ struct intel_crtc_state;
 struct intel_plane;
 struct intel_plane_state;
 
-extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
-
 unsigned int intel_adjusted_rate(const struct drm_rect *src,
                                 const struct drm_rect *dst,
                                 unsigned int rate);
@@ -65,5 +63,6 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
                                      bool can_position);
 void intel_plane_set_invisible(struct intel_crtc_state *crtc_state,
                               struct intel_plane_state *plane_state);
+void intel_plane_helper_add(struct intel_plane *plane);
 
 #endif /* __INTEL_ATOMIC_PLANE_H__ */
index 5f4f316..5322375 100644 (file)
@@ -1001,7 +1001,7 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
        /* Catch potential impedance mismatches before they occur! */
        BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
 
-       ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+       ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK);
 
        if (dev_priv->audio_power_refcount++ == 0) {
                if (DISPLAY_VER(dev_priv) >= 9) {
@@ -1034,7 +1034,7 @@ static void i915_audio_component_put_power(struct device *kdev,
                if (IS_GEMINILAKE(dev_priv))
                        glk_force_audio_cdclk(dev_priv, false);
 
-       intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
+       intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO_PLAYBACK, cookie);
 }
 
 static void i915_audio_component_codec_wake_override(struct device *kdev,
index 5b6922e..e86e6ed 100644 (file)
@@ -1871,12 +1871,12 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
 static bool is_port_valid(struct drm_i915_private *i915, enum port port)
 {
        /*
-        * On some ICL/CNL SKUs port F is not present, but broken VBTs mark
+        * On some ICL SKUs port F is not present, but broken VBTs mark
         * the port as present. Only try to initialize port F for the
         * SKUs that may actually have it.
         */
-       if (port == PORT_F && (IS_ICELAKE(i915) || IS_CANNONLAKE(i915)))
-               return IS_ICL_WITH_PORT_F(i915) || IS_CNL_WITH_PORT_F(i915);
+       if (port == PORT_F && IS_ICELAKE(i915))
+               return IS_ICL_WITH_PORT_F(i915);
 
        return true;
 }
@@ -1998,7 +1998,7 @@ static void parse_ddi_port(struct drm_i915_private *i915,
                            "Port %c VBT HDMI boost level: %d\n",
                            port_name(port), hdmi_boost_level);
 
-       /* DP max link rate for CNL+ */
+       /* DP max link rate for GLK+ */
        if (i915->vbt.version >= 216) {
                if (i915->vbt.version >= 230)
                        info->dp_max_link_rate = parse_bdb_230_dp_max_link_rate(child->dp_max_link_rate);
@@ -2166,7 +2166,8 @@ static void
 init_vbt_missing_defaults(struct drm_i915_private *i915)
 {
        enum port port;
-       int ports = PORT_A | PORT_B | PORT_C | PORT_D | PORT_E | PORT_F;
+       int ports = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) |
+                   BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F);
 
        if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915))
                return;
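
The old initializer OR'ed the port enum values themselves rather than per-port bits; since i915 numbers ports from PORT_A == 0, that collapses to 0x7, which covers only bits 0-2 and gets no contribution from PORT_A at all. A standalone sketch of the difference:

#include <stdio.h>

#define BIT(n) (1u << (n))

/* Illustrative enum matching i915's numbering (PORT_A == 0). */
enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_F };

int main(void)
{
	/* Old code: OR of the enum *values* 0..5 collapses to 0x7, which only
	 * covers bits 0-2, and PORT_A (value 0) contributes nothing at all. */
	unsigned int wrong = PORT_A | PORT_B | PORT_C | PORT_D | PORT_E | PORT_F;

	/* Fixed code: one bit per port. */
	unsigned int right = BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_C) |
			     BIT(PORT_D) | BIT(PORT_E) | BIT(PORT_F);

	printf("wrong mask: 0x%x\n", wrong);	/* 0x7  */
	printf("right mask: 0x%x\n", right);	/* 0x3f */
	return 0;
}
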
index bfb398f..e91e0e0 100644 (file)
@@ -17,12 +17,53 @@ struct intel_qgv_point {
        u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd;
 };
 
+struct intel_psf_gv_point {
+       u8 clk; /* clock in multiples of 16.6666 MHz */
+};
+
 struct intel_qgv_info {
        struct intel_qgv_point points[I915_NUM_QGV_POINTS];
+       struct intel_psf_gv_point psf_points[I915_NUM_PSF_GV_POINTS];
        u8 num_points;
+       u8 num_psf_points;
        u8 t_bl;
 };
 
+static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv,
+                                         struct intel_qgv_point *sp,
+                                         int point)
+{
+       u32 dclk_ratio, dclk_reference;
+       u32 val;
+
+       val = intel_uncore_read(&dev_priv->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);
+       dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val);
+       if (val & DG1_QCLK_REFERENCE)
+               dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */
+       else
+               dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */
+       sp->dclk = dclk_ratio * dclk_reference;
+
+       val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+       if (val & DG1_GEAR_TYPE)
+               sp->dclk *= 2;
+
+       if (sp->dclk == 0)
+               return -EINVAL;
+
+       val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR);
+       sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val);
+       sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val);
+
+       val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH);
+       sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val);
+       sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val);
+
+       sp->t_rc = sp->t_rp + sp->t_ras;
+
+       return 0;
+}
+
 static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
                                         struct intel_qgv_point *sp,
                                         int point)
@@ -49,6 +90,28 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
        return 0;
 }
 
+static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
+                                           struct intel_psf_gv_point *points)
+{
+       u32 val = 0;
+       int ret;
+       int i;
+
+       ret = sandybridge_pcode_read(dev_priv,
+                                    ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+                                    ADL_PCODE_MEM_SS_READ_PSF_GV_INFO,
+                                    &val, NULL);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < I915_NUM_PSF_GV_POINTS; i++) {
+               points[i].clk = val & 0xff;
+               val >>= 8;
+       }
+
+       return 0;
+}
+
 int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
                                  u32 points_mask)
 {
@@ -62,7 +125,7 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
                                1);
 
        if (ret < 0) {
-               drm_err(&dev_priv->drm, "Failed to disable qgv points (%d)\n", ret);
+               drm_err(&dev_priv->drm, "Failed to disable qgv points (%d) points: 0x%x\n", ret, points_mask);
                return ret;
        }
 
@@ -76,6 +139,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
        int i, ret;
 
        qi->num_points = dram_info->num_qgv_points;
+       qi->num_psf_points = dram_info->num_psf_gv_points;
 
        if (DISPLAY_VER(dev_priv) == 12)
                switch (dram_info->type) {
@@ -99,7 +163,11 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
        for (i = 0; i < qi->num_points; i++) {
                struct intel_qgv_point *sp = &qi->points[i];
 
-               ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
+               if (IS_DG1(dev_priv))
+                       ret = dg1_mchbar_read_qgv_point_info(dev_priv, sp, i);
+               else
+                       ret = icl_pcode_read_qgv_point_info(dev_priv, sp, i);
+
                if (ret)
                        return ret;
 
@@ -109,6 +177,19 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
                            sp->t_rcd, sp->t_rc);
        }
 
+       if (qi->num_psf_points > 0) {
+               ret = adls_pcode_read_psf_gv_point_info(dev_priv, qi->psf_points);
+               if (ret) {
+                       drm_err(&dev_priv->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n");
+                       qi->num_psf_points = 0;
+               }
+
+               for (i = 0; i < qi->num_psf_points; i++)
+                       drm_dbg_kms(&dev_priv->drm,
+                                   "PSF GV %d: CLK=%d \n",
+                                   i, qi->psf_points[i].clk);
+       }
+
        return 0;
 }
 
@@ -118,6 +199,16 @@ static int icl_calc_bw(int dclk, int num, int den)
        return DIV_ROUND_CLOSEST(num * dclk * 100, den * 6);
 }
 
+static int adl_calc_psf_bw(int clk)
+{
+       /*
+        * clk is multiples of 16.666MHz (100/6)
+        * clk is in multiples of 16.666 MHz (100/6)
+        * According to BSpec, PSF GV bandwidth is
+        * calculated as BW = 64 * clk * 16.666 MHz
+       return DIV_ROUND_CLOSEST(64 * clk * 100, 6);
+}
+
 static int icl_sagv_max_dclk(const struct intel_qgv_info *qi)
 {
        u16 dclk = 0;
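
adl_calc_psf_bw() above turns the PSF GV clock, reported in multiples of 100/6 MHz, into a bandwidth figure (64 * clk * 16.666 MHz, presumably 64 bytes per clock). A standalone sketch with a made-up clk value, in the same MB/s-style units the driver's other bandwidth numbers use (compare the DG2 50 GB/s -> 50000 entry later in this file):

#include <stdio.h>

/* Round-to-nearest division for positive operands, like the kernel macro. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* BW = 64 * clk * 16.666 MHz, i.e. 64 * clk * 100 / 6. */
static int adl_calc_psf_bw(int clk)
{
	return DIV_ROUND_CLOSEST(64 * clk * 100, 6);
}

int main(void)
{
	/* A hypothetical PSF GV point reporting clk = 30 (30 * 16.666 MHz =
	 * 500 MHz) works out to 64 * 500 = 32000. */
	printf("psf bw = %d\n", adl_calc_psf_bw(30));
	return 0;
}
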
@@ -194,6 +285,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
                bi->num_planes = (ipqdepth - clpchgroup) / clpchgroup + 1;
 
                bi->num_qgv_points = qi.num_points;
+               bi->num_psf_gv_points = qi.num_psf_points;
 
                for (j = 0; j < qi.num_points; j++) {
                        const struct intel_qgv_point *sp = &qi.points[j];
@@ -217,6 +309,16 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
                                    i, j, bi->num_planes, bi->deratedbw[j]);
                }
 
+               for (j = 0; j < qi.num_psf_points; j++) {
+                       const struct intel_psf_gv_point *sp = &qi.psf_points[j];
+
+                       bi->psf_bw[j] = adl_calc_psf_bw(sp->clk);
+
+                       drm_dbg_kms(&dev_priv->drm,
+                                   "BW%d / PSF GV %d: num_planes=%d bw=%u\n",
+                                   i, j, bi->num_planes, bi->psf_bw[j]);
+               }
+
                if (bi->num_planes == 1)
                        break;
        }
@@ -234,6 +336,26 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
        return 0;
 }
 
+static void dg2_get_bw_info(struct drm_i915_private *i915)
+{
+       struct intel_bw_info *bi = &i915->max_bw[0];
+
+       /*
+        * DG2 doesn't have SAGV or QGV points, just a constant max bandwidth
+        * that doesn't depend on the number of planes enabled.  Create a
+        * single dummy QGV point to reflect that.  DG2-G10 platforms have a
+        * constant 50 GB/s bandwidth, whereas DG2-G11 platforms have 38 GB/s.
+        */
+       bi->num_planes = 1;
+       bi->num_qgv_points = 1;
+       if (IS_DG2_G11(i915))
+               bi->deratedbw[0] = 38000;
+       else
+               bi->deratedbw[0] = 50000;
+
+       i915->sagv_status = I915_SAGV_NOT_CONTROLLED;
+}
+
 static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
                               int num_planes, int qgv_point)
 {
@@ -262,12 +384,23 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv,
        return 0;
 }
 
+static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv,
+                              int psf_gv_point)
+{
+       const struct intel_bw_info *bi =
+                       &dev_priv->max_bw[0];
+
+       return bi->psf_bw[psf_gv_point];
+}
+
 void intel_bw_init_hw(struct drm_i915_private *dev_priv)
 {
        if (!HAS_DISPLAY(dev_priv))
                return;
 
-       if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
+       if (IS_DG2(dev_priv))
+               dg2_get_bw_info(dev_priv);
+       else if (IS_ALDERLAKE_S(dev_priv) || IS_ALDERLAKE_P(dev_priv))
                icl_get_bw_info(dev_priv, &adls_sa_info);
        else if (IS_ROCKETLAKE(dev_priv))
                icl_get_bw_info(dev_priv, &rkl_sa_info);
@@ -534,12 +667,24 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
        u32 allowed_points = 0;
        unsigned int max_bw_point = 0, max_bw = 0;
        unsigned int num_qgv_points = dev_priv->max_bw[0].num_qgv_points;
-       u32 mask = (1 << num_qgv_points) - 1;
+       unsigned int num_psf_gv_points = dev_priv->max_bw[0].num_psf_gv_points;
+       u32 mask = 0;
 
        /* FIXME earlier gens need some checks too */
        if (DISPLAY_VER(dev_priv) < 11)
                return 0;
 
+       /*
+        * We can _not_ use the whole ADLS_QGV_PT_MASK here, as PCode rejects
+        * the request if we try masking any unadvertised points.
+        * So we need to operate only on the points returned by PCode.
+        */
+       if (num_qgv_points > 0)
+               mask |= REG_GENMASK(num_qgv_points - 1, 0);
+
+       if (num_psf_gv_points > 0)
+               mask |= REG_GENMASK(num_psf_gv_points - 1, 0) << ADLS_PSF_PT_SHIFT;
+
        for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
                                            new_crtc_state, i) {
                unsigned int old_data_rate =
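
The mask sent to PCode is now built only from advertised points: the low bits cover the QGV points and the PSF GV bits sit above them at ADLS_PSF_PT_SHIFT. A standalone sketch of that construction; the shift value of 8 and the GENMASK stand-in are illustrative assumptions, not the i915 definitions:

#include <stdio.h>

/* 32-bit-only stand-in for the kernel GENMASK(). */
#define GENMASK32(h, l)  ((~0u >> (31 - (h))) & (~0u << (l)))
#define PSF_PT_SHIFT     8	/* illustrative value */

int main(void)
{
	unsigned int num_qgv_points = 3;	/* as advertised by PCode */
	unsigned int num_psf_gv_points = 2;
	unsigned int mask = 0;

	/* Only cover points that PCode actually advertised; masking
	 * unadvertised points gets the request rejected. */
	if (num_qgv_points > 0)
		mask |= GENMASK32(num_qgv_points - 1, 0);
	if (num_psf_gv_points > 0)
		mask |= GENMASK32(num_psf_gv_points - 1, 0) << PSF_PT_SHIFT;

	printf("mask = 0x%x\n", mask);	/* 0x307: QGV bits 0-2, PSF bits 8-9 */
	return 0;
}
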
@@ -602,23 +747,44 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
                        max_bw = max_data_rate;
                }
                if (max_data_rate >= data_rate)
-                       allowed_points |= BIT(i);
+                       allowed_points |= REG_FIELD_PREP(ADLS_QGV_PT_MASK, BIT(i));
+
                drm_dbg_kms(&dev_priv->drm, "QGV point %d: max bw %d required %d\n",
                            i, max_data_rate, data_rate);
        }
 
+       for (i = 0; i < num_psf_gv_points; i++) {
+               unsigned int max_data_rate = adl_psf_bw(dev_priv, i);
+
+               if (max_data_rate >= data_rate)
+                       allowed_points |= REG_FIELD_PREP(ADLS_PSF_PT_MASK, BIT(i));
+
+               drm_dbg_kms(&dev_priv->drm, "PSF GV point %d: max bw %d"
+                           " required %d\n",
+                           i, max_data_rate, data_rate);
+       }
+
        /*
         * BSpec states that we always should have at least one allowed point
         * left, so if we couldn't - simply reject the configuration for obvious
         * reasons.
         */
-       if (allowed_points == 0) {
+       if ((allowed_points & ADLS_QGV_PT_MASK) == 0) {
                drm_dbg_kms(&dev_priv->drm, "No QGV points provide sufficient memory"
                            " bandwidth %d for display configuration(%d active planes).\n",
                            data_rate, num_active_planes);
                return -EINVAL;
        }
 
+       if (num_psf_gv_points > 0) {
+               if ((allowed_points & ADLS_PSF_PT_MASK) == 0) {
+                       drm_dbg_kms(&dev_priv->drm, "No PSF GV points provide sufficient memory"
+                                   " bandwidth %d for display configuration(%d active planes).\n",
+                                   data_rate, num_active_planes);
+                       return -EINVAL;
+               }
+       }
+
        /*
         * Leave only single point with highest bandwidth, if
         * we can't enable SAGV due to the increased memory latency it may
index 613ffcc..34fa413 100644 (file)
@@ -1195,17 +1195,6 @@ static const struct intel_cdclk_vals glk_cdclk_table[] = {
        {}
 };
 
-static const struct intel_cdclk_vals cnl_cdclk_table[] = {
-       { .refclk = 19200, .cdclk = 168000, .divider = 4, .ratio = 35 },
-       { .refclk = 19200, .cdclk = 336000, .divider = 2, .ratio = 35 },
-       { .refclk = 19200, .cdclk = 528000, .divider = 2, .ratio = 55 },
-
-       { .refclk = 24000, .cdclk = 168000, .divider = 4, .ratio = 28 },
-       { .refclk = 24000, .cdclk = 336000, .divider = 2, .ratio = 28 },
-       { .refclk = 24000, .cdclk = 528000, .divider = 2, .ratio = 44 },
-       {}
-};
-
 static const struct intel_cdclk_vals icl_cdclk_table[] = {
        { .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 },
        { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 },
@@ -1290,6 +1279,16 @@ static const struct intel_cdclk_vals adlp_cdclk_table[] = {
        {}
 };
 
+static const struct intel_cdclk_vals dg2_cdclk_table[] = {
+       { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio =  9 },
+       { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 },
+       { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 },
+       { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 },
+       { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 },
+       { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 },
+       {}
+};
+
 static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
 {
        const struct intel_cdclk_vals *table = dev_priv->cdclk.table;
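
Numerically, every row of the new dg2_cdclk_table satisfies cdclk = refclk * ratio / divider (the PLL VCO of refclk * ratio divided down). A standalone check of the table rows:

#include <stdio.h>

struct cdclk_vals {
	int refclk, cdclk, divider, ratio;
};

/* Rows copied from the dg2_cdclk_table added above. */
static const struct cdclk_vals dg2_cdclk_table[] = {
	{ 38400, 172800, 2,  9 },
	{ 38400, 192000, 2, 10 },
	{ 38400, 307200, 2, 16 },
	{ 38400, 326400, 4, 34 },
	{ 38400, 556800, 2, 29 },
	{ 38400, 652800, 2, 34 },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(dg2_cdclk_table) / sizeof(dg2_cdclk_table[0]); i++) {
		const struct cdclk_vals *v = &dg2_cdclk_table[i];
		int computed = v->refclk * v->ratio / v->divider;

		printf("%6d kHz: refclk * ratio / divider = %6d (%s)\n",
		       v->cdclk, computed, computed == v->cdclk ? "ok" : "mismatch");
	}
	return 0;
}
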
@@ -1329,16 +1328,6 @@ static u8 bxt_calc_voltage_level(int cdclk)
        return DIV_ROUND_UP(cdclk, 25000);
 }
 
-static u8 cnl_calc_voltage_level(int cdclk)
-{
-       if (cdclk > 336000)
-               return 2;
-       else if (cdclk > 168000)
-               return 1;
-       else
-               return 0;
-}
-
 static u8 icl_calc_voltage_level(int cdclk)
 {
        if (cdclk > 556800)
@@ -1373,15 +1362,6 @@ static u8 tgl_calc_voltage_level(int cdclk)
                return 0;
 }
 
-static void cnl_readout_refclk(struct drm_i915_private *dev_priv,
-                              struct intel_cdclk_config *cdclk_config)
-{
-       if (intel_de_read(dev_priv, SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
-               cdclk_config->ref = 24000;
-       else
-               cdclk_config->ref = 19200;
-}
-
 static void icl_readout_refclk(struct drm_i915_private *dev_priv,
                               struct intel_cdclk_config *cdclk_config)
 {
@@ -1408,10 +1388,10 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
 {
        u32 val, ratio;
 
-       if (DISPLAY_VER(dev_priv) >= 11)
+       if (IS_DG2(dev_priv))
+               cdclk_config->ref = 38400;
+       else if (DISPLAY_VER(dev_priv) >= 11)
                icl_readout_refclk(dev_priv, cdclk_config);
-       else if (IS_CANNONLAKE(dev_priv))
-               cnl_readout_refclk(dev_priv, cdclk_config);
        else
                cdclk_config->ref = 19200;
 
@@ -1427,11 +1407,11 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
        }
 
        /*
-        * CNL+ have the ratio directly in the PLL enable register, gen9lp had
-        * it in a separate PLL control register.
+        * Platforms with DISPLAY_VER >= 11 have the ratio directly in the PLL
+        * enable register; gen9lp had it in a separate PLL control register.
         */
-       if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
-               ratio = val & CNL_CDCLK_PLL_RATIO_MASK;
+       if (DISPLAY_VER(dev_priv) >= 11)
+               ratio = val & ICL_CDCLK_PLL_RATIO_MASK;
        else
                ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
 
@@ -1518,7 +1498,7 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
        dev_priv->cdclk.hw.vco = vco;
 }
 
-static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
+static void icl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
 {
        intel_de_rmw(dev_priv, BXT_DE_PLL_ENABLE,
                     BXT_DE_PLL_PLL_ENABLE, 0);
@@ -1530,12 +1510,12 @@ static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
        dev_priv->cdclk.hw.vco = 0;
 }
 
-static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
+static void icl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
 {
        int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
        u32 val;
 
-       val = CNL_CDCLK_PLL_RATIO(ratio);
+       val = ICL_CDCLK_PLL_RATIO(ratio);
        intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
 
        val |= BXT_DE_PLL_PLL_ENABLE;
@@ -1548,18 +1528,13 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
        dev_priv->cdclk.hw.vco = vco;
 }
 
-static bool has_cdclk_crawl(struct drm_i915_private *i915)
-{
-       return INTEL_INFO(i915)->has_cdclk_crawl;
-}
-
 static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco)
 {
        int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
        u32 val;
 
        /* Write PLL ratio without disabling */
-       val = CNL_CDCLK_PLL_RATIO(ratio) | BXT_DE_PLL_PLL_ENABLE;
+       val = ICL_CDCLK_PLL_RATIO(ratio) | BXT_DE_PLL_PLL_ENABLE;
        intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
 
        /* Submit freq change request */
@@ -1628,7 +1603,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
        int ret;
 
        /* Inform power controller of upcoming frequency change. */
-       if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
+       if (DISPLAY_VER(dev_priv) >= 11)
                ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
                                        SKL_CDCLK_PREPARE_FOR_CHANGE,
                                        SKL_CDCLK_READY_FOR_CHANGE,
@@ -1649,16 +1624,16 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
                return;
        }
 
-       if (has_cdclk_crawl(dev_priv) && dev_priv->cdclk.hw.vco > 0 && vco > 0) {
+       if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->cdclk.hw.vco > 0 && vco > 0) {
                if (dev_priv->cdclk.hw.vco != vco)
                        adlp_cdclk_pll_crawl(dev_priv, vco);
-       } else if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
+       } else if (DISPLAY_VER(dev_priv) >= 11) {
                if (dev_priv->cdclk.hw.vco != 0 &&
                    dev_priv->cdclk.hw.vco != vco)
-                       cnl_cdclk_pll_disable(dev_priv);
+                       icl_cdclk_pll_disable(dev_priv);
 
                if (dev_priv->cdclk.hw.vco != vco)
-                       cnl_cdclk_pll_enable(dev_priv, vco);
+                       icl_cdclk_pll_enable(dev_priv, vco);
        } else {
                if (dev_priv->cdclk.hw.vco != 0 &&
                    dev_priv->cdclk.hw.vco != vco)
@@ -1684,7 +1659,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
        if (pipe != INVALID_PIPE)
                intel_wait_for_vblank(dev_priv, pipe);
 
-       if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
+       if (DISPLAY_VER(dev_priv) >= 11) {
                ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
                                              cdclk_config->voltage_level);
        } else {
@@ -1709,7 +1684,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
 
        intel_update_cdclk(dev_priv);
 
-       if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
+       if (DISPLAY_VER(dev_priv) >= 11)
                /*
                 * Can't read out the voltage level :(
                 * Let's just assume everything is as expected.
@@ -1857,7 +1832,7 @@ static bool intel_cdclk_can_crawl(struct drm_i915_private *dev_priv,
 {
        int a_div, b_div;
 
-       if (!has_cdclk_crawl(dev_priv))
+       if (!HAS_CDCLK_CRAWL(dev_priv))
                return false;
 
        /*
@@ -2118,7 +2093,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
            crtc_state->port_clock >= 540000 &&
            crtc_state->lane_count == 4) {
                if (DISPLAY_VER(dev_priv) == 10) {
-                       /* Display WA #1145: glk,cnl */
+                       /* Display WA #1145: glk */
                        min_cdclk = max(316800, min_cdclk);
                } else if (DISPLAY_VER(dev_priv) == 9 || IS_BROADWELL(dev_priv)) {
                        /* Display WA #1144: skl,bxt */
@@ -2239,7 +2214,7 @@ static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
 
 /*
  * Account for port clock min voltage level requirements.
- * This only really does something on CNL+ but can be
+ * This only really does something on DISPLAY_VER >= 11 but can be
  * called on earlier platforms as well.
  *
  * Note that this functions assumes that 0 is
@@ -2653,8 +2628,6 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
                        dev_priv->max_cdclk_freq = 648000;
                else
                        dev_priv->max_cdclk_freq = 652800;
-       } else if (IS_CANNONLAKE(dev_priv)) {
-               dev_priv->max_cdclk_freq = 528000;
        } else if (IS_GEMINILAKE(dev_priv)) {
                dev_priv->max_cdclk_freq = 316800;
        } else if (IS_BROXTON(dev_priv)) {
@@ -2878,13 +2851,19 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
  */
 void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
 {
-       if (IS_ALDERLAKE_P(dev_priv)) {
+       if (IS_DG2(dev_priv)) {
+               dev_priv->display.set_cdclk = bxt_set_cdclk;
+               dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
+               dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+               dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
+               dev_priv->cdclk.table = dg2_cdclk_table;
+       } else if (IS_ALDERLAKE_P(dev_priv)) {
                dev_priv->display.set_cdclk = bxt_set_cdclk;
                dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
                dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
                dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
-               /* Wa_22011320316:adlp[a0] */
-               if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
+               /* Wa_22011320316:adl-p[a0] */
+               if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
                        dev_priv->cdclk.table = adlp_a_step_cdclk_table;
                else
                        dev_priv->cdclk.table = adlp_cdclk_table;
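
The Wa_22011320316 bound change from (STEP_A0, STEP_A0) to (STEP_A0, STEP_B0) only makes sense if the stepping check treats the upper bound as exclusive; under that assumption the old range matched nothing, so the A0 cdclk table was never selected. A toy sketch of a half-open range check (an assumption for illustration, not the i915 macro):

#include <stdbool.h>
#include <stdio.h>

enum step { STEP_A0, STEP_B0, STEP_C0 };

/* Half-open range check, [since, until). */
static bool display_step_in(enum step step, enum step since, enum step until)
{
	return step >= since && step < until;
}

int main(void)
{
	/* With an exclusive upper bound, (A0, A0) is an empty range, so the
	 * A0 workaround table was never selected; (A0, B0) matches A0 only. */
	printf("A0 in [A0, A0): %d\n", display_step_in(STEP_A0, STEP_A0, STEP_A0));
	printf("A0 in [A0, B0): %d\n", display_step_in(STEP_A0, STEP_A0, STEP_B0));
	return 0;
}
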
@@ -2912,12 +2891,6 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
                dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
                dev_priv->display.calc_voltage_level = icl_calc_voltage_level;
                dev_priv->cdclk.table = icl_cdclk_table;
-       } else if (IS_CANNONLAKE(dev_priv)) {
-               dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
-               dev_priv->display.set_cdclk = bxt_set_cdclk;
-               dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
-               dev_priv->display.calc_voltage_level = cnl_calc_voltage_level;
-               dev_priv->cdclk.table = cnl_cdclk_table;
        } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
                dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
                dev_priv->display.set_cdclk = bxt_set_cdclk;
index dab892d..afcb4bf 100644 (file)
@@ -305,13 +305,12 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
                                    ilk_csc_postoff_limited_range);
        } else if (crtc_state->csc_enable) {
                /*
-                * On GLK+ both pipe CSC and degamma LUT are controlled
+                * On GLK both pipe CSC and degamma LUT are controlled
                 * by csc_enable. Hence for the cases where the degamma
                 * LUT is needed but CSC is not, we need to load an
                 * identity matrix.
                 */
-               drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
-                           !IS_GEMINILAKE(dev_priv));
+               drm_WARN_ON(&dev_priv->drm, !IS_GEMINILAKE(dev_priv));
 
                ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
                                    ilk_csc_coeff_identity,
index 487c54c..bacdf8a 100644 (file)
@@ -23,9 +23,9 @@ enum {
        PROCMON_1_05V_DOT_1,
 };
 
-static const struct cnl_procmon {
+static const struct icl_procmon {
        u32 dw1, dw9, dw10;
-} cnl_procmon_values[] = {
+} icl_procmon_values[] = {
        [PROCMON_0_85V_DOT_0] =
                { .dw1 = 0x00000000, .dw9 = 0x62AB67BB, .dw10 = 0x51914F96, },
        [PROCMON_0_95V_DOT_0] =
@@ -38,15 +38,10 @@ static const struct cnl_procmon {
                { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, },
 };
 
-/*
- * CNL has just one set of registers, while gen11 has a set for each combo PHY.
- * The CNL registers are equivalent to the gen11 PHY A registers, that's why we
- * call the ICL macros even though the function has CNL on its name.
- */
-static const struct cnl_procmon *
-cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
+static const struct icl_procmon *
+icl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
 {
-       const struct cnl_procmon *procmon;
+       const struct icl_procmon *procmon;
        u32 val;
 
        val = intel_de_read(dev_priv, ICL_PORT_COMP_DW3(phy));
@@ -55,32 +50,32 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
                MISSING_CASE(val);
                fallthrough;
        case VOLTAGE_INFO_0_85V | PROCESS_INFO_DOT_0:
-               procmon = &cnl_procmon_values[PROCMON_0_85V_DOT_0];
+               procmon = &icl_procmon_values[PROCMON_0_85V_DOT_0];
                break;
        case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_0:
-               procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_0];
+               procmon = &icl_procmon_values[PROCMON_0_95V_DOT_0];
                break;
        case VOLTAGE_INFO_0_95V | PROCESS_INFO_DOT_1:
-               procmon = &cnl_procmon_values[PROCMON_0_95V_DOT_1];
+               procmon = &icl_procmon_values[PROCMON_0_95V_DOT_1];
                break;
        case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_0:
-               procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_0];
+               procmon = &icl_procmon_values[PROCMON_1_05V_DOT_0];
                break;
        case VOLTAGE_INFO_1_05V | PROCESS_INFO_DOT_1:
-               procmon = &cnl_procmon_values[PROCMON_1_05V_DOT_1];
+               procmon = &icl_procmon_values[PROCMON_1_05V_DOT_1];
                break;
        }
 
        return procmon;
 }
 
-static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
+static void icl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
                                       enum phy phy)
 {
-       const struct cnl_procmon *procmon;
+       const struct icl_procmon *procmon;
        u32 val;
 
-       procmon = cnl_get_procmon_ref_values(dev_priv, phy);
+       procmon = icl_get_procmon_ref_values(dev_priv, phy);
 
        val = intel_de_read(dev_priv, ICL_PORT_COMP_DW1(phy));
        val &= ~((0xff << 16) | 0xff);
@@ -109,13 +104,13 @@ static bool check_phy_reg(struct drm_i915_private *dev_priv,
        return true;
 }
 
-static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
+static bool icl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
                                          enum phy phy)
 {
-       const struct cnl_procmon *procmon;
+       const struct icl_procmon *procmon;
        bool ret;
 
-       procmon = cnl_get_procmon_ref_values(dev_priv, phy);
+       procmon = icl_get_procmon_ref_values(dev_priv, phy);
 
        ret = check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW1(phy),
                            (0xff << 16) | 0xff, procmon->dw1);
@@ -127,61 +122,6 @@ static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
        return ret;
 }
 
-static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
-{
-       return !(intel_de_read(dev_priv, CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
-               (intel_de_read(dev_priv, CNL_PORT_COMP_DW0) & COMP_INIT);
-}
-
-static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
-{
-       enum phy phy = PHY_A;
-       bool ret;
-
-       if (!cnl_combo_phy_enabled(dev_priv))
-               return false;
-
-       ret = cnl_verify_procmon_ref_values(dev_priv, phy);
-
-       ret &= check_phy_reg(dev_priv, phy, CNL_PORT_CL1CM_DW5,
-                            CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
-
-       return ret;
-}
-
-static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       val = intel_de_read(dev_priv, CHICKEN_MISC_2);
-       val &= ~CNL_COMP_PWR_DOWN;
-       intel_de_write(dev_priv, CHICKEN_MISC_2, val);
-
-       /* Dummy PORT_A to get the correct CNL register from the ICL macro */
-       cnl_set_procmon_ref_values(dev_priv, PHY_A);
-
-       val = intel_de_read(dev_priv, CNL_PORT_COMP_DW0);
-       val |= COMP_INIT;
-       intel_de_write(dev_priv, CNL_PORT_COMP_DW0, val);
-
-       val = intel_de_read(dev_priv, CNL_PORT_CL1CM_DW5);
-       val |= CL_POWER_DOWN_ENABLE;
-       intel_de_write(dev_priv, CNL_PORT_CL1CM_DW5, val);
-}
-
-static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-
-       if (!cnl_combo_phy_verify_state(dev_priv))
-               drm_warn(&dev_priv->drm,
-                        "Combo PHY HW state changed unexpectedly.\n");
-
-       val = intel_de_read(dev_priv, CHICKEN_MISC_2);
-       val |= CNL_COMP_PWR_DOWN;
-       intel_de_write(dev_priv, CHICKEN_MISC_2, val);
-}
-
 static bool has_phy_misc(struct drm_i915_private *i915, enum phy phy)
 {
        /*
@@ -291,7 +231,7 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
                                     DCC_MODE_SELECT_CONTINUOSLY);
        }
 
-       ret &= cnl_verify_procmon_ref_values(dev_priv, phy);
+       ret &= icl_verify_procmon_ref_values(dev_priv, phy);
 
        if (phy_is_master(dev_priv, phy)) {
                ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
@@ -415,7 +355,7 @@ skip_phy_misc:
                        intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
                }
 
-               cnl_set_procmon_ref_values(dev_priv, phy);
+               icl_set_procmon_ref_values(dev_priv, phy);
 
                if (phy_is_master(dev_priv, phy)) {
                        val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy));
@@ -474,16 +414,10 @@ skip_phy_misc:
 
 void intel_combo_phy_init(struct drm_i915_private *i915)
 {
-       if (DISPLAY_VER(i915) >= 11)
-               icl_combo_phys_init(i915);
-       else if (IS_CANNONLAKE(i915))
-               cnl_combo_phys_init(i915);
+       icl_combo_phys_init(i915);
 }
 
 void intel_combo_phy_uninit(struct drm_i915_private *i915)
 {
-       if (DISPLAY_VER(i915) >= 11)
-               icl_combo_phys_uninit(i915);
-       else if (IS_CANNONLAKE(i915))
-               cnl_combo_phys_uninit(i915);
+       icl_combo_phys_uninit(i915);
 }
index 448c4d9..254e671 100644 (file)
@@ -335,7 +335,7 @@ int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
                dev_priv->plane_to_crtc_mapping[i9xx_plane] = crtc;
        }
 
-       if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
+       if (DISPLAY_VER(dev_priv) >= 11)
                drm_crtc_create_scaling_filter_property(&crtc->base,
                                                BIT(DRM_SCALING_FILTER_DEFAULT) |
                                                BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
index 966e020..c7618fe 100644 (file)
@@ -383,7 +383,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
        if (plane_state->hw.rotation & DRM_MODE_ROTATE_180)
                cntl |= MCURSOR_ROTATE_180;
 
-       /* Wa_22012358565:adlp */
+       /* Wa_22012358565:adl-p */
        if (DISPLAY_VER(dev_priv) == 13)
                cntl |= MCURSOR_ARB_SLOTS(1);
 
@@ -629,12 +629,16 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
 
        /*
         * When crtc is inactive or there is a modeset pending,
-        * wait for it to complete in the slowpath
+        * wait for it to complete in the slowpath.
+        * PSR2 selective fetch also requires the slow path as
+        * PSR2 plane and transcoder registers can only be updated during
+        * vblank.
         *
         * FIXME bigjoiner fastpath would be good
         */
        if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) ||
-           crtc_state->update_pipe || crtc_state->bigjoiner)
+           crtc_state->update_pipe || crtc_state->bigjoiner ||
+           crtc_state->enable_psr2_sel_fetch)
                goto slow;
 
        /*
@@ -801,7 +805,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
        if (DISPLAY_VER(dev_priv) >= 12)
                drm_plane_enable_fb_damage_clips(&cursor->base);
 
-       drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
+       intel_plane_helper_add(cursor);
 
        return cursor;
 
index 26a3aa7..d816295 100644 (file)
@@ -51,6 +51,7 @@
 #include "intel_panel.h"
 #include "intel_pps.h"
 #include "intel_psr.h"
+#include "intel_snps_phy.h"
 #include "intel_sprite.h"
 #include "intel_tc.h"
 #include "intel_vdsc.h"
@@ -171,14 +172,18 @@ void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
 static void intel_wait_ddi_buf_active(struct drm_i915_private *dev_priv,
                                      enum port port)
 {
+       int ret;
+
        /* Wait > 518 usecs for DDI_BUF_CTL to be non idle */
        if (DISPLAY_VER(dev_priv) < 10) {
                usleep_range(518, 1000);
                return;
        }
 
-       if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
-                         DDI_BUF_IS_IDLE), 500))
+       ret = _wait_for(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
+                         DDI_BUF_IS_IDLE), IS_DG2(dev_priv) ? 1200 : 500, 10, 10);
+
+       if (ret)
                drm_err(&dev_priv->drm, "Timeout waiting for DDI BUF %c to get active\n",
                        port_name(port));
 }
@@ -822,7 +827,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 static enum intel_display_power_domain
 intel_ddi_main_link_aux_domain(struct intel_digital_port *dig_port)
 {
-       /* CNL+ HW requires corresponding AUX IOs to be powered up for PSR with
+       /* ICL+ HW requires corresponding AUX IOs to be powered up for PSR with
         * DC states enabled at the same time, while for driver initiated AUX
         * transfers we need the same AUX IOs to be powered but with DC states
         * disabled. Accordingly use the AUX power domain here which leaves DC
@@ -1017,126 +1022,6 @@ static u8 intel_ddi_dp_preemph_max(struct intel_dp *intel_dp)
        return DP_TRAIN_PRE_EMPH_LEVEL_3;
 }
 
-static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
-                                  const struct intel_crtc_state *crtc_state,
-                                  int level)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       const struct intel_ddi_buf_trans *ddi_translations;
-       enum port port = encoder->port;
-       int n_entries, ln;
-       u32 val;
-
-       ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
-       if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
-               return;
-       if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
-               level = n_entries - 1;
-
-       /* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */
-       val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
-       val &= ~SCALING_MODE_SEL_MASK;
-       val |= SCALING_MODE_SEL(2);
-       intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
-
-       /* Program PORT_TX_DW2 */
-       val = intel_de_read(dev_priv, CNL_PORT_TX_DW2_LN0(port));
-       val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
-                RCOMP_SCALAR_MASK);
-       val |= SWING_SEL_UPPER(ddi_translations->entries[level].cnl.dw2_swing_sel);
-       val |= SWING_SEL_LOWER(ddi_translations->entries[level].cnl.dw2_swing_sel);
-       /* Rcomp scalar is fixed as 0x98 for every table entry */
-       val |= RCOMP_SCALAR(0x98);
-       intel_de_write(dev_priv, CNL_PORT_TX_DW2_GRP(port), val);
-
-       /* Program PORT_TX_DW4 */
-       /* We cannot write to GRP. It would overrite individual loadgen */
-       for (ln = 0; ln < 4; ln++) {
-               val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
-               val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
-                        CURSOR_COEFF_MASK);
-               val |= POST_CURSOR_1(ddi_translations->entries[level].cnl.dw4_post_cursor_1);
-               val |= POST_CURSOR_2(ddi_translations->entries[level].cnl.dw4_post_cursor_2);
-               val |= CURSOR_COEFF(ddi_translations->entries[level].cnl.dw4_cursor_coeff);
-               intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
-       }
-
-       /* Program PORT_TX_DW5 */
-       /* All DW5 values are fixed for every table entry */
-       val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
-       val &= ~RTERM_SELECT_MASK;
-       val |= RTERM_SELECT(6);
-       val |= TAP3_DISABLE;
-       intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
-
-       /* Program PORT_TX_DW7 */
-       val = intel_de_read(dev_priv, CNL_PORT_TX_DW7_LN0(port));
-       val &= ~N_SCALAR_MASK;
-       val |= N_SCALAR(ddi_translations->entries[level].cnl.dw7_n_scalar);
-       intel_de_write(dev_priv, CNL_PORT_TX_DW7_GRP(port), val);
-}
-
-static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
-                                   const struct intel_crtc_state *crtc_state,
-                                   int level)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       enum port port = encoder->port;
-       int width, rate, ln;
-       u32 val;
-
-       width = crtc_state->lane_count;
-       rate = crtc_state->port_clock;
-
-       /*
-        * 1. If port type is eDP or DP,
-        * set PORT_PCS_DW1 cmnkeeper_enable to 1b,
-        * else clear to 0b.
-        */
-       val = intel_de_read(dev_priv, CNL_PORT_PCS_DW1_LN0(port));
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
-               val &= ~COMMON_KEEPER_EN;
-       else
-               val |= COMMON_KEEPER_EN;
-       intel_de_write(dev_priv, CNL_PORT_PCS_DW1_GRP(port), val);
-
-       /* 2. Program loadgen select */
-       /*
-        * Program PORT_TX_DW4_LN depending on Bit rate and used lanes
-        * <= 6 GHz and 4 lanes (LN0=0, LN1=1, LN2=1, LN3=1)
-        * <= 6 GHz and 1,2 lanes (LN0=0, LN1=1, LN2=1, LN3=0)
-        * > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
-        */
-       for (ln = 0; ln <= 3; ln++) {
-               val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
-               val &= ~LOADGEN_SELECT;
-
-               if ((rate <= 600000 && width == 4 && ln >= 1)  ||
-                   (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
-                       val |= LOADGEN_SELECT;
-               }
-               intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
-       }
-
-       /* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
-       val = intel_de_read(dev_priv, CNL_PORT_CL1CM_DW5);
-       val |= SUS_CLOCK_CONFIG;
-       intel_de_write(dev_priv, CNL_PORT_CL1CM_DW5, val);
-
-       /* 4. Clear training enable to change swing values */
-       val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
-       val &= ~TX_TRAINING_EN;
-       intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
-
-       /* 5. Program swing and de-emphasis */
-       cnl_ddi_vswing_program(encoder, crtc_state, level);
-
-       /* 6. Set training enable to trigger update */
-       val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
-       val |= TX_TRAINING_EN;
-       intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
-}
-
 static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
                                         const struct intel_crtc_state *crtc_state,
                                         int level)
@@ -1175,8 +1060,8 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
        val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
        val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
                 RCOMP_SCALAR_MASK);
-       val |= SWING_SEL_UPPER(ddi_translations->entries[level].cnl.dw2_swing_sel);
-       val |= SWING_SEL_LOWER(ddi_translations->entries[level].cnl.dw2_swing_sel);
+       val |= SWING_SEL_UPPER(ddi_translations->entries[level].icl.dw2_swing_sel);
+       val |= SWING_SEL_LOWER(ddi_translations->entries[level].icl.dw2_swing_sel);
        /* Program Rcomp scalar for every table entry */
        val |= RCOMP_SCALAR(0x98);
        intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val);
@@ -1187,16 +1072,16 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
                val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
                val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
                         CURSOR_COEFF_MASK);
-               val |= POST_CURSOR_1(ddi_translations->entries[level].cnl.dw4_post_cursor_1);
-               val |= POST_CURSOR_2(ddi_translations->entries[level].cnl.dw4_post_cursor_2);
-               val |= CURSOR_COEFF(ddi_translations->entries[level].cnl.dw4_cursor_coeff);
+               val |= POST_CURSOR_1(ddi_translations->entries[level].icl.dw4_post_cursor_1);
+               val |= POST_CURSOR_2(ddi_translations->entries[level].icl.dw4_post_cursor_2);
+               val |= CURSOR_COEFF(ddi_translations->entries[level].icl.dw4_cursor_coeff);
                intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
        }
 
        /* Program PORT_TX_DW7 */
        val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN0(phy));
        val &= ~N_SCALAR_MASK;
-       val |= N_SCALAR(ddi_translations->entries[level].cnl.dw7_n_scalar);
+       val |= N_SCALAR(ddi_translations->entries[level].icl.dw7_n_scalar);
        intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val);
 }
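
The `.cnl` -> `.icl` switches above (and throughout the translation tables later in this diff) only rename the active member of union intel_ddi_buf_trans_entry; the per-level values themselves are unchanged. Below is a minimal sketch of the renamed member, using the field names seen in this hunk; the member order and types are assumptions, since the real definition lives in the buf-trans header, which is not part of this excerpt:

	#include <linux/types.h>

	/* Sketch only: field order/types assumed, not copied from the upstream header. */
	struct icl_ddi_buf_trans {
		u8 dw2_swing_sel;
		u8 dw7_n_scalar;
		u8 dw4_cursor_coeff;
		u8 dw4_post_cursor_2;
		u8 dw4_post_cursor_1;
	};

	union intel_ddi_buf_trans_entry {
		/* ...other per-platform members elided... */
		struct icl_ddi_buf_trans icl;	/* previously spelled .cnl */
	};

The vswing helper then indexes this as ddi_translations->entries[level].icl.dw2_swing_sel (and the DW4/DW7 fields), exactly as in the register programming above.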
 
@@ -1496,33 +1381,33 @@ static int intel_ddi_dp_level(struct intel_dp *intel_dp)
 }
 
 static void
-tgl_set_signal_levels(struct intel_dp *intel_dp,
+dg2_set_signal_levels(struct intel_dp *intel_dp,
                      const struct intel_crtc_state *crtc_state)
 {
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        int level = intel_ddi_dp_level(intel_dp);
 
-       tgl_ddi_vswing_sequence(encoder, crtc_state, level);
+       intel_snps_phy_ddi_vswing_sequence(encoder, level);
 }
 
 static void
-icl_set_signal_levels(struct intel_dp *intel_dp,
+tgl_set_signal_levels(struct intel_dp *intel_dp,
                      const struct intel_crtc_state *crtc_state)
 {
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        int level = intel_ddi_dp_level(intel_dp);
 
-       icl_ddi_vswing_sequence(encoder, crtc_state, level);
+       tgl_ddi_vswing_sequence(encoder, crtc_state, level);
 }
 
 static void
-cnl_set_signal_levels(struct intel_dp *intel_dp,
+icl_set_signal_levels(struct intel_dp *intel_dp,
                      const struct intel_crtc_state *crtc_state)
 {
        struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
        int level = intel_ddi_dp_level(intel_dp);
 
-       cnl_ddi_vswing_sequence(encoder, crtc_state, level);
+       icl_ddi_vswing_sequence(encoder, crtc_state, level);
 }
 
 static void
@@ -1560,7 +1445,7 @@ hsw_set_signal_levels(struct intel_dp *intel_dp,
        intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
 }
 
-static void _cnl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+static void _icl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
                                  u32 clk_sel_mask, u32 clk_sel, u32 clk_off)
 {
        mutex_lock(&i915->dpll.lock);
@@ -1576,7 +1461,7 @@ static void _cnl_ddi_enable_clock(struct drm_i915_private *i915, i915_reg_t reg,
        mutex_unlock(&i915->dpll.lock);
 }
 
-static void _cnl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
+static void _icl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg,
                                   u32 clk_off)
 {
        mutex_lock(&i915->dpll.lock);
@@ -1586,14 +1471,14 @@ static void _cnl_ddi_disable_clock(struct drm_i915_private *i915, i915_reg_t reg
        mutex_unlock(&i915->dpll.lock);
 }
 
-static bool _cnl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
+static bool _icl_ddi_is_clock_enabled(struct drm_i915_private *i915, i915_reg_t reg,
                                      u32 clk_off)
 {
        return !(intel_de_read(i915, reg) & clk_off);
 }
 
 static struct intel_shared_dpll *
-_cnl_ddi_get_pll(struct drm_i915_private *i915, i915_reg_t reg,
+_icl_ddi_get_pll(struct drm_i915_private *i915, i915_reg_t reg,
                 u32 clk_sel_mask, u32 clk_sel_shift)
 {
        enum intel_dpll_id id;
@@ -1613,7 +1498,7 @@ static void adls_ddi_enable_clock(struct intel_encoder *encoder,
        if (drm_WARN_ON(&i915->drm, !pll))
                return;
 
-       _cnl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+       _icl_ddi_enable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
                              ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
                              pll->info->id << ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy),
                              ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1624,7 +1509,7 @@ static void adls_ddi_disable_clock(struct intel_encoder *encoder)
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
 
-       _cnl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
+       _icl_ddi_disable_clock(i915, ADLS_DPCLKA_CFGCR(phy),
                               ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
 }
 
@@ -1633,7 +1518,7 @@ static bool adls_ddi_is_clock_enabled(struct intel_encoder *encoder)
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
 
-       return _cnl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
+       return _icl_ddi_is_clock_enabled(i915, ADLS_DPCLKA_CFGCR(phy),
                                         ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
 }
 
@@ -1642,7 +1527,7 @@ static struct intel_shared_dpll *adls_ddi_get_pll(struct intel_encoder *encoder)
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
 
-       return _cnl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy),
+       return _icl_ddi_get_pll(i915, ADLS_DPCLKA_CFGCR(phy),
                                ADLS_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
                                ADLS_DPCLKA_CFGCR_DDI_SHIFT(phy));
 }
@@ -1657,7 +1542,7 @@ static void rkl_ddi_enable_clock(struct intel_encoder *encoder,
        if (drm_WARN_ON(&i915->drm, !pll))
                return;
 
-       _cnl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+       _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
                              RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
                              RKL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
                              RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1668,7 +1553,7 @@ static void rkl_ddi_disable_clock(struct intel_encoder *encoder)
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
 
-       _cnl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+       _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
                               RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
 }
 
@@ -1677,7 +1562,7 @@ static bool rkl_ddi_is_clock_enabled(struct intel_encoder *encoder)
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
 
-       return _cnl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+       return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
                                         RKL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
 }
 
@@ -1686,7 +1571,7 @@ static struct intel_shared_dpll *rkl_ddi_get_pll(struct intel_encoder *encoder)
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
 
-       return _cnl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
+       return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
                                RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
                                RKL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
 }
@@ -1710,7 +1595,7 @@ static void dg1_ddi_enable_clock(struct intel_encoder *encoder,
                        (pll->info->id >= DPLL_ID_DG1_DPLL2 && phy < PHY_C)))
                return;
 
-       _cnl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+       _icl_ddi_enable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
                              DG1_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
                              DG1_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
                              DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1721,7 +1606,7 @@ static void dg1_ddi_disable_clock(struct intel_encoder *encoder)
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
 
-       _cnl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
+       _icl_ddi_disable_clock(i915, DG1_DPCLKA_CFGCR0(phy),
                               DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
 }
 
@@ -1730,7 +1615,7 @@ static bool dg1_ddi_is_clock_enabled(struct intel_encoder *encoder)
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
 
-       return _cnl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
+       return _icl_ddi_is_clock_enabled(i915, DG1_DPCLKA_CFGCR0(phy),
                                         DG1_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
 }
 
@@ -1767,7 +1652,7 @@ static void icl_ddi_combo_enable_clock(struct intel_encoder *encoder,
        if (drm_WARN_ON(&i915->drm, !pll))
                return;
 
-       _cnl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
+       _icl_ddi_enable_clock(i915, ICL_DPCLKA_CFGCR0,
                              ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
                              ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy),
                              ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
@@ -1778,7 +1663,7 @@ static void icl_ddi_combo_disable_clock(struct intel_encoder *encoder)
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
 
-       _cnl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
+       _icl_ddi_disable_clock(i915, ICL_DPCLKA_CFGCR0,
                               ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
 }
 
@@ -1787,7 +1672,7 @@ static bool icl_ddi_combo_is_clock_enabled(struct intel_encoder *encoder)
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
 
-       return _cnl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
+       return _icl_ddi_is_clock_enabled(i915, ICL_DPCLKA_CFGCR0,
                                         ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy));
 }
 
@@ -1796,7 +1681,7 @@ struct intel_shared_dpll *icl_ddi_combo_get_pll(struct intel_encoder *encoder)
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        enum phy phy = intel_port_to_phy(i915, encoder->port);
 
-       return _cnl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
+       return _icl_ddi_get_pll(i915, ICL_DPCLKA_CFGCR0,
                                ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy),
                                ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy));
 }
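
For reference, the four _icl_ddi_* primitives renamed above stay platform-agnostic: each per-platform wrapper only supplies its own DPCLKA register and bitfield macros, as the ADL-S/RKL/DG1/ICL hunks show. A hypothetical wrapper for some future platform would take the same shape; the NEWPLAT_* names below are placeholders, not upstream macros:

	static void newplat_ddi_enable_clock(struct intel_encoder *encoder,
					     const struct intel_crtc_state *crtc_state)
	{
		struct drm_i915_private *i915 = to_i915(encoder->base.dev);
		const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
		enum phy phy = intel_port_to_phy(i915, encoder->port);

		if (drm_WARN_ON(&i915->drm, !pll))
			return;

		/* Placeholder macros: the platform's clock-select register,
		 * select mask/value and clock-off bit. */
		_icl_ddi_enable_clock(i915, NEWPLAT_DPCLKA_CFGCR(phy),
				      NEWPLAT_DPCLKA_CFGCR_DDI_CLK_SEL_MASK(phy),
				      NEWPLAT_DPCLKA_CFGCR_DDI_CLK_SEL(pll->info->id, phy),
				      NEWPLAT_DPCLKA_CFGCR_DDI_CLK_OFF(phy));
	}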
@@ -1929,50 +1814,6 @@ static struct intel_shared_dpll *icl_ddi_tc_get_pll(struct intel_encoder *encode
        return intel_get_shared_dpll_by_id(i915, id);
 }
 
-static void cnl_ddi_enable_clock(struct intel_encoder *encoder,
-                                const struct intel_crtc_state *crtc_state)
-{
-       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-       const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
-       enum port port = encoder->port;
-
-       if (drm_WARN_ON(&i915->drm, !pll))
-               return;
-
-       _cnl_ddi_enable_clock(i915, DPCLKA_CFGCR0,
-                             DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port),
-                             DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port),
-                             DPCLKA_CFGCR0_DDI_CLK_OFF(port));
-}
-
-static void cnl_ddi_disable_clock(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-       enum port port = encoder->port;
-
-       _cnl_ddi_disable_clock(i915, DPCLKA_CFGCR0,
-                              DPCLKA_CFGCR0_DDI_CLK_OFF(port));
-}
-
-static bool cnl_ddi_is_clock_enabled(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-       enum port port = encoder->port;
-
-       return _cnl_ddi_is_clock_enabled(i915, DPCLKA_CFGCR0,
-                                        DPCLKA_CFGCR0_DDI_CLK_OFF(port));
-}
-
-static struct intel_shared_dpll *cnl_ddi_get_pll(struct intel_encoder *encoder)
-{
-       struct drm_i915_private *i915 = to_i915(encoder->base.dev);
-       enum port port = encoder->port;
-
-       return _cnl_ddi_get_pll(i915, DPCLKA_CFGCR0,
-                               DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port),
-                               DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port));
-}
-
 static struct intel_shared_dpll *bxt_ddi_get_pll(struct intel_encoder *encoder)
 {
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
@@ -2196,7 +2037,7 @@ void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
                ddi_clk_needed = false;
        }
 
-       if (ddi_clk_needed || !encoder->disable_clock ||
+       if (ddi_clk_needed || !encoder->is_clock_enabled ||
            !encoder->is_clock_enabled(encoder))
                return;
 
@@ -2477,6 +2318,116 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state)
                     OVERLAP_PIXELS_MASK, dss1);
 }
 
+static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
+                                 const struct intel_crtc_state *crtc_state,
+                                 const struct drm_connector_state *conn_state)
+{
+       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+       struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+       bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
+       int level = intel_ddi_dp_level(intel_dp);
+
+       intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
+                                crtc_state->lane_count);
+
+       /*
+        * 1. Enable Power Wells
+        *
+        * This was handled at the beginning of intel_atomic_commit_tail(),
+        * before we called down into this function.
+        */
+
+       /* 2. Enable Panel Power if PPS is required */
+       intel_pps_on(intel_dp);
+
+       /*
+        * 3. Enable the port PLL.
+        */
+       intel_ddi_enable_clock(encoder, crtc_state);
+
+       /* 4. Enable IO power */
+       if (!intel_phy_is_tc(dev_priv, phy) ||
+           dig_port->tc_mode != TC_PORT_TBT_ALT)
+               dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
+                                                                  dig_port->ddi_io_power_domain);
+
+       /*
+        * 5. The rest of the below are substeps under the bspec's "Enable and
+        * Train Display Port" step.  Note that steps that are specific to
+        * MST will be handled by intel_mst_pre_enable_dp() before/after it
+        * calls into this function.  Also intel_mst_pre_enable_dp() only calls
+        * us when active_mst_links==0, so any steps designated for "single
+        * stream or multi-stream master transcoder" can just be performed
+        * unconditionally here.
+        */
+
+       /*
+        * 5.a Configure Transcoder Clock Select to direct the Port clock to the
+        * Transcoder.
+        */
+       intel_ddi_enable_pipe_clock(encoder, crtc_state);
+
+       /* 5.b Not relevant to i915 for now */
+
+       /*
+        * 5.c Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
+        * Transport Select
+        */
+       intel_ddi_config_transcoder_func(encoder, crtc_state);
+
+       /*
+        * 5.d Configure & enable DP_TP_CTL with link training pattern 1
+        * selected
+        *
+        * This will be handled by the intel_dp_start_link_train() farther
+        * down this function.
+        */
+
+       /* 5.e Configure voltage swing and related IO settings */
+       intel_snps_phy_ddi_vswing_sequence(encoder, level);
+
+       /*
+        * 5.f Configure and enable DDI_BUF_CTL
+        * 5.g Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout
+        *     after 1200 us.
+        *
+        * We only configure what the register value will be here.  Actual
+        * enabling happens during link training farther down.
+        */
+       intel_ddi_init_dp_buf_reg(encoder, crtc_state);
+
+       if (!is_mst)
+               intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+
+       intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+       /*
+        * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
+        * in the FEC_CONFIGURATION register to 1 before initiating link
+        * training
+        */
+       intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+
+       /*
+        * 5.h Follow DisplayPort specification training sequence (see notes for
+        *     failure handling)
+        * 5.i If DisplayPort multi-stream - Set DP_TP_CTL link training to Idle
+        *     Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent)
+        *     (timeout after 800 us)
+        */
+       intel_dp_start_link_train(intel_dp, crtc_state);
+
+       /* 5.j Set DP_TP_CTL link training to Normal */
+       if (!is_trans_port_sync_mode(crtc_state))
+               intel_dp_stop_link_train(intel_dp, crtc_state);
+
+       /* 5.k Configure and enable FEC if needed */
+       intel_ddi_enable_fec(encoder, crtc_state);
+       intel_dsc_enable(encoder, crtc_state);
+}
+
 static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
                                  struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state,
@@ -2657,8 +2608,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
 
        if (DISPLAY_VER(dev_priv) >= 11)
                icl_ddi_vswing_sequence(encoder, crtc_state, level);
-       else if (IS_CANNONLAKE(dev_priv))
-               cnl_ddi_vswing_sequence(encoder, crtc_state, level);
        else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                bxt_ddi_vswing_sequence(encoder, crtc_state, level);
        else
@@ -2694,7 +2643,9 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
 {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
 
-       if (DISPLAY_VER(dev_priv) >= 12)
+       if (IS_DG2(dev_priv))
+               dg2_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
+       else if (DISPLAY_VER(dev_priv) >= 12)
                tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
        else
                hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -3101,12 +3052,12 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
                            "[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
                            connector->base.id, connector->name);
 
-       if (DISPLAY_VER(dev_priv) >= 12)
+       if (IS_DG2(dev_priv))
+               intel_snps_phy_ddi_vswing_sequence(encoder, U32_MAX);
+       else if (DISPLAY_VER(dev_priv) >= 12)
                tgl_ddi_vswing_sequence(encoder, crtc_state, level);
        else if (DISPLAY_VER(dev_priv) == 11)
                icl_ddi_vswing_sequence(encoder, crtc_state, level);
-       else if (IS_CANNONLAKE(dev_priv))
-               cnl_ddi_vswing_sequence(encoder, crtc_state, level);
        else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                bxt_ddi_vswing_sequence(encoder, crtc_state, level);
        else
@@ -3204,12 +3155,6 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
 
        intel_dp->link_trained = false;
 
-       if (old_crtc_state->has_audio)
-               intel_audio_codec_disable(encoder,
-                                         old_crtc_state, old_conn_state);
-
-       intel_edp_drrs_disable(intel_dp, old_crtc_state);
-       intel_psr_disable(intel_dp, old_crtc_state);
        intel_edp_backlight_off(old_conn_state);
        /* Disable the decompression in DP Sink */
        intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
@@ -3227,10 +3172,6 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
        struct drm_i915_private *i915 = to_i915(encoder->base.dev);
        struct drm_connector *connector = old_conn_state->connector;
 
-       if (old_crtc_state->has_audio)
-               intel_audio_codec_disable(encoder,
-                                         old_crtc_state, old_conn_state);
-
        if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
                                               false, false))
                drm_dbg_kms(&i915->drm,
@@ -3238,6 +3179,25 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
                            connector->base.id, connector->name);
 }
 
+static void intel_pre_disable_ddi(struct intel_atomic_state *state,
+                                 struct intel_encoder *encoder,
+                                 const struct intel_crtc_state *old_crtc_state,
+                                 const struct drm_connector_state *old_conn_state)
+{
+       struct intel_dp *intel_dp;
+
+       if (old_crtc_state->has_audio)
+               intel_audio_codec_disable(encoder, old_crtc_state,
+                                         old_conn_state);
+
+       if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_HDMI))
+               return;
+
+       intel_dp = enc_to_intel_dp(encoder);
+       intel_edp_drrs_disable(intel_dp, old_crtc_state);
+       intel_psr_disable(intel_dp, old_crtc_state);
+}
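
The audio, DRRS and PSR teardown dropped from intel_disable_ddi_dp() and intel_disable_ddi_hdmi() above now runs from this new hook, which intel_ddi_init() wires up as encoder->pre_disable later in this diff. A rough sketch of the assumed ordering follows; the caller below is illustrative only, not the actual i915 atomic helper:

	/* Illustrative caller showing the assumed hook order: pre_disable runs
	 * before the protocol-specific disable, and post_disable follows later
	 * in the real sequence. */
	static void example_ddi_teardown(struct intel_atomic_state *state,
					 struct intel_encoder *encoder,
					 const struct intel_crtc_state *old_crtc_state,
					 const struct drm_connector_state *old_conn_state)
	{
		if (encoder->pre_disable)
			encoder->pre_disable(state, encoder, old_crtc_state,
					     old_conn_state);	/* audio/DRRS/PSR off */

		encoder->disable(state, encoder, old_crtc_state, old_conn_state);
	}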
+
 static void intel_disable_ddi(struct intel_atomic_state *state,
                              struct intel_encoder *encoder,
                              const struct intel_crtc_state *old_crtc_state,
@@ -3454,7 +3414,7 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
        if (cpu_transcoder == TRANSCODER_EDP)
                return false;
 
-       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO))
+       if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO_MMIO))
                return false;
 
        return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) &
@@ -3470,8 +3430,6 @@ void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
                crtc_state->min_voltage_level = 3;
        else if (DISPLAY_VER(dev_priv) >= 11 && crtc_state->port_clock > 594000)
                crtc_state->min_voltage_level = 1;
-       else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
-               crtc_state->min_voltage_level = 2;
 }
 
 static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
@@ -3745,6 +3703,15 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
                                                     &crtc_state->dpll_hw_state);
 }
 
+static void dg2_ddi_get_config(struct intel_encoder *encoder,
+                               struct intel_crtc_state *crtc_state)
+{
+       intel_mpllb_readout_hw_state(encoder, &crtc_state->mpllb_state);
+       crtc_state->port_clock = intel_mpllb_calc_port_clock(encoder, &crtc_state->mpllb_state);
+
+       intel_ddi_get_config(encoder, crtc_state);
+}
+
 static void adls_ddi_get_config(struct intel_encoder *encoder,
                                struct intel_crtc_state *crtc_state)
 {
@@ -3812,13 +3779,6 @@ static void icl_ddi_tc_get_config(struct intel_encoder *encoder,
        intel_ddi_get_config(encoder, crtc_state);
 }
 
-static void cnl_ddi_get_config(struct intel_encoder *encoder,
-                              struct intel_crtc_state *crtc_state)
-{
-       intel_ddi_get_clock(encoder, crtc_state, cnl_ddi_get_pll(encoder));
-       intel_ddi_get_config(encoder, crtc_state);
-}
-
 static void bxt_ddi_get_config(struct intel_encoder *encoder,
                               struct intel_crtc_state *crtc_state)
 {
@@ -4065,12 +4025,12 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
        dig_port->dp.set_link_train = intel_ddi_set_link_train;
        dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
 
-       if (DISPLAY_VER(dev_priv) >= 12)
+       if (IS_DG2(dev_priv))
+               dig_port->dp.set_signal_levels = dg2_set_signal_levels;
+       else if (DISPLAY_VER(dev_priv) >= 12)
                dig_port->dp.set_signal_levels = tgl_set_signal_levels;
        else if (DISPLAY_VER(dev_priv) >= 11)
                dig_port->dp.set_signal_levels = icl_set_signal_levels;
-       else if (IS_CANNONLAKE(dev_priv))
-               dig_port->dp.set_signal_levels = cnl_set_signal_levels;
        else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                dig_port->dp.set_signal_levels = bxt_set_signal_levels;
        else
@@ -4317,15 +4277,6 @@ static bool intel_ddi_a_force_4_lanes(struct intel_digital_port *dig_port)
        if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                return true;
 
-       /* Cannonlake: Most SKUs don't support DDI_E, and the only
-        *             one that does also has a full A/E split called
-        *             DDI_F, which makes DDI_E useless. However, for this
-        *             case let's trust the VBT info.
-        */
-       if (IS_CANNONLAKE(dev_priv) &&
-           !intel_bios_is_port_present(dev_priv, PORT_E))
-               return true;
-
        return false;
 }
 
@@ -4430,15 +4381,6 @@ static enum hpd_pin ehl_hpd_pin(struct drm_i915_private *dev_priv,
        return HPD_PORT_A + port - PORT_A;
 }
 
-static enum hpd_pin cnl_hpd_pin(struct drm_i915_private *dev_priv,
-                               enum port port)
-{
-       if (port == PORT_F)
-               return HPD_PORT_E;
-
-       return HPD_PORT_A + port - PORT_A;
-}
-
 static enum hpd_pin skl_hpd_pin(struct drm_i915_private *dev_priv, enum port port)
 {
        if (HAS_PCH_TGP(dev_priv))
@@ -4590,6 +4532,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
        encoder->enable = intel_enable_ddi;
        encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
        encoder->pre_enable = intel_ddi_pre_enable;
+       encoder->pre_disable = intel_pre_disable_ddi;
        encoder->disable = intel_disable_ddi;
        encoder->post_disable = intel_ddi_post_disable;
        encoder->update_pipe = intel_ddi_update_pipe;
@@ -4606,7 +4549,11 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
        encoder->cloneable = 0;
        encoder->pipe_mask = ~0;
 
-       if (IS_ALDERLAKE_S(dev_priv)) {
+       if (IS_DG2(dev_priv)) {
+               encoder->enable_clock = intel_mpllb_enable;
+               encoder->disable_clock = intel_mpllb_disable;
+               encoder->get_config = dg2_ddi_get_config;
+       } else if (IS_ALDERLAKE_S(dev_priv)) {
                encoder->enable_clock = adls_ddi_enable_clock;
                encoder->disable_clock = adls_ddi_disable_clock;
                encoder->is_clock_enabled = adls_ddi_is_clock_enabled;
@@ -4645,11 +4592,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
                        encoder->is_clock_enabled = icl_ddi_combo_is_clock_enabled;
                        encoder->get_config = icl_ddi_combo_get_config;
                }
-       } else if (IS_CANNONLAKE(dev_priv)) {
-               encoder->enable_clock = cnl_ddi_enable_clock;
-               encoder->disable_clock = cnl_ddi_disable_clock;
-               encoder->is_clock_enabled = cnl_ddi_is_clock_enabled;
-               encoder->get_config = cnl_ddi_get_config;
        } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
                /* BXT/GLK have fixed PLL->port mapping */
                encoder->get_config = bxt_ddi_get_config;
@@ -4679,8 +4621,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
                encoder->hpd_pin = ehl_hpd_pin(dev_priv, port);
        else if (DISPLAY_VER(dev_priv) == 11)
                encoder->hpd_pin = icl_hpd_pin(dev_priv, port);
-       else if (IS_CANNONLAKE(dev_priv))
-               encoder->hpd_pin = cnl_hpd_pin(dev_priv, port);
        else if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
                encoder->hpd_pin = skl_hpd_pin(dev_priv, port);
        else
index 63b1ae8..ba2c08f 100644 (file)
@@ -417,199 +417,19 @@ static const struct intel_ddi_buf_trans bxt_ddi_translations_hdmi = {
        .hdmi_default_entry = ARRAY_SIZE(_bxt_ddi_translations_hdmi) - 1,
 };
 
-/* Voltage Swing Programming for VccIO 0.85V for DP */
-static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_dp_0_85V[] = {
-                                                       /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x5D, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
-       { .cnl = { 0xA, 0x6A, 0x38, 0x00, 0x07 } },     /* 350   500      3.1   */
-       { .cnl = { 0xB, 0x7A, 0x32, 0x00, 0x0D } },     /* 350   700      6.0   */
-       { .cnl = { 0x6, 0x7C, 0x2D, 0x00, 0x12 } },     /* 350   900      8.2   */
-       { .cnl = { 0xA, 0x69, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
-       { .cnl = { 0xB, 0x7A, 0x36, 0x00, 0x09 } },     /* 500   700      2.9   */
-       { .cnl = { 0x6, 0x7C, 0x30, 0x00, 0x0F } },     /* 500   900      5.1   */
-       { .cnl = { 0xB, 0x7D, 0x3C, 0x00, 0x03 } },     /* 650   725      0.9   */
-       { .cnl = { 0x6, 0x7C, 0x34, 0x00, 0x0B } },     /* 600   900      3.5   */
-       { .cnl = { 0x6, 0x7B, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
-};
-
-static const struct intel_ddi_buf_trans cnl_ddi_translations_dp_0_85V = {
-       .entries = _cnl_ddi_translations_dp_0_85V,
-       .num_entries = ARRAY_SIZE(_cnl_ddi_translations_dp_0_85V),
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for HDMI */
-static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_hdmi_0_85V[] = {
-                                                       /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x60, 0x3F, 0x00, 0x00 } },     /* 450   450      0.0   */
-       { .cnl = { 0xB, 0x73, 0x36, 0x00, 0x09 } },     /* 450   650      3.2   */
-       { .cnl = { 0x6, 0x7F, 0x31, 0x00, 0x0E } },     /* 450   850      5.5   */
-       { .cnl = { 0xB, 0x73, 0x3F, 0x00, 0x00 } },     /* 650   650      0.0   */
-       { .cnl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } },     /* 650   850      2.3   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 850   850      0.0   */
-       { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } },     /* 600   850      3.0   */
-};
-
-static const struct intel_ddi_buf_trans cnl_ddi_translations_hdmi_0_85V = {
-       .entries = _cnl_ddi_translations_hdmi_0_85V,
-       .num_entries = ARRAY_SIZE(_cnl_ddi_translations_hdmi_0_85V),
-       .hdmi_default_entry = ARRAY_SIZE(_cnl_ddi_translations_hdmi_0_85V) - 1,
-};
-
-/* Voltage Swing Programming for VccIO 0.85V for eDP */
-static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_edp_0_85V[] = {
-                                                       /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x66, 0x3A, 0x00, 0x05 } },     /* 384   500      2.3   */
-       { .cnl = { 0x0, 0x7F, 0x38, 0x00, 0x07 } },     /* 153   200      2.3   */
-       { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } },     /* 192   250      2.3   */
-       { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } },     /* 230   300      2.3   */
-       { .cnl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } },     /* 269   350      2.3   */
-       { .cnl = { 0xA, 0x66, 0x3C, 0x00, 0x03 } },     /* 446   500      1.0   */
-       { .cnl = { 0xB, 0x70, 0x3C, 0x00, 0x03 } },     /* 460   600      2.3   */
-       { .cnl = { 0xC, 0x75, 0x3C, 0x00, 0x03 } },     /* 537   700      2.3   */
-       { .cnl = { 0x2, 0x7F, 0x3F, 0x00, 0x00 } },     /* 400   400      0.0   */
-};
-
-static const struct intel_ddi_buf_trans cnl_ddi_translations_edp_0_85V = {
-       .entries = _cnl_ddi_translations_edp_0_85V,
-       .num_entries = ARRAY_SIZE(_cnl_ddi_translations_edp_0_85V),
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for DP */
-static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_dp_0_95V[] = {
-                                                       /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x5D, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
-       { .cnl = { 0xA, 0x6A, 0x38, 0x00, 0x07 } },     /* 350   500      3.1   */
-       { .cnl = { 0xB, 0x7A, 0x32, 0x00, 0x0D } },     /* 350   700      6.0   */
-       { .cnl = { 0x6, 0x7C, 0x2D, 0x00, 0x12 } },     /* 350   900      8.2   */
-       { .cnl = { 0xA, 0x69, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
-       { .cnl = { 0xB, 0x7A, 0x36, 0x00, 0x09 } },     /* 500   700      2.9   */
-       { .cnl = { 0x6, 0x7C, 0x30, 0x00, 0x0F } },     /* 500   900      5.1   */
-       { .cnl = { 0xB, 0x7D, 0x3C, 0x00, 0x03 } },     /* 650   725      0.9   */
-       { .cnl = { 0x6, 0x7C, 0x34, 0x00, 0x0B } },     /* 600   900      3.5   */
-       { .cnl = { 0x6, 0x7B, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
-};
-
-static const struct intel_ddi_buf_trans cnl_ddi_translations_dp_0_95V = {
-       .entries = _cnl_ddi_translations_dp_0_95V,
-       .num_entries = ARRAY_SIZE(_cnl_ddi_translations_dp_0_95V),
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for HDMI */
-static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_hdmi_0_95V[] = {
-                                                       /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x5C, 0x3F, 0x00, 0x00 } },     /* 400   400      0.0   */
-       { .cnl = { 0xB, 0x69, 0x37, 0x00, 0x08 } },     /* 400   600      3.5   */
-       { .cnl = { 0x5, 0x76, 0x31, 0x00, 0x0E } },     /* 400   800      6.0   */
-       { .cnl = { 0xA, 0x5E, 0x3F, 0x00, 0x00 } },     /* 450   450      0.0   */
-       { .cnl = { 0xB, 0x69, 0x3F, 0x00, 0x00 } },     /* 600   600      0.0   */
-       { .cnl = { 0xB, 0x79, 0x35, 0x00, 0x0A } },     /* 600   850      3.0   */
-       { .cnl = { 0x6, 0x7D, 0x32, 0x00, 0x0D } },     /* 600   1000     4.4   */
-       { .cnl = { 0x5, 0x76, 0x3F, 0x00, 0x00 } },     /* 800   800      0.0   */
-       { .cnl = { 0x6, 0x7D, 0x39, 0x00, 0x06 } },     /* 800   1000     1.9   */
-       { .cnl = { 0x6, 0x7F, 0x39, 0x00, 0x06 } },     /* 850   1050     1.8   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 1050  1050     0.0   */
-};
-
-static const struct intel_ddi_buf_trans cnl_ddi_translations_hdmi_0_95V = {
-       .entries = _cnl_ddi_translations_hdmi_0_95V,
-       .num_entries = ARRAY_SIZE(_cnl_ddi_translations_hdmi_0_95V),
-       .hdmi_default_entry = ARRAY_SIZE(_cnl_ddi_translations_hdmi_0_95V) - 1,
-};
-
-/* Voltage Swing Programming for VccIO 0.95V for eDP */
-static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_edp_0_95V[] = {
-                                                       /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x61, 0x3A, 0x00, 0x05 } },     /* 384   500      2.3   */
-       { .cnl = { 0x0, 0x7F, 0x38, 0x00, 0x07 } },     /* 153   200      2.3   */
-       { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } },     /* 192   250      2.3   */
-       { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } },     /* 230   300      2.3   */
-       { .cnl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } },     /* 269   350      2.3   */
-       { .cnl = { 0xA, 0x61, 0x3C, 0x00, 0x03 } },     /* 446   500      1.0   */
-       { .cnl = { 0xB, 0x68, 0x39, 0x00, 0x06 } },     /* 460   600      2.3   */
-       { .cnl = { 0xC, 0x6E, 0x39, 0x00, 0x06 } },     /* 537   700      2.3   */
-       { .cnl = { 0x4, 0x7F, 0x3A, 0x00, 0x05 } },     /* 460   600      2.3   */
-       { .cnl = { 0x2, 0x7F, 0x3F, 0x00, 0x00 } },     /* 400   400      0.0   */
-};
-
-static const struct intel_ddi_buf_trans cnl_ddi_translations_edp_0_95V = {
-       .entries = _cnl_ddi_translations_edp_0_95V,
-       .num_entries = ARRAY_SIZE(_cnl_ddi_translations_edp_0_95V),
-};
-
-/* Voltage Swing Programming for VccIO 1.05V for DP */
-static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_dp_1_05V[] = {
-                                                       /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x58, 0x3F, 0x00, 0x00 } },     /* 400   400      0.0   */
-       { .cnl = { 0xB, 0x64, 0x37, 0x00, 0x08 } },     /* 400   600      3.5   */
-       { .cnl = { 0x5, 0x70, 0x31, 0x00, 0x0E } },     /* 400   800      6.0   */
-       { .cnl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } },     /* 400   1050     8.4   */
-       { .cnl = { 0xB, 0x64, 0x3F, 0x00, 0x00 } },     /* 600   600      0.0   */
-       { .cnl = { 0x5, 0x73, 0x35, 0x00, 0x0A } },     /* 600   850      3.0   */
-       { .cnl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } },     /* 550   1050     5.6   */
-       { .cnl = { 0x5, 0x76, 0x3E, 0x00, 0x01 } },     /* 850   900      0.5   */
-       { .cnl = { 0x6, 0x7F, 0x36, 0x00, 0x09 } },     /* 750   1050     2.9   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 1050  1050     0.0   */
-};
-
-static const struct intel_ddi_buf_trans cnl_ddi_translations_dp_1_05V = {
-       .entries = _cnl_ddi_translations_dp_1_05V,
-       .num_entries = ARRAY_SIZE(_cnl_ddi_translations_dp_1_05V),
-};
-
-/* Voltage Swing Programming for VccIO 1.05V for HDMI */
-static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_hdmi_1_05V[] = {
-                                                       /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x58, 0x3F, 0x00, 0x00 } },     /* 400   400      0.0   */
-       { .cnl = { 0xB, 0x64, 0x37, 0x00, 0x08 } },     /* 400   600      3.5   */
-       { .cnl = { 0x5, 0x70, 0x31, 0x00, 0x0E } },     /* 400   800      6.0   */
-       { .cnl = { 0xA, 0x5B, 0x3F, 0x00, 0x00 } },     /* 450   450      0.0   */
-       { .cnl = { 0xB, 0x64, 0x3F, 0x00, 0x00 } },     /* 600   600      0.0   */
-       { .cnl = { 0x5, 0x73, 0x35, 0x00, 0x0A } },     /* 600   850      3.0   */
-       { .cnl = { 0x6, 0x7C, 0x32, 0x00, 0x0D } },     /* 600   1000     4.4   */
-       { .cnl = { 0x5, 0x70, 0x3F, 0x00, 0x00 } },     /* 800   800      0.0   */
-       { .cnl = { 0x6, 0x7C, 0x39, 0x00, 0x06 } },     /* 800   1000     1.9   */
-       { .cnl = { 0x6, 0x7F, 0x39, 0x00, 0x06 } },     /* 850   1050     1.8   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 1050  1050     0.0   */
-};
-
-static const struct intel_ddi_buf_trans cnl_ddi_translations_hdmi_1_05V = {
-       .entries = _cnl_ddi_translations_hdmi_1_05V,
-       .num_entries = ARRAY_SIZE(_cnl_ddi_translations_hdmi_1_05V),
-       .hdmi_default_entry = ARRAY_SIZE(_cnl_ddi_translations_hdmi_1_05V) - 1,
-};
-
-/* Voltage Swing Programming for VccIO 1.05V for eDP */
-static const union intel_ddi_buf_trans_entry _cnl_ddi_translations_edp_1_05V[] = {
-                                                       /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x5E, 0x3A, 0x00, 0x05 } },     /* 384   500      2.3   */
-       { .cnl = { 0x0, 0x7F, 0x38, 0x00, 0x07 } },     /* 153   200      2.3   */
-       { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } },     /* 192   250      2.3   */
-       { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } },     /* 230   300      2.3   */
-       { .cnl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } },     /* 269   350      2.3   */
-       { .cnl = { 0xA, 0x5E, 0x3C, 0x00, 0x03 } },     /* 446   500      1.0   */
-       { .cnl = { 0xB, 0x64, 0x39, 0x00, 0x06 } },     /* 460   600      2.3   */
-       { .cnl = { 0xE, 0x6A, 0x39, 0x00, 0x06 } },     /* 537   700      2.3   */
-       { .cnl = { 0x2, 0x7F, 0x3F, 0x00, 0x00 } },     /* 400   400      0.0   */
-};
-
-static const struct intel_ddi_buf_trans cnl_ddi_translations_edp_1_05V = {
-       .entries = _cnl_ddi_translations_edp_1_05V,
-       .num_entries = ARRAY_SIZE(_cnl_ddi_translations_edp_1_05V),
-};
-
 /* icl_combo_phy_ddi_translations */
 static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
-       { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } },     /* 350   500      3.1   */
-       { .cnl = { 0xC, 0x71, 0x2F, 0x00, 0x10 } },     /* 350   700      6.0   */
-       { .cnl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } },     /* 350   900      8.2   */
-       { .cnl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
-       { .cnl = { 0xC, 0x73, 0x34, 0x00, 0x0B } },     /* 500   700      2.9   */
-       { .cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } },     /* 500   900      5.1   */
-       { .cnl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } },     /* 650   700      0.6   */
-       { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } },     /* 600   900      3.5   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
+       { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } },     /* 350   500      3.1   */
+       { .icl = { 0xC, 0x71, 0x2F, 0x00, 0x10 } },     /* 350   700      6.0   */
+       { .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } },     /* 350   900      8.2   */
+       { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
+       { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } },     /* 500   700      2.9   */
+       { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } },     /* 500   900      5.1   */
+       { .icl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } },     /* 650   700      0.6   */
+       { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } },     /* 600   900      3.5   */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
 };
 
 static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3 = {
@@ -619,16 +439,16 @@ static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2_e
 
 static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_edp_hbr2[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0x0, 0x7F, 0x3F, 0x00, 0x00 } },     /* 200   200      0.0   */
-       { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } },     /* 200   250      1.9   */
-       { .cnl = { 0x1, 0x7F, 0x33, 0x00, 0x0C } },     /* 200   300      3.5   */
-       { .cnl = { 0x9, 0x7F, 0x31, 0x00, 0x0E } },     /* 200   350      4.9   */
-       { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 250   250      0.0   */
-       { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } },     /* 250   300      1.6   */
-       { .cnl = { 0x9, 0x7F, 0x35, 0x00, 0x0A } },     /* 250   350      2.9   */
-       { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } },     /* 300   300      0.0   */
-       { .cnl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } },     /* 300   350      1.3   */
-       { .cnl = { 0x9, 0x7F, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0x0, 0x7F, 0x3F, 0x00, 0x00 } },     /* 200   200      0.0   */
+       { .icl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } },     /* 200   250      1.9   */
+       { .icl = { 0x1, 0x7F, 0x33, 0x00, 0x0C } },     /* 200   300      3.5   */
+       { .icl = { 0x9, 0x7F, 0x31, 0x00, 0x0E } },     /* 200   350      4.9   */
+       { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 250   250      0.0   */
+       { .icl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } },     /* 250   300      1.6   */
+       { .icl = { 0x9, 0x7F, 0x35, 0x00, 0x0A } },     /* 250   350      2.9   */
+       { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } },     /* 300   300      0.0   */
+       { .icl = { 0x9, 0x7F, 0x38, 0x00, 0x07 } },     /* 300   350      1.3   */
+       { .icl = { 0x9, 0x7F, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
 };
 
 static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2 = {
@@ -638,13 +458,13 @@ static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2
 
 static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_hdmi[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x60, 0x3F, 0x00, 0x00 } },     /* 450   450      0.0   */
-       { .cnl = { 0xB, 0x73, 0x36, 0x00, 0x09 } },     /* 450   650      3.2   */
-       { .cnl = { 0x6, 0x7F, 0x31, 0x00, 0x0E } },     /* 450   850      5.5   */
-       { .cnl = { 0xB, 0x73, 0x3F, 0x00, 0x00 } },     /* 650   650      0.0   ALS */
-       { .cnl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } },     /* 650   850      2.3   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 850   850      0.0   */
-       { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } },     /* 600   850      3.0   */
+       { .icl = { 0xA, 0x60, 0x3F, 0x00, 0x00 } },     /* 450   450      0.0   */
+       { .icl = { 0xB, 0x73, 0x36, 0x00, 0x09 } },     /* 450   650      3.2   */
+       { .icl = { 0x6, 0x7F, 0x31, 0x00, 0x0E } },     /* 450   850      5.5   */
+       { .icl = { 0xB, 0x73, 0x3F, 0x00, 0x00 } },     /* 650   650      0.0   ALS */
+       { .icl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } },     /* 650   850      2.3   */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 850   850      0.0   */
+       { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } },     /* 600   850      3.0   */
 };
 
 static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi = {
@@ -655,16 +475,16 @@ static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi = {
 
 static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_dp[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
-       { .cnl = { 0xA, 0x47, 0x36, 0x00, 0x09 } },     /* 350   500      3.1   */
-       { .cnl = { 0xC, 0x64, 0x34, 0x00, 0x0B } },     /* 350   700      6.0   */
-       { .cnl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } },     /* 350   900      8.2   */
-       { .cnl = { 0xA, 0x46, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
-       { .cnl = { 0xC, 0x64, 0x38, 0x00, 0x07 } },     /* 500   700      2.9   */
-       { .cnl = { 0x6, 0x7F, 0x32, 0x00, 0x0D } },     /* 500   900      5.1   */
-       { .cnl = { 0xC, 0x61, 0x3F, 0x00, 0x00 } },     /* 650   700      0.6   */
-       { .cnl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } },     /* 600   900      3.5   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
+       { .icl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0xA, 0x47, 0x36, 0x00, 0x09 } },     /* 350   500      3.1   */
+       { .icl = { 0xC, 0x64, 0x34, 0x00, 0x0B } },     /* 350   700      6.0   */
+       { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } },     /* 350   900      8.2   */
+       { .icl = { 0xA, 0x46, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
+       { .icl = { 0xC, 0x64, 0x38, 0x00, 0x07 } },     /* 500   700      2.9   */
+       { .icl = { 0x6, 0x7F, 0x32, 0x00, 0x0D } },     /* 500   900      5.1   */
+       { .icl = { 0xC, 0x61, 0x3F, 0x00, 0x00 } },     /* 650   700      0.6   */
+       { .icl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } },     /* 600   900      3.5   */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
 };
 
 static const struct intel_ddi_buf_trans ehl_combo_phy_ddi_translations_dp = {
@@ -674,16 +494,16 @@ static const struct intel_ddi_buf_trans ehl_combo_phy_ddi_translations_dp = {
 
 static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_edp_hbr2[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 200   200      0.0   */
-       { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 200   250      1.9   */
-       { .cnl = { 0x1, 0x7F, 0x3D, 0x00, 0x02 } },     /* 200   300      3.5   */
-       { .cnl = { 0xA, 0x35, 0x39, 0x00, 0x06 } },     /* 200   350      4.9   */
-       { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 250   250      0.0   */
-       { .cnl = { 0x1, 0x7F, 0x3C, 0x00, 0x03 } },     /* 250   300      1.6   */
-       { .cnl = { 0xA, 0x35, 0x39, 0x00, 0x06 } },     /* 250   350      2.9   */
-       { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } },     /* 300   300      0.0   */
-       { .cnl = { 0xA, 0x35, 0x38, 0x00, 0x07 } },     /* 300   350      1.3   */
-       { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 200   200      0.0   */
+       { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 200   250      1.9   */
+       { .icl = { 0x1, 0x7F, 0x3D, 0x00, 0x02 } },     /* 200   300      3.5   */
+       { .icl = { 0xA, 0x35, 0x39, 0x00, 0x06 } },     /* 200   350      4.9   */
+       { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 250   250      0.0   */
+       { .icl = { 0x1, 0x7F, 0x3C, 0x00, 0x03 } },     /* 250   300      1.6   */
+       { .icl = { 0xA, 0x35, 0x39, 0x00, 0x06 } },     /* 250   350      2.9   */
+       { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } },     /* 300   300      0.0   */
+       { .icl = { 0xA, 0x35, 0x38, 0x00, 0x07 } },     /* 300   350      1.3   */
+       { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
 };
 
 static const struct intel_ddi_buf_trans ehl_combo_phy_ddi_translations_edp_hbr2 = {
@@ -693,16 +513,16 @@ static const struct intel_ddi_buf_trans ehl_combo_phy_ddi_translations_edp_hbr2
 
 static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp_hbr[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 200   200      0.0   */
-       { .cnl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } },     /* 200   250      1.9   */
-       { .cnl = { 0x1, 0x7F, 0x33, 0x00, 0x0C } },     /* 200   300      3.5   */
-       { .cnl = { 0xA, 0x35, 0x36, 0x00, 0x09 } },     /* 200   350      4.9   */
-       { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 250   250      0.0   */
-       { .cnl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } },     /* 250   300      1.6   */
-       { .cnl = { 0xA, 0x35, 0x35, 0x00, 0x0A } },     /* 250   350      2.9   */
-       { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } },     /* 300   300      0.0   */
-       { .cnl = { 0xA, 0x35, 0x38, 0x00, 0x07 } },     /* 300   350      1.3   */
-       { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 200   200      0.0   */
+       { .icl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } },     /* 200   250      1.9   */
+       { .icl = { 0x1, 0x7F, 0x33, 0x00, 0x0C } },     /* 200   300      3.5   */
+       { .icl = { 0xA, 0x35, 0x36, 0x00, 0x09 } },     /* 200   350      4.9   */
+       { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 250   250      0.0   */
+       { .icl = { 0x1, 0x7F, 0x38, 0x00, 0x07 } },     /* 250   300      1.6   */
+       { .icl = { 0xA, 0x35, 0x35, 0x00, 0x0A } },     /* 250   350      2.9   */
+       { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } },     /* 300   300      0.0   */
+       { .icl = { 0xA, 0x35, 0x38, 0x00, 0x07 } },     /* 300   350      1.3   */
+       { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
 };
 
 static const struct intel_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr = {
@@ -712,16 +532,16 @@ static const struct intel_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr =
 
 static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp_hbr2[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 200   200      0.0   */
-       { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 200   250      1.9   */
-       { .cnl = { 0x1, 0x7F, 0x3D, 0x00, 0x02 } },     /* 200   300      3.5   */
-       { .cnl = { 0xA, 0x35, 0x38, 0x00, 0x07 } },     /* 200   350      4.9   */
-       { .cnl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 250   250      0.0   */
-       { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } },     /* 250   300      1.6   */
-       { .cnl = { 0xA, 0x35, 0x3A, 0x00, 0x05 } },     /* 250   350      2.9   */
-       { .cnl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } },     /* 300   300      0.0   */
-       { .cnl = { 0xA, 0x35, 0x38, 0x00, 0x07 } },     /* 300   350      1.3   */
-       { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 200   200      0.0   */
+       { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 200   250      1.9   */
+       { .icl = { 0x1, 0x7F, 0x3D, 0x00, 0x02 } },     /* 200   300      3.5   */
+       { .icl = { 0xA, 0x35, 0x38, 0x00, 0x07 } },     /* 200   350      4.9   */
+       { .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } },     /* 250   250      0.0   */
+       { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } },     /* 250   300      1.6   */
+       { .icl = { 0xA, 0x35, 0x3A, 0x00, 0x05 } },     /* 250   350      2.9   */
+       { .icl = { 0x1, 0x7F, 0x3F, 0x00, 0x00 } },     /* 300   300      0.0   */
+       { .icl = { 0xA, 0x35, 0x38, 0x00, 0x07 } },     /* 300   350      1.3   */
+       { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
 };
 
 static const struct intel_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2 = {
@@ -731,16 +551,16 @@ static const struct intel_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2
 
 static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
-       { .cnl = { 0xA, 0x48, 0x35, 0x00, 0x0A } },     /* 350   500      3.1   */
-       { .cnl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } },     /* 350   700      6.0   */
-       { .cnl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } },     /* 350   900      8.2   */
-       { .cnl = { 0xA, 0x43, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
-       { .cnl = { 0xC, 0x60, 0x36, 0x00, 0x09 } },     /* 500   700      2.9   */
-       { .cnl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } },     /* 500   900      5.1   */
-       { .cnl = { 0xC, 0x60, 0x3F, 0x00, 0x00 } },     /* 650   700      0.6   */
-       { .cnl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } },     /* 600   900      3.5   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
+       { .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0xA, 0x48, 0x35, 0x00, 0x0A } },     /* 350   500      3.1   */
+       { .icl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } },     /* 350   700      6.0   */
+       { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } },     /* 350   900      8.2   */
+       { .icl = { 0xA, 0x43, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
+       { .icl = { 0xC, 0x60, 0x36, 0x00, 0x09 } },     /* 500   700      2.9   */
+       { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } },     /* 500   900      5.1   */
+       { .icl = { 0xC, 0x60, 0x3F, 0x00, 0x00 } },     /* 650   700      0.6   */
+       { .icl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } },     /* 600   900      3.5   */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
 };
 
 static const struct intel_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr = {
@@ -750,16 +570,16 @@ static const struct intel_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hb
 
 static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
-       { .cnl = { 0xA, 0x48, 0x35, 0x00, 0x0A } },     /* 350   500      3.1   */
-       { .cnl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } },     /* 350   700      6.0   */
-       { .cnl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } },     /* 350   900      8.2   */
-       { .cnl = { 0xA, 0x43, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
-       { .cnl = { 0xC, 0x60, 0x36, 0x00, 0x09 } },     /* 500   700      2.9   */
-       { .cnl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } },     /* 500   900      5.1   */
-       { .cnl = { 0xC, 0x58, 0x3F, 0x00, 0x00 } },     /* 650   700      0.6   */
-       { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } },     /* 600   900      3.5   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
+       { .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0xA, 0x48, 0x35, 0x00, 0x0A } },     /* 350   500      3.1   */
+       { .icl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } },     /* 350   700      6.0   */
+       { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } },     /* 350   900      8.2   */
+       { .icl = { 0xA, 0x43, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
+       { .icl = { 0xC, 0x60, 0x36, 0x00, 0x09 } },     /* 500   700      2.9   */
+       { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } },     /* 500   900      5.1   */
+       { .icl = { 0xC, 0x58, 0x3F, 0x00, 0x00 } },     /* 650   700      0.6   */
+       { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } },     /* 600   900      3.5   */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
 };
 
 static const struct intel_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
@@ -885,16 +705,16 @@ static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_hdmi = {
 
 static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_hbr[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
-       { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } },     /* 350   500      3.1   */
-       { .cnl = { 0xC, 0x71, 0x2F, 0x00, 0x10 } },     /* 350   700      6.0   */
-       { .cnl = { 0x6, 0x7D, 0x2B, 0x00, 0x14 } },     /* 350   900      8.2   */
-       { .cnl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
-       { .cnl = { 0xC, 0x73, 0x34, 0x00, 0x0B } },     /* 500   700      2.9   */
-       { .cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } },     /* 500   900      5.1   */
-       { .cnl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } },     /* 650   700      0.6   */
-       { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } },     /* 600   900      3.5   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
+       { .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } },     /* 350   500      3.1   */
+       { .icl = { 0xC, 0x71, 0x2F, 0x00, 0x10 } },     /* 350   700      6.0   */
+       { .icl = { 0x6, 0x7D, 0x2B, 0x00, 0x14 } },     /* 350   900      8.2   */
+       { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
+       { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } },     /* 500   700      2.9   */
+       { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } },     /* 500   900      5.1   */
+       { .icl = { 0xC, 0x6C, 0x3C, 0x00, 0x03 } },     /* 650   700      0.6   */
+       { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } },     /* 600   900      3.5   */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
 };
 
 static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr = {
@@ -904,16 +724,16 @@ static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr =
 
 static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_hbr2[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
-       { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } },     /* 350   500      3.1   */
-       { .cnl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } },     /* 350   700      6.0   */
-       { .cnl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } },     /* 350   900      8.2   */
-       { .cnl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
-       { .cnl = { 0xC, 0x63, 0x34, 0x00, 0x0B } },     /* 500   700      2.9   */
-       { .cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } },     /* 500   900      5.1   */
-       { .cnl = { 0xC, 0x61, 0x3C, 0x00, 0x03 } },     /* 650   700      0.6   */
-       { .cnl = { 0x6, 0x7B, 0x35, 0x00, 0x0A } },     /* 600   900      3.5   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
+       { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } },     /* 350   500      3.1   */
+       { .icl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } },     /* 350   700      6.0   */
+       { .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } },     /* 350   900      8.2   */
+       { .icl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
+       { .icl = { 0xC, 0x63, 0x34, 0x00, 0x0B } },     /* 500   700      2.9   */
+       { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } },     /* 500   900      5.1   */
+       { .icl = { 0xC, 0x61, 0x3C, 0x00, 0x03 } },     /* 650   700      0.6   */
+       { .icl = { 0x6, 0x7B, 0x35, 0x00, 0x0A } },     /* 600   900      3.5   */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
 };
 
 static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2 = {
@@ -923,16 +743,16 @@ static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2 =
 
 static const union intel_ddi_buf_trans_entry _tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
-       { .cnl = { 0xA, 0x4F, 0x36, 0x00, 0x09 } },     /* 350   500      3.1   */
-       { .cnl = { 0xC, 0x60, 0x32, 0x00, 0x0D } },     /* 350   700      6.0   */
-       { .cnl = { 0xC, 0x7F, 0x2D, 0x00, 0x12 } },     /* 350   900      8.2   */
-       { .cnl = { 0xC, 0x47, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
-       { .cnl = { 0xC, 0x6F, 0x36, 0x00, 0x09 } },     /* 500   700      2.9   */
-       { .cnl = { 0x6, 0x7D, 0x32, 0x00, 0x0D } },     /* 500   900      5.1   */
-       { .cnl = { 0x6, 0x60, 0x3C, 0x00, 0x03 } },     /* 650   700      0.6   */
-       { .cnl = { 0x6, 0x7F, 0x34, 0x00, 0x0B } },     /* 600   900      3.5   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
+       { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0xA, 0x4F, 0x36, 0x00, 0x09 } },     /* 350   500      3.1   */
+       { .icl = { 0xC, 0x60, 0x32, 0x00, 0x0D } },     /* 350   700      6.0   */
+       { .icl = { 0xC, 0x7F, 0x2D, 0x00, 0x12 } },     /* 350   900      8.2   */
+       { .icl = { 0xC, 0x47, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
+       { .icl = { 0xC, 0x6F, 0x36, 0x00, 0x09 } },     /* 500   700      2.9   */
+       { .icl = { 0x6, 0x7D, 0x32, 0x00, 0x0D } },     /* 500   900      5.1   */
+       { .icl = { 0x6, 0x60, 0x3C, 0x00, 0x03 } },     /* 650   700      0.6   */
+       { .icl = { 0x6, 0x7F, 0x34, 0x00, 0x0B } },     /* 600   900      3.5   */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
 };
 
 static const struct intel_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2 = {
@@ -946,15 +766,15 @@ static const struct intel_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr
  */
 static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = {
                                                        /* VS   pre-emp */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 0    0       */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 0    1       */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 0    2       */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 0    3       */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 1    0       */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 1    1       */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 1    2       */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 2    0       */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 2    1       */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 0    0       */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 0    1       */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 0    2       */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 0    3       */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 1    0       */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 1    1       */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 1    2       */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 2    0       */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 2    1       */
 };
 
 static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl = {
@@ -964,16 +784,16 @@ static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_
 
 static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_hbr[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x2F, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
-       { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } },     /* 350   500      3.1   */
-       { .cnl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } },     /* 350   700      6.0   */
-       { .cnl = { 0x6, 0x7D, 0x2A, 0x00, 0x15 } },     /* 350   900      8.2   */
-       { .cnl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
-       { .cnl = { 0xC, 0x73, 0x34, 0x00, 0x0B } },     /* 500   700      2.9   */
-       { .cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } },     /* 500   900      5.1   */
-       { .cnl = { 0xC, 0x6E, 0x3E, 0x00, 0x01 } },     /* 650   700      0.6   */
-       { .cnl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } },     /* 600   900      3.5   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
+       { .icl = { 0xA, 0x2F, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } },     /* 350   500      3.1   */
+       { .icl = { 0xC, 0x63, 0x2F, 0x00, 0x10 } },     /* 350   700      6.0   */
+       { .icl = { 0x6, 0x7D, 0x2A, 0x00, 0x15 } },     /* 350   900      8.2   */
+       { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
+       { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } },     /* 500   700      2.9   */
+       { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } },     /* 500   900      5.1   */
+       { .icl = { 0xC, 0x6E, 0x3E, 0x00, 0x01 } },     /* 650   700      0.6   */
+       { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } },     /* 600   900      3.5   */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
 };
 
 static const struct intel_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr = {
@@ -983,16 +803,16 @@ static const struct intel_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr =
 
 static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
-       { .cnl = { 0xA, 0x50, 0x38, 0x00, 0x07 } },     /* 350   500      3.1   */
-       { .cnl = { 0xC, 0x61, 0x33, 0x00, 0x0C } },     /* 350   700      6.0   */
-       { .cnl = { 0x6, 0x7F, 0x2E, 0x00, 0x11 } },     /* 350   900      8.2   */
-       { .cnl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
-       { .cnl = { 0xC, 0x5F, 0x38, 0x00, 0x07 } },     /* 500   700      2.9   */
-       { .cnl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } },     /* 500   900      5.1   */
-       { .cnl = { 0xC, 0x5F, 0x3F, 0x00, 0x00 } },     /* 650   700      0.6   */
-       { .cnl = { 0x6, 0x7E, 0x36, 0x00, 0x09 } },     /* 600   900      3.5   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
+       { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0xA, 0x50, 0x38, 0x00, 0x07 } },     /* 350   500      3.1   */
+       { .icl = { 0xC, 0x61, 0x33, 0x00, 0x0C } },     /* 350   700      6.0   */
+       { .icl = { 0x6, 0x7F, 0x2E, 0x00, 0x11 } },     /* 350   900      8.2   */
+       { .icl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
+       { .icl = { 0xC, 0x5F, 0x38, 0x00, 0x07 } },     /* 500   700      2.9   */
+       { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } },     /* 500   900      5.1   */
+       { .icl = { 0xC, 0x5F, 0x3F, 0x00, 0x00 } },     /* 650   700      0.6   */
+       { .icl = { 0x6, 0x7E, 0x36, 0x00, 0x09 } },     /* 600   900      3.5   */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
 };
 
 static const struct intel_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
@@ -1002,16 +822,16 @@ static const struct intel_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_h
 
 static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
-       { .cnl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } },     /* 350   500      3.1   */
-       { .cnl = { 0xC, 0x63, 0x30, 0x00, 0x0F } },     /* 350   700      6.0   */
-       { .cnl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } },     /* 350   900      8.2   */
-       { .cnl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
-       { .cnl = { 0xC, 0x63, 0x37, 0x00, 0x08 } },     /* 500   700      2.9   */
-       { .cnl = { 0x6, 0x7F, 0x31, 0x00, 0x0E } },     /* 500   900      5.1   */
-       { .cnl = { 0xC, 0x61, 0x3C, 0x00, 0x03 } },     /* 650   700      0.6   */
-       { .cnl = { 0x6, 0x7B, 0x35, 0x00, 0x0A } },     /* 600   900      3.5   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
+       { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } },     /* 350   500      3.1   */
+       { .icl = { 0xC, 0x63, 0x31, 0x00, 0x0E } },     /* 350   700      6.0   */
+       { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } },     /* 350   900      8.2   */
+       { .icl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
+       { .icl = { 0xC, 0x63, 0x37, 0x00, 0x08 } },     /* 500   700      2.9   */
+       { .icl = { 0x6, 0x73, 0x32, 0x00, 0x0D } },     /* 500   900      5.1   */
+       { .icl = { 0xC, 0x58, 0x3F, 0x00, 0x00 } },     /* 650   700      0.6   */
+       { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } },     /* 600   900      3.5   */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
 };
 
 static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
@@ -1021,16 +841,16 @@ static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_dp_hbr2_
 
 static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_edp_hbr2[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0x9, 0x70, 0x3C, 0x00, 0x03 } },     /* 200   200      0.0   */
-       { .cnl = { 0x9, 0x6D, 0x3A, 0x00, 0x05 } },     /* 200   250      1.9   */
-       { .cnl = { 0x9, 0x7F, 0x36, 0x00, 0x09 } },     /* 200   300      3.5   */
-       { .cnl = { 0x4, 0x59, 0x32, 0x00, 0x0D } },     /* 200   350      4.9   */
-       { .cnl = { 0x2, 0x77, 0x3A, 0x00, 0x05 } },     /* 250   250      0.0   */
-       { .cnl = { 0x2, 0x7F, 0x38, 0x00, 0x07 } },     /* 250   300      1.6   */
-       { .cnl = { 0x4, 0x5A, 0x36, 0x00, 0x09 } },     /* 250   350      2.9   */
-       { .cnl = { 0x4, 0x5E, 0x3D, 0x00, 0x04 } },     /* 300   300      0.0   */
-       { .cnl = { 0x4, 0x65, 0x38, 0x00, 0x07 } },     /* 300   350      1.3   */
-       { .cnl = { 0x4, 0x6F, 0x3A, 0x00, 0x05 } },     /* 350   350      0.0   */
+       { .icl = { 0x9, 0x73, 0x3D, 0x00, 0x02 } },     /* 200   200      0.0   */
+       { .icl = { 0x9, 0x7A, 0x3C, 0x00, 0x03 } },     /* 200   250      1.9   */
+       { .icl = { 0x9, 0x7F, 0x3B, 0x00, 0x04 } },     /* 200   300      3.5   */
+       { .icl = { 0x4, 0x6C, 0x33, 0x00, 0x0C } },     /* 200   350      4.9   */
+       { .icl = { 0x2, 0x73, 0x3A, 0x00, 0x05 } },     /* 250   250      0.0   */
+       { .icl = { 0x2, 0x7C, 0x38, 0x00, 0x07 } },     /* 250   300      1.6   */
+       { .icl = { 0x4, 0x5A, 0x36, 0x00, 0x09 } },     /* 250   350      2.9   */
+       { .icl = { 0x4, 0x57, 0x3D, 0x00, 0x02 } },     /* 300   300      0.0   */
+       { .icl = { 0x4, 0x65, 0x38, 0x00, 0x07 } },     /* 300   350      1.3   */
+       { .icl = { 0x4, 0x6C, 0x3A, 0x00, 0x05 } },     /* 350   350      0.0   */
 };
 
 static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_edp_hbr2 = {
@@ -1040,16 +860,16 @@ static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_edp_hbr2
 
 static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_edp_hbr3[] = {
                                                        /* NT mV Trans mV db    */
-       { .cnl = { 0xA, 0x5E, 0x34, 0x00, 0x0B } },     /* 350   350      0.0   */
-       { .cnl = { 0xA, 0x69, 0x32, 0x00, 0x0D } },     /* 350   500      3.1   */
-       { .cnl = { 0xC, 0x74, 0x31, 0x00, 0x0E } },     /* 350   700      6.0   */
-       { .cnl = { 0x6, 0x7F, 0x2E, 0x00, 0x11 } },     /* 350   900      8.2   */
-       { .cnl = { 0xA, 0x5C, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
-       { .cnl = { 0xC, 0x7F, 0x34, 0x00, 0x0B } },     /* 500   700      2.9   */
-       { .cnl = { 0x6, 0x7F, 0x33, 0x00, 0x0C } },     /* 500   900      5.1   */
-       { .cnl = { 0xC, 0x7F, 0x3F, 0x00, 0x00 } },     /* 650   700      0.6   */
-       { .cnl = { 0x6, 0x7F, 0x3C, 0x00, 0x03 } },     /* 600   900      3.5   */
-       { .cnl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
+       { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } },     /* 350   500      3.1   */
+       { .icl = { 0xC, 0x63, 0x31, 0x00, 0x0E } },     /* 350   700      6.0   */
+       { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } },     /* 350   900      8.2   */
+       { .icl = { 0xA, 0x47, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
+       { .icl = { 0xC, 0x63, 0x37, 0x00, 0x08 } },     /* 500   700      2.9   */
+       { .icl = { 0x6, 0x73, 0x32, 0x00, 0x0D } },     /* 500   900      5.1   */
+       { .icl = { 0xC, 0x58, 0x3F, 0x00, 0x00 } },     /* 650   700      0.6   */
+       { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } },     /* 600   900      3.5   */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
 };
 
 static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_edp_hbr3 = {
@@ -1057,6 +877,74 @@ static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_edp_hbr3
        .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_edp_hbr3),
 };
 
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_hdmi[] = {
+                                                       /* NT mV Trans mV    db   */
+       { .icl = { 0x6, 0x60, 0x3F, 0x00, 0x00 } },     /*  400    400      0.0 */
+       { .icl = { 0x6, 0x68, 0x3F, 0x00, 0x00 } },     /*  500    500      0.0 */
+       { .icl = { 0xA, 0x73, 0x3F, 0x00, 0x00 } },     /*  650    650      0.0 ALS */
+       { .icl = { 0xA, 0x78, 0x3F, 0x00, 0x00 } },     /*  800    800      0.0 */
+       { .icl = { 0xB, 0x7F, 0x3F, 0x00, 0x00 } },     /* 1000   1000      0.0 Re-timer */
+       { .icl = { 0xB, 0x7F, 0x3B, 0x00, 0x04 } },     /* Full    Red     -1.5 */
+       { .icl = { 0xB, 0x7F, 0x39, 0x00, 0x06 } },     /* Full    Red     -1.8 */
+       { .icl = { 0xB, 0x7F, 0x37, 0x00, 0x08 } },     /* Full    Red     -2.0 CRLS */
+       { .icl = { 0xB, 0x7F, 0x35, 0x00, 0x0A } },     /* Full    Red     -2.5 */
+       { .icl = { 0xB, 0x7F, 0x33, 0x00, 0x0C } },     /* Full    Red     -3.0 */
+};
+
+static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_hdmi = {
+       .entries = _adlp_combo_phy_ddi_translations_hdmi,
+       .num_entries = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_hdmi),
+       .hdmi_default_entry = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_hdmi) - 1,
+};
+
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_dp_hbr[] = {
+                                                       /* NT mV Trans mV db    */
+       { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } },     /* 350   500      3.1   */
+       { .icl = { 0xC, 0x71, 0x31, 0x00, 0x0E } },     /* 350   700      6.0   */
+       { .icl = { 0x6, 0x7F, 0x2C, 0x00, 0x13 } },     /* 350   900      8.2   */
+       { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
+       { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } },     /* 500   700      2.9   */
+       { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } },     /* 500   900      5.1   */
+       { .icl = { 0xC, 0x73, 0x3E, 0x00, 0x01 } },     /* 650   700      0.6   */
+       { .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } },     /* 600   900      3.5   */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
+};
+
+static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_dp_hbr = {
+       .entries = _adlp_combo_phy_ddi_translations_dp_hbr,
+       .num_entries = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_dp_hbr),
+};
+
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+                                                       /* NT mV Trans mV db    */
+       { .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } },     /* 350   350      0.0   */
+       { .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } },     /* 350   500      3.1   */
+       { .icl = { 0xC, 0x71, 0x2F, 0x00, 0x10 } },     /* 350   700      6.0   */
+       { .icl = { 0x6, 0x7F, 0x2B, 0x00, 0x14 } },     /* 350   900      8.2   */
+       { .icl = { 0xA, 0x4C, 0x3F, 0x00, 0x00 } },     /* 500   500      0.0   */
+       { .icl = { 0xC, 0x73, 0x34, 0x00, 0x0B } },     /* 500   700      2.9   */
+       { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } },     /* 500   900      5.1   */
+       { .icl = { 0xC, 0x63, 0x3F, 0x00, 0x00 } },     /* 650   700      0.6   */
+       { .icl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } },     /* 600   900      3.5   */
+       { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } },     /* 900   900      0.0   */
+};
+
+static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
+       .entries = _adlp_combo_phy_ddi_translations_dp_hbr2_hbr3,
+       .num_entries = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_dp_hbr2_hbr3),
+};
+
+static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_edp_hbr3 = {
+       .entries = _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+       .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3),
+};
+
+static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_edp_up_to_hbr2 = {
+       .entries = _icl_combo_phy_ddi_translations_edp_hbr2,
+       .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_edp_hbr2),
+};
+
 static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_hbr[] = {
                                        /* VS   pre-emp Non-trans mV    Pre-emph dB */
        { .dkl = { 0x7, 0x0, 0x01 } },  /* 0    0       400mV           0 dB */
@@ -1269,88 +1157,6 @@ bxt_get_buf_trans(struct intel_encoder *encoder,
                return intel_get_buf_trans(&bxt_ddi_translations_dp, n_entries);
 }
 
-static const struct intel_ddi_buf_trans *
-cnl_get_buf_trans_hdmi(struct intel_encoder *encoder, int *n_entries)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
-
-       if (voltage == VOLTAGE_INFO_0_85V) {
-               return intel_get_buf_trans(&cnl_ddi_translations_hdmi_0_85V,
-                                          n_entries);
-       } else if (voltage == VOLTAGE_INFO_0_95V) {
-               return intel_get_buf_trans(&cnl_ddi_translations_hdmi_0_95V,
-                                          n_entries);
-       } else if (voltage == VOLTAGE_INFO_1_05V) {
-               return intel_get_buf_trans(&cnl_ddi_translations_hdmi_1_05V,
-                                          n_entries);
-       } else {
-               *n_entries = 1; /* shut up gcc */
-               MISSING_CASE(voltage);
-       }
-       return NULL;
-}
-
-static const struct intel_ddi_buf_trans *
-cnl_get_buf_trans_dp(struct intel_encoder *encoder, int *n_entries)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
-
-       if (voltage == VOLTAGE_INFO_0_85V) {
-               return intel_get_buf_trans(&cnl_ddi_translations_dp_0_85V,
-                                          n_entries);
-       } else if (voltage == VOLTAGE_INFO_0_95V) {
-               return intel_get_buf_trans(&cnl_ddi_translations_dp_0_95V,
-                                          n_entries);
-       } else if (voltage == VOLTAGE_INFO_1_05V) {
-               return intel_get_buf_trans(&cnl_ddi_translations_dp_1_05V,
-                                          n_entries);
-       } else {
-               *n_entries = 1; /* shut up gcc */
-               MISSING_CASE(voltage);
-       }
-       return NULL;
-}
-
-static const struct intel_ddi_buf_trans *
-cnl_get_buf_trans_edp(struct intel_encoder *encoder, int *n_entries)
-{
-       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
-
-       if (dev_priv->vbt.edp.low_vswing) {
-               if (voltage == VOLTAGE_INFO_0_85V) {
-                       return intel_get_buf_trans(&cnl_ddi_translations_edp_0_85V,
-                                                  n_entries);
-               } else if (voltage == VOLTAGE_INFO_0_95V) {
-                       return intel_get_buf_trans(&cnl_ddi_translations_edp_0_95V,
-                                                  n_entries);
-               } else if (voltage == VOLTAGE_INFO_1_05V) {
-                       return intel_get_buf_trans(&cnl_ddi_translations_edp_1_05V,
-                                                  n_entries);
-               } else {
-                       *n_entries = 1; /* shut up gcc */
-                       MISSING_CASE(voltage);
-               }
-               return NULL;
-       } else {
-               return cnl_get_buf_trans_dp(encoder, n_entries);
-       }
-}
-
-static const struct intel_ddi_buf_trans *
-cnl_get_buf_trans(struct intel_encoder *encoder,
-                 const struct intel_crtc_state *crtc_state,
-                 int *n_entries)
-{
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
-               return cnl_get_buf_trans_hdmi(encoder, n_entries);
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
-               return cnl_get_buf_trans_edp(encoder, n_entries);
-       return cnl_get_buf_trans_dp(encoder, n_entries);
-}
-
 static const struct intel_ddi_buf_trans *
 icl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
                           const struct intel_crtc_state *crtc_state,
@@ -1661,6 +1467,52 @@ adls_get_combo_buf_trans(struct intel_encoder *encoder,
                return adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
 }
 
+static const struct intel_ddi_buf_trans *
+adlp_get_combo_buf_trans_dp(struct intel_encoder *encoder,
+                           const struct intel_crtc_state *crtc_state,
+                           int *n_entries)
+{
+       if (crtc_state->port_clock > 270000)
+               return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_dp_hbr2_hbr3, n_entries);
+       else
+               return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_dp_hbr, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+adlp_get_combo_buf_trans_edp(struct intel_encoder *encoder,
+                            const struct intel_crtc_state *crtc_state,
+                            int *n_entries)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+       if (crtc_state->port_clock > 540000) {
+               return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_edp_hbr3,
+                                          n_entries);
+       } else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
+               return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
+                                          n_entries);
+       } else if (dev_priv->vbt.edp.low_vswing) {
+               return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_edp_up_to_hbr2,
+                                          n_entries);
+       }
+
+       return adlp_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
+static const struct intel_ddi_buf_trans *
+adlp_get_combo_buf_trans(struct intel_encoder *encoder,
+                        const struct intel_crtc_state *crtc_state,
+                        int *n_entries)
+{
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+               return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_hdmi, n_entries);
+       else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
+               return adlp_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
+       else
+               return adlp_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
+}
+
 static const struct intel_ddi_buf_trans *
 tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
                         const struct intel_crtc_state *crtc_state,
@@ -1738,7 +1590,7 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
 
        if (IS_ALDERLAKE_P(i915)) {
                if (intel_phy_is_combo(i915, phy))
-                       encoder->get_buf_trans = tgl_get_combo_buf_trans;
+                       encoder->get_buf_trans = adlp_get_combo_buf_trans;
                else
                        encoder->get_buf_trans = adlp_get_dkl_buf_trans;
        } else if (IS_ALDERLAKE_S(i915)) {
@@ -1761,8 +1613,6 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
                        encoder->get_buf_trans = icl_get_combo_buf_trans;
                else
                        encoder->get_buf_trans = icl_get_mg_buf_trans;
-       } else if (IS_CANNONLAKE(i915)) {
-               encoder->get_buf_trans = cnl_get_buf_trans;
        } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
                encoder->get_buf_trans = bxt_get_buf_trans;
        } else if (IS_CML_ULX(i915) || IS_CFL_ULX(i915) || IS_KBL_ULX(i915)) {
index 05226eb..2acd720 100644
@@ -25,7 +25,7 @@ struct bxt_ddi_buf_trans {
        u8 deemphasis;
 };
 
-struct cnl_ddi_buf_trans {
+struct icl_ddi_buf_trans {
        u8 dw2_swing_sel;
        u8 dw7_n_scalar;
        u8 dw4_cursor_coeff;
@@ -48,7 +48,7 @@ struct tgl_dkl_phy_ddi_buf_trans {
 union intel_ddi_buf_trans_entry {
        struct hsw_ddi_buf_trans hsw;
        struct bxt_ddi_buf_trans bxt;
-       struct cnl_ddi_buf_trans cnl;
+       struct icl_ddi_buf_trans icl;
        struct icl_mg_phy_ddi_buf_trans mg;
        struct tgl_dkl_phy_ddi_buf_trans dkl;
 };
index eec6c9e..b25c596 100644
@@ -59,6 +59,7 @@
 #include "display/intel_hdmi.h"
 #include "display/intel_lvds.h"
 #include "display/intel_sdvo.h"
+#include "display/intel_snps_phy.h"
 #include "display/intel_tv.h"
 #include "display/intel_vdsc.h"
 #include "display/intel_vrr.h"
@@ -975,7 +976,7 @@ void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
                /* FIXME: assert CPU port conditions for SNB+ */
        }
 
-       /* Wa_22012358565:adlp */
+       /* Wa_22012358565:adl-p */
        if (DISPLAY_VER(dev_priv) == 13)
                intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
                             0, PIPE_ARB_USE_PROG_SLOTS);
@@ -1035,6 +1036,10 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
        if (!IS_I830(dev_priv))
                val &= ~PIPECONF_ENABLE;
 
+       if (DISPLAY_VER(dev_priv) >= 12)
+               intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
+                            FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
+
        intel_de_write(dev_priv, reg, val);
        if ((val & PIPECONF_ENABLE) == 0)
                intel_wait_for_pipe_off(old_crtc_state);
@@ -1331,6 +1336,9 @@ retry:
        ret = i915_gem_object_lock(obj, &ww);
        if (!ret && phys_cursor)
                ret = i915_gem_object_attach_phys(obj, alignment);
+       else if (!ret && HAS_LMEM(dev_priv))
+               ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
+       /* TODO: Do we need to sync when migration becomes async? */
        if (!ret)
                ret = i915_gem_object_pin_pages(obj);
        if (ret)
@@ -2204,8 +2212,26 @@ unlock:
        clear_bit_unlock(I915_RESET_MODESET, &dev_priv->gt.reset.flags);
 }
 
-static void icl_set_pipe_chicken(struct intel_crtc *crtc)
+static bool underrun_recovery_supported(const struct intel_crtc_state *crtc_state)
+{
+       if (crtc_state->pch_pfit.enabled &&
+           (crtc_state->pipe_src_w > drm_rect_width(&crtc_state->pch_pfit.dst) ||
+            crtc_state->pipe_src_h > drm_rect_height(&crtc_state->pch_pfit.dst) ||
+            crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420))
+               return false;
+
+       if (crtc_state->dsc.compression_enable)
+               return false;
+
+       if (crtc_state->has_psr2)
+               return false;
+
+       return true;
+}
+
+static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
 {
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        u32 tmp;
@@ -2226,19 +2252,19 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
         */
        tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
 
-       /*
-        * "The underrun recovery mechanism should be disabled
-        *  when the following is enabled for this pipe:
-        *  WiDi
-        *  Downscaling (this includes YUV420 fullblend)
-        *  COG
-        *  DSC
-        *  PSR2"
-        *
-        * FIXME: enable whenever possible...
-        */
-       if (IS_ALDERLAKE_P(dev_priv))
-               tmp |= UNDERRUN_RECOVERY_DISABLE;
+       if (IS_DG2(dev_priv)) {
+               /*
+                * Underrun recovery must always be disabled on DG2.  However
+                * the chicken bit meaning is inverted compared to other
+                * platforms.
+                */
+               tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
+       } else if (DISPLAY_VER(dev_priv) >= 13) {
+               if (underrun_recovery_supported(crtc_state))
+                       tmp &= ~UNDERRUN_RECOVERY_DISABLE_ADLP;
+               else
+                       tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
+       }
 
        intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
 }
@@ -3188,6 +3214,28 @@ static void intel_encoders_enable(struct intel_atomic_state *state,
        }
 }
 
+static void intel_encoders_pre_disable(struct intel_atomic_state *state,
+                                      struct intel_crtc *crtc)
+{
+       const struct intel_crtc_state *old_crtc_state =
+               intel_atomic_get_old_crtc_state(state, crtc);
+       const struct drm_connector_state *old_conn_state;
+       struct drm_connector *conn;
+       int i;
+
+       for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
+               struct intel_encoder *encoder =
+                       to_intel_encoder(old_conn_state->best_encoder);
+
+               if (old_conn_state->crtc != &crtc->base)
+                       continue;
+
+               if (encoder->pre_disable)
+                       encoder->pre_disable(state, encoder, old_crtc_state,
+                                            old_conn_state);
+       }
+}
+
 static void intel_encoders_disable(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc)
 {
@@ -3397,13 +3445,17 @@ static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
        intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
 }
 
-static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
+static void icl_pipe_mbus_enable(struct intel_crtc *crtc, bool joined_mbus)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
        u32 val;
 
-       val = MBUS_DBOX_A_CREDIT(2);
+       /* Wa_22010947358:adl-p */
+       if (IS_ALDERLAKE_P(dev_priv))
+               val = joined_mbus ? MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
+       else
+               val = MBUS_DBOX_A_CREDIT(2);
 
        if (DISPLAY_VER(dev_priv) >= 12) {
                val |= MBUS_DBOX_BW_CREDIT(2);
@@ -3471,7 +3523,8 @@ static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
                 * Enable sequence steps 1-7 on bigjoiner master
                 */
                intel_encoders_pre_pll_enable(state, master);
-               intel_enable_shared_dpll(master_crtc_state);
+               if (master_crtc_state->shared_dpll)
+                       intel_enable_shared_dpll(master_crtc_state);
                intel_encoders_pre_enable(state, master);
 
                /* and DSC on slave */
@@ -3529,7 +3582,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
 
        crtc->active = true;
 
-       /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
+       /* Display WA #1180: WaDisableScalarClockGating: glk */
        psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
                new_crtc_state->pch_pfit.enabled;
        if (psl_clkgate_wa)
@@ -3553,13 +3606,17 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
        hsw_set_linetime_wm(new_crtc_state);
 
        if (DISPLAY_VER(dev_priv) >= 11)
-               icl_set_pipe_chicken(crtc);
+               icl_set_pipe_chicken(new_crtc_state);
 
        if (dev_priv->display.initial_watermarks)
                dev_priv->display.initial_watermarks(state, crtc);
 
-       if (DISPLAY_VER(dev_priv) >= 11)
-               icl_pipe_mbus_enable(crtc);
+       if (DISPLAY_VER(dev_priv) >= 11) {
+               const struct intel_dbuf_state *dbuf_state =
+                               intel_atomic_get_new_dbuf_state(state);
+
+               icl_pipe_mbus_enable(crtc, dbuf_state->joined_mbus);
+       }
 
        if (new_crtc_state->bigjoiner_slave)
                intel_crtc_vblank_on(new_crtc_state);
@@ -3693,6 +3750,13 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
 {
        if (phy == PHY_NONE)
                return false;
+       else if (IS_DG2(dev_priv))
+               /*
+                * DG2 outputs labelled as "combo PHY" in the bspec use
+                * SNPS PHYs with completely different programming,
+                * hence we always return false here.
+                */
+               return false;
        else if (IS_ALDERLAKE_S(dev_priv))
                return phy <= PHY_E;
        else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
@@ -3707,7 +3771,10 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
 
 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
 {
-       if (IS_ALDERLAKE_P(dev_priv))
+       if (IS_DG2(dev_priv))
+               /* DG2's "TC1" output uses a SNPS PHY */
+               return false;
+       else if (IS_ALDERLAKE_P(dev_priv))
                return phy >= PHY_F && phy <= PHY_I;
        else if (IS_TIGERLAKE(dev_priv))
                return phy >= PHY_D && phy <= PHY_I;
@@ -3717,6 +3784,20 @@ bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
                return false;
 }
 
+bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
+{
+       if (phy == PHY_NONE)
+               return false;
+       else if (IS_DG2(dev_priv))
+               /*
+                * All four "combo" ports and the TC1 port (PHY E) use
+                * Synopsys PHYs.
+                */
+               return phy <= PHY_E;
+
+       return false;
+}
+
 enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
 {
        if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD)
@@ -3861,7 +3942,7 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
        }
 
        if (HAS_DDI(dev_priv) && crtc_state->has_audio)
-               mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
+               mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
 
        if (crtc_state->shared_dpll)
                mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
@@ -8630,10 +8711,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
 
        PIPE_CONF_CHECK_BOOL(double_wide);
 
-       PIPE_CONF_CHECK_P(shared_dpll);
+       if (dev_priv->dpll.mgr)
+               PIPE_CONF_CHECK_P(shared_dpll);
 
        /* FIXME do the readout properly and get rid of this quirk */
-       if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
+       if (dev_priv->dpll.mgr && !PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
                PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
                PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
                PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
@@ -8665,7 +8747,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
                PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
+       }
 
+       if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_BIGJOINER_SLAVE)) {
                PIPE_CONF_CHECK_X(dsi_pll.ctrl);
                PIPE_CONF_CHECK_X(dsi_pll.div);
 
@@ -9003,6 +9087,10 @@ verify_crtc_state(struct intel_crtc *crtc,
        if (!new_crtc_state->hw.active)
                return;
 
+       if (new_crtc_state->bigjoiner_slave)
+               /* No PLLs set for slave */
+               pipe_config->shared_dpll = NULL;
+
        intel_pipe_config_sanity_check(dev_priv, pipe_config);
 
        if (!intel_pipe_config_compare(new_crtc_state,
@@ -9105,6 +9193,55 @@ verify_shared_dpll_state(struct intel_crtc *crtc,
        }
 }
 
+static void
+verify_mpllb_state(struct intel_atomic_state *state,
+                  struct intel_crtc_state *new_crtc_state)
+{
+       struct drm_i915_private *i915 = to_i915(state->base.dev);
+       struct intel_mpllb_state mpllb_hw_state = { 0 };
+       struct intel_mpllb_state *mpllb_sw_state = &new_crtc_state->mpllb_state;
+       struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+       struct intel_encoder *encoder;
+
+       if (!IS_DG2(i915))
+               return;
+
+       if (!new_crtc_state->hw.active)
+               return;
+
+       if (new_crtc_state->bigjoiner_slave)
+               return;
+
+       encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
+       intel_mpllb_readout_hw_state(encoder, &mpllb_hw_state);
+
+#define MPLLB_CHECK(name) do { \
+       if (mpllb_sw_state->name != mpllb_hw_state.name) { \
+               pipe_config_mismatch(false, crtc, "MPLLB:" __stringify(name), \
+                                    "(expected 0x%08x, found 0x%08x)", \
+                                    mpllb_sw_state->name, \
+                                    mpllb_hw_state.name); \
+       } \
+} while (0)
+
+       MPLLB_CHECK(mpllb_cp);
+       MPLLB_CHECK(mpllb_div);
+       MPLLB_CHECK(mpllb_div2);
+       MPLLB_CHECK(mpllb_fracn1);
+       MPLLB_CHECK(mpllb_fracn2);
+       MPLLB_CHECK(mpllb_sscen);
+       MPLLB_CHECK(mpllb_sscstep);
+
+       /*
+        * ref_control is handled by the hardware/firmware and never
+        * programmed by the software, but the proper values are supplied
+        * in the bspec for verification purposes.
+        */
+       MPLLB_CHECK(ref_control);
+
+#undef MPLLB_CHECK
+}
+
 static void
 intel_modeset_verify_crtc(struct intel_crtc *crtc,
                          struct intel_atomic_state *state,
@@ -9118,6 +9255,7 @@ intel_modeset_verify_crtc(struct intel_crtc *crtc,
        verify_connector_state(state, crtc);
        verify_crtc_state(crtc, old_crtc_state, new_crtc_state);
        verify_shared_dpll_state(crtc, old_crtc_state, new_crtc_state);
+       verify_mpllb_state(state, new_crtc_state);
 }
 
 static void
@@ -9743,7 +9881,7 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
 
                /*
                 * FIXME: This check is kept generic for all platforms.
-                * Need to verify this for all gen9 and gen10 platforms to enable
+                * Need to verify this for all gen9 platforms to enable
                 * this selectively if required.
                 */
                switch (new_plane_state->hw.fb->modifier) {
@@ -10154,7 +10292,7 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
                hsw_set_linetime_wm(new_crtc_state);
 
        if (DISPLAY_VER(dev_priv) >= 11)
-               icl_set_pipe_chicken(crtc);
+               icl_set_pipe_chicken(new_crtc_state);
 }
 
 static void commit_pipe_pre_planes(struct intel_atomic_state *state,
@@ -10288,6 +10426,8 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
 
        drm_WARN_ON(&dev_priv->drm, old_crtc_state->bigjoiner_slave);
 
+       intel_encoders_pre_disable(state, crtc);
+
        intel_crtc_disable_planes(state, crtc);
 
        /*
@@ -11322,7 +11462,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
        if (!HAS_DISPLAY(dev_priv))
                return;
 
-       if (IS_ALDERLAKE_P(dev_priv)) {
+       if (IS_DG2(dev_priv)) {
+               intel_ddi_init(dev_priv, PORT_A);
+               intel_ddi_init(dev_priv, PORT_B);
+               intel_ddi_init(dev_priv, PORT_C);
+               intel_ddi_init(dev_priv, PORT_D_XELPD);
+       } else if (IS_ALDERLAKE_P(dev_priv)) {
                intel_ddi_init(dev_priv, PORT_A);
                intel_ddi_init(dev_priv, PORT_B);
                intel_ddi_init(dev_priv, PORT_TC1);
@@ -11375,7 +11520,6 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
                intel_ddi_init(dev_priv, PORT_C);
                intel_ddi_init(dev_priv, PORT_D);
                intel_ddi_init(dev_priv, PORT_E);
-               intel_ddi_init(dev_priv, PORT_F);
        } else if (HAS_DDI(dev_priv)) {
                u32 found;
 
@@ -11778,7 +11922,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
 
        /* object is backed with LMEM for discrete */
        i915 = to_i915(obj->base.dev);
-       if (HAS_LMEM(i915) && !i915_gem_object_validates_to_lmem(obj)) {
+       if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
                /* object is "remote", not in local memory */
                i915_gem_object_put(obj);
                return ERR_PTR(-EREMOTE);
@@ -13124,7 +13268,7 @@ get_encoder_power_domains(struct drm_i915_private *dev_priv)
 static void intel_early_display_was(struct drm_i915_private *dev_priv)
 {
        /*
-        * Display WA #1185 WaDisableDARBFClkGating:cnl,glk,icl,ehl,tgl
+        * Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
         * Also known as Wa_14010480278.
         */
        if (IS_DISPLAY_VER(dev_priv, 10, 12))
index c9dbaf0..284936f 100644
@@ -561,6 +561,7 @@ struct drm_display_mode *
 intel_encoder_current_mode(struct intel_encoder *encoder);
 bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy);
 bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy);
+bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy);
 enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
                              enum port port);
 int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
index d5af570..8fdacb2 100644
@@ -2256,6 +2256,11 @@ static int i915_lpsp_capability_show(struct seq_file *m, void *data)
        if (connector->status != connector_status_connected)
                return -ENODEV;
 
+       if (DISPLAY_VER(i915) >= 13) {
+               LPSP_CAPABLE(encoder->port <= PORT_B);
+               return 0;
+       }
+
        switch (DISPLAY_VER(i915)) {
        case 12:
                /*
@@ -2390,6 +2395,73 @@ static const struct file_operations i915_dsc_fec_support_fops = {
        .write = i915_dsc_fec_support_write
 };
 
+static int i915_dsc_bpp_show(struct seq_file *m, void *data)
+{
+       struct drm_connector *connector = m->private;
+       struct drm_device *dev = connector->dev;
+       struct drm_crtc *crtc;
+       struct intel_crtc_state *crtc_state;
+       struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+       int ret;
+
+       if (!encoder)
+               return -ENODEV;
+
+       ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
+       if (ret)
+               return ret;
+
+       crtc = connector->state->crtc;
+       if (connector->status != connector_status_connected || !crtc) {
+               ret = -ENODEV;
+               goto out;
+       }
+
+       crtc_state = to_intel_crtc_state(crtc->state);
+       seq_printf(m, "Compressed_BPP: %d\n", crtc_state->dsc.compressed_bpp);
+
+out:   drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+       return ret;
+}
+
+static ssize_t i915_dsc_bpp_write(struct file *file,
+                                 const char __user *ubuf,
+                                 size_t len, loff_t *offp)
+{
+       struct drm_connector *connector =
+               ((struct seq_file *)file->private_data)->private;
+       struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+       struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+       int dsc_bpp = 0;
+       int ret;
+
+       ret = kstrtoint_from_user(ubuf, len, 0, &dsc_bpp);
+       if (ret < 0)
+               return ret;
+
+       intel_dp->force_dsc_bpp = dsc_bpp;
+       *offp += len;
+
+       return len;
+}
+
+static int i915_dsc_bpp_open(struct inode *inode,
+                            struct file *file)
+{
+       return single_open(file, i915_dsc_bpp_show,
+                          inode->i_private);
+}
+
+static const struct file_operations i915_dsc_bpp_fops = {
+       .owner = THIS_MODULE,
+       .open = i915_dsc_bpp_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .write = i915_dsc_bpp_write
+};
+
 /**
  * intel_connector_debugfs_add - add i915 specific connector debugfs files
  * @connector: pointer to a registered drm_connector
@@ -2428,10 +2500,17 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
                                    connector, &i915_hdcp_sink_capability_fops);
        }
 
-       if ((DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) && ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort && !to_intel_connector(connector)->mst_port) || connector->connector_type == DRM_MODE_CONNECTOR_eDP))
-               debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
+       if (DISPLAY_VER(dev_priv) >= 11 &&
+           ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
+           !to_intel_connector(connector)->mst_port) ||
+           connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+               debugfs_create_file("i915_dsc_fec_support", 0644, root,
                                    connector, &i915_dsc_fec_support_fops);
 
+               debugfs_create_file("i915_dsc_bpp", 0644, root,
+                                   connector, &i915_dsc_bpp_fops);
+       }
+
        /* Legacy panels don't support LPSP on any platform */
        if ((DISPLAY_VER(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
             IS_BROADWELL(dev_priv)) &&
index 2853800..5da2933 100644
@@ -18,6 +18,7 @@
 #include "intel_pm.h"
 #include "intel_pps.h"
 #include "intel_sideband.h"
+#include "intel_snps_phy.h"
 #include "intel_tc.h"
 #include "intel_vga.h"
 
@@ -106,8 +107,10 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
                return "PORT_OTHER";
        case POWER_DOMAIN_VGA:
                return "VGA";
-       case POWER_DOMAIN_AUDIO:
-               return "AUDIO";
+       case POWER_DOMAIN_AUDIO_MMIO:
+               return "AUDIO_MMIO";
+       case POWER_DOMAIN_AUDIO_PLAYBACK:
+               return "AUDIO_PLAYBACK";
        case POWER_DOMAIN_AUX_A:
                return "AUX_A";
        case POWER_DOMAIN_AUX_B:
@@ -341,6 +344,17 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
 {
        const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
        int pw_idx = power_well->desc->hsw.idx;
+       int enable_delay = power_well->desc->hsw.fixed_enable_delay;
+
+       /*
+        * For some power wells we're not supposed to watch the status bit for
+        * an ack, but rather just wait a fixed amount of time and then
+        * proceed.  This is only used on DG2.
+        */
+       if (IS_DG2(dev_priv) && enable_delay) {
+               usleep_range(enable_delay, 2 * enable_delay);
+               return;
+       }
 
        /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
        if (intel_de_wait_for_set(dev_priv, regs->driver,
@@ -436,17 +450,6 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
 
        hsw_wait_for_power_well_enable(dev_priv, power_well, false);
 
-       /* Display WA #1178: cnl */
-       if (IS_CANNONLAKE(dev_priv) &&
-           pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
-           pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
-               u32 val;
-
-               val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
-               val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
-               intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
-       }
-
        if (power_well->desc->hsw.has_fuses) {
                enum skl_power_gate pg;
 
@@ -2508,7 +2511,8 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
        BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) |                \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |           \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |          \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
        BIT_ULL(POWER_DOMAIN_GMBUS) |           \
@@ -2558,7 +2562,8 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_DSI) |                \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |           \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |          \
        BIT_ULL(POWER_DOMAIN_AUX_B) |           \
        BIT_ULL(POWER_DOMAIN_AUX_C) |           \
        BIT_ULL(POWER_DOMAIN_AUX_D) |           \
@@ -2591,7 +2596,8 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
        BIT_ULL(POWER_DOMAIN_VGA) |                             \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_INIT))
 
 #define BDW_DISPLAY_POWER_DOMAINS (                    \
@@ -2607,7 +2613,8 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
        BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
        BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */    \
        BIT_ULL(POWER_DOMAIN_VGA) |                             \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_INIT))
 
 #define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS (                \
@@ -2625,7 +2632,8 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
        BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
        BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
        BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_VGA) |                             \
        BIT_ULL(POWER_DOMAIN_INIT))
 #define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS (         \
@@ -2660,7 +2668,8 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
        BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
        BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_VGA) |                             \
        BIT_ULL(POWER_DOMAIN_INIT))
 #define BXT_DISPLAY_DC_OFF_POWER_DOMAINS (             \
@@ -2693,7 +2702,8 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
        BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
        BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_VGA) |                             \
        BIT_ULL(POWER_DOMAIN_INIT))
 #define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS (           \
@@ -2732,63 +2742,6 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
        BIT_ULL(POWER_DOMAIN_GMBUS) |                   \
        BIT_ULL(POWER_DOMAIN_INIT))
 
-#define CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS (                \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_A) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_C) |                  \
-       BIT_ULL(POWER_DOMAIN_TRANSCODER_C) |            \
-       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) |             \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) |                \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |                       \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
-       BIT_ULL(POWER_DOMAIN_VGA) |                             \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_A_POWER_DOMAINS (              \
-       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
-       BIT_ULL(POWER_DOMAIN_AUX_IO_A) |                \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_B_POWER_DOMAINS (              \
-       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_C_POWER_DOMAINS (              \
-       BIT_ULL(POWER_DOMAIN_AUX_C) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_D_POWER_DOMAINS (              \
-       BIT_ULL(POWER_DOMAIN_AUX_D) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_AUX_F_POWER_DOMAINS (              \
-       BIT_ULL(POWER_DOMAIN_AUX_F) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS (           \
-       BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO) |           \
-       BIT_ULL(POWER_DOMAIN_INIT))
-#define CNL_DISPLAY_DC_OFF_POWER_DOMAINS (             \
-       CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS |         \
-       BIT_ULL(POWER_DOMAIN_GT_IRQ) |                  \
-       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
-       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
-       BIT_ULL(POWER_DOMAIN_INIT))
-
 /*
  * ICL PW_0/PG_0 domains (HW/DMC control):
  * - PCI
@@ -2830,7 +2783,8 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
        BIT_ULL(POWER_DOMAIN_AUX_E_TBT) |               \
        BIT_ULL(POWER_DOMAIN_AUX_F_TBT) |               \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_INIT))
        /*
         * - transcoder WD
@@ -2922,7 +2876,8 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
        BIT_ULL(POWER_DOMAIN_AUX_TBT5) |                \
        BIT_ULL(POWER_DOMAIN_AUX_TBT6) |                \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_INIT))
 
 #define TGL_PW_2_POWER_DOMAINS (                       \
@@ -2992,7 +2947,8 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
        RKL_PW_4_POWER_DOMAINS |                        \
        BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
        BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
        BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |      \
@@ -3029,6 +2985,35 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
        BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
        BIT_ULL(POWER_DOMAIN_INIT))
 
+/*
+ * From DG1 onwards, Audio MMIO/VERBS lies in the PG0 power well.
+ */
+#define DG1_PW_3_POWER_DOMAINS (                       \
+       TGL_PW_4_POWER_DOMAINS |                        \
+       BIT_ULL(POWER_DOMAIN_PIPE_B) |                  \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_B) |            \
+       BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) |     \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC1) |      \
+       BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_TC2) |      \
+       BIT_ULL(POWER_DOMAIN_AUX_USBC1) |               \
+       BIT_ULL(POWER_DOMAIN_AUX_USBC2) |               \
+       BIT_ULL(POWER_DOMAIN_VGA) |                     \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define DG1_PW_2_POWER_DOMAINS (                       \
+       DG1_PW_3_POWER_DOMAINS |                        \
+       BIT_ULL(POWER_DOMAIN_TRANSCODER_VDSC_PW2) |     \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
+#define DG1_DISPLAY_DC_OFF_POWER_DOMAINS (             \
+       DG1_PW_3_POWER_DOMAINS |                        \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
+       BIT_ULL(POWER_DOMAIN_MODESET) |                 \
+       BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
+       BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
+       BIT_ULL(POWER_DOMAIN_INIT))
+
 /*
  * XE_LPD Power Domains
  *
@@ -3074,7 +3059,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
        XELPD_PW_B_POWER_DOMAINS |                      \
        XELPD_PW_C_POWER_DOMAINS |                      \
        XELPD_PW_D_POWER_DOMAINS |                      \
-       BIT_ULL(POWER_DOMAIN_AUDIO) |                   \
+       BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |                  \
        BIT_ULL(POWER_DOMAIN_VGA) |                     \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) |        \
        BIT_ULL(POWER_DOMAIN_PORT_DDI_LANES_D_XELPD) |  \
@@ -3115,6 +3100,7 @@ intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
 
 #define XELPD_DISPLAY_DC_OFF_POWER_DOMAINS (           \
        XELPD_PW_2_POWER_DOMAINS |                      \
+       BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |              \
        BIT_ULL(POWER_DOMAIN_MODESET) |                 \
        BIT_ULL(POWER_DOMAIN_AUX_A) |                   \
        BIT_ULL(POWER_DOMAIN_AUX_B) |                   \
@@ -3695,148 +3681,6 @@ static const struct i915_power_well_desc glk_power_wells[] = {
        },
 };
 
-static const struct i915_power_well_desc cnl_power_wells[] = {
-       {
-               .name = "always-on",
-               .always_on = true,
-               .domains = POWER_DOMAIN_MASK,
-               .ops = &i9xx_always_on_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-       },
-       {
-               .name = "power well 1",
-               /* Handled by the DMC firmware */
-               .always_on = true,
-               .domains = 0,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_1,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_PW_1,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "AUX A",
-               .domains = CNL_DISPLAY_AUX_A_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_AUX_A,
-               },
-       },
-       {
-               .name = "AUX B",
-               .domains = CNL_DISPLAY_AUX_B_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_AUX_B,
-               },
-       },
-       {
-               .name = "AUX C",
-               .domains = CNL_DISPLAY_AUX_C_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_AUX_C,
-               },
-       },
-       {
-               .name = "AUX D",
-               .domains = CNL_DISPLAY_AUX_D_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = CNL_PW_CTL_IDX_AUX_D,
-               },
-       },
-       {
-               .name = "DC off",
-               .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS,
-               .ops = &gen9_dc_off_power_well_ops,
-               .id = SKL_DISP_DC_OFF,
-       },
-       {
-               .name = "power well 2",
-               .domains = CNL_DISPLAY_POWERWELL_2_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = SKL_DISP_PW_2,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_PW_2,
-                       .hsw.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
-                       .hsw.has_vga = true,
-                       .hsw.has_fuses = true,
-               },
-       },
-       {
-               .name = "DDI A IO power well",
-               .domains = CNL_DISPLAY_DDI_A_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = GLK_PW_CTL_IDX_DDI_A,
-               },
-       },
-       {
-               .name = "DDI B IO power well",
-               .domains = CNL_DISPLAY_DDI_B_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_B,
-               },
-       },
-       {
-               .name = "DDI C IO power well",
-               .domains = CNL_DISPLAY_DDI_C_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_C,
-               },
-       },
-       {
-               .name = "DDI D IO power well",
-               .domains = CNL_DISPLAY_DDI_D_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = DISP_PW_ID_NONE,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = SKL_PW_CTL_IDX_DDI_D,
-               },
-       },
-       {
-               .name = "DDI F IO power well",
-               .domains = CNL_DISPLAY_DDI_F_IO_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = CNL_DISP_PW_DDI_F_IO,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = CNL_PW_CTL_IDX_DDI_F,
-               },
-       },
-       {
-               .name = "AUX F",
-               .domains = CNL_DISPLAY_AUX_F_POWER_DOMAINS,
-               .ops = &hsw_power_well_ops,
-               .id = CNL_DISP_PW_DDI_F_AUX,
-               {
-                       .hsw.regs = &hsw_power_well_regs,
-                       .hsw.idx = CNL_PW_CTL_IDX_AUX_F,
-               },
-       },
-};
-
 static const struct i915_power_well_ops icl_aux_power_well_ops = {
        .sync_hw = hsw_power_well_sync_hw,
        .enable = icl_aux_power_well_enable,
@@ -4643,6 +4487,165 @@ static const struct i915_power_well_desc rkl_power_wells[] = {
        },
 };
 
+static const struct i915_power_well_desc dg1_power_wells[] = {
+       {
+               .name = "always-on",
+               .always_on = true,
+               .domains = POWER_DOMAIN_MASK,
+               .ops = &i9xx_always_on_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+       },
+       {
+               .name = "power well 1",
+               /* Handled by the DMC firmware */
+               .always_on = true,
+               .domains = 0,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_1,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_1,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "DC off",
+               .domains = DG1_DISPLAY_DC_OFF_POWER_DOMAINS,
+               .ops = &gen9_dc_off_power_well_ops,
+               .id = SKL_DISP_DC_OFF,
+       },
+       {
+               .name = "power well 2",
+               .domains = DG1_PW_2_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = SKL_DISP_PW_2,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_2,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "power well 3",
+               .domains = DG1_PW_3_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = ICL_DISP_PW_3,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_3,
+                       .hsw.irq_pipe_mask = BIT(PIPE_B),
+                       .hsw.has_vga = true,
+                       .hsw.has_fuses = true,
+               },
+       },
+       {
+               .name = "DDI A IO",
+               .domains = ICL_DDI_IO_A_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_A,
+               }
+       },
+       {
+               .name = "DDI B IO",
+               .domains = ICL_DDI_IO_B_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_DDI_B,
+               }
+       },
+       {
+               .name = "DDI IO TC1",
+               .domains = TGL_DDI_IO_TC1_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = TGL_PW_CTL_IDX_DDI_TC1,
+               },
+       },
+       {
+               .name = "DDI IO TC2",
+               .domains = TGL_DDI_IO_TC2_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_ddi_power_well_regs,
+                       .hsw.idx = TGL_PW_CTL_IDX_DDI_TC2,
+               },
+       },
+       {
+               .name = "AUX A",
+               .domains = TGL_AUX_A_IO_POWER_DOMAINS,
+               .ops = &icl_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+               },
+       },
+       {
+               .name = "AUX B",
+               .domains = TGL_AUX_B_IO_POWER_DOMAINS,
+               .ops = &icl_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+               },
+       },
+       {
+               .name = "AUX USBC1",
+               .domains = TGL_AUX_IO_USBC1_POWER_DOMAINS,
+               .ops = &icl_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
+                       .hsw.is_tc_tbt = false,
+               },
+       },
+       {
+               .name = "AUX USBC2",
+               .domains = TGL_AUX_IO_USBC2_POWER_DOMAINS,
+               .ops = &icl_aux_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &icl_aux_power_well_regs,
+                       .hsw.idx = TGL_PW_CTL_IDX_AUX_TC2,
+                       .hsw.is_tc_tbt = false,
+               },
+       },
+       {
+               .name = "power well 4",
+               .domains = TGL_PW_4_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = ICL_PW_CTL_IDX_PW_4,
+                       .hsw.has_fuses = true,
+                       .hsw.irq_pipe_mask = BIT(PIPE_C),
+               }
+       },
+       {
+               .name = "power well 5",
+               .domains = TGL_PW_5_POWER_DOMAINS,
+               .ops = &hsw_power_well_ops,
+               .id = DISP_PW_ID_NONE,
+               {
+                       .hsw.regs = &hsw_power_well_regs,
+                       .hsw.idx = TGL_PW_CTL_IDX_PW_5,
+                       .hsw.has_fuses = true,
+                       .hsw.irq_pipe_mask = BIT(PIPE_D),
+               },
+       },
+};
+
 static const struct i915_power_well_desc xelpd_power_wells[] = {
        {
                .name = "always-on",
@@ -4828,6 +4831,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = {
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_A,
+                       .hsw.fixed_enable_delay = 600,
                },
        },
        {
@@ -4838,6 +4842,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = {
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_B,
+                       .hsw.fixed_enable_delay = 600,
                },
        },
        {
@@ -4848,6 +4853,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = {
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = ICL_PW_CTL_IDX_AUX_C,
+                       .hsw.fixed_enable_delay = 600,
                },
        },
        {
@@ -4858,6 +4864,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = {
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = XELPD_PW_CTL_IDX_AUX_D,
+                       .hsw.fixed_enable_delay = 600,
                },
        },
        {
@@ -4878,6 +4885,7 @@ static const struct i915_power_well_desc xelpd_power_wells[] = {
                {
                        .hsw.regs = &icl_aux_power_well_regs,
                        .hsw.idx = TGL_PW_CTL_IDX_AUX_TC1,
+                       .hsw.fixed_enable_delay = 600,
                },
        },
        {
@@ -5122,7 +5130,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
                err = 0;
        } else if (DISPLAY_VER(dev_priv) >= 13) {
                err = set_power_wells(power_domains, xelpd_power_wells);
-       } else if (IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv)) {
+       } else if (IS_DG1(dev_priv)) {
+               err = set_power_wells(power_domains, dg1_power_wells);
+       } else if (IS_ALDERLAKE_S(dev_priv)) {
                err = set_power_wells_mask(power_domains, tgl_power_wells,
                                           BIT_ULL(TGL_DISP_PW_TC_COLD_OFF));
        } else if (IS_ROCKETLAKE(dev_priv)) {
@@ -5131,12 +5141,6 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
                err = set_power_wells(power_domains, tgl_power_wells);
        } else if (DISPLAY_VER(dev_priv) == 11) {
                err = set_power_wells(power_domains, icl_power_wells);
-       } else if (IS_CNL_WITH_PORT_F(dev_priv)) {
-               err = set_power_wells(power_domains, cnl_power_wells);
-       } else if (IS_CANNONLAKE(dev_priv)) {
-               err = set_power_wells_mask(power_domains, cnl_power_wells,
-                                          BIT_ULL(CNL_DISP_PW_DDI_F_IO) |
-                                          BIT_ULL(CNL_DISP_PW_DDI_F_AUX));
        } else if (IS_GEMINILAKE(dev_priv)) {
                err = set_power_wells(power_domains, glk_power_wells);
        } else if (IS_BROXTON(dev_priv)) {
@@ -5691,75 +5695,6 @@ static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
        usleep_range(10, 30);           /* 10 us delay per Bspec */
 }
 
-static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *well;
-
-       gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
-
-       /* 1. Enable PCH Reset Handshake */
-       intel_pch_reset_handshake(dev_priv, !HAS_PCH_NOP(dev_priv));
-
-       if (!HAS_DISPLAY(dev_priv))
-               return;
-
-       /* 2-3. */
-       intel_combo_phy_init(dev_priv);
-
-       /*
-        * 4. Enable Power Well 1 (PG1).
-        *    The AUX IO power wells will be enabled on demand.
-        */
-       mutex_lock(&power_domains->lock);
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-       intel_power_well_enable(dev_priv, well);
-       mutex_unlock(&power_domains->lock);
-
-       /* 5. Enable CD clock */
-       intel_cdclk_init_hw(dev_priv);
-
-       /* 6. Enable DBUF */
-       gen9_dbuf_enable(dev_priv);
-
-       if (resume && intel_dmc_has_payload(dev_priv))
-               intel_dmc_load_program(dev_priv);
-}
-
-static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
-{
-       struct i915_power_domains *power_domains = &dev_priv->power_domains;
-       struct i915_power_well *well;
-
-       if (!HAS_DISPLAY(dev_priv))
-               return;
-
-       gen9_disable_dc_states(dev_priv);
-
-       /* 1. Disable all display engine functions -> already done */
-
-       /* 2. Disable DBUF */
-       gen9_dbuf_disable(dev_priv);
-
-       /* 3. Disable CD clock */
-       intel_cdclk_uninit_hw(dev_priv);
-
-       /*
-        * 4. Disable Power Well 1 (PG1).
-        *    The AUX IO power wells are toggled on demand, so they are already
-        *    disabled at this point.
-        */
-       mutex_lock(&power_domains->lock);
-       well = lookup_power_well(dev_priv, SKL_DISP_PW_1);
-       intel_power_well_disable(dev_priv, well);
-       mutex_unlock(&power_domains->lock);
-
-       usleep_range(10, 30);           /* 10 us delay per Bspec */
-
-       /* 5. */
-       intel_combo_phy_uninit(dev_priv);
-}
-
 struct buddy_page_mask {
        u32 page_mask;
        u8 type;
@@ -5798,9 +5733,14 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
        unsigned long abox_mask = INTEL_INFO(dev_priv)->abox_mask;
        int config, i;
 
+       /* BW_BUDDY registers are not used on dGPUs beyond DG1 */
+       if (IS_DGFX(dev_priv) && !IS_DG1(dev_priv))
+               return;
+
        if (IS_ALDERLAKE_S(dev_priv) ||
-           IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0) ||
-           IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+           IS_DG1_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+           IS_RKL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) ||
+           IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
                /* Wa_1409767108:tgl,dg1,adl-s */
                table = wa_1409767108_buddy_page_masks;
        else
@@ -5822,10 +5762,11 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
                        intel_de_write(dev_priv, BW_BUDDY_PAGE_MASK(i),
                                       table[config].page_mask);
 
-                       /* Wa_22010178259:tgl,rkl */
-                       intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
-                                    BW_BUDDY_TLB_REQ_TIMER_MASK,
-                                    BW_BUDDY_TLB_REQ_TIMER(0x8));
+                       /* Wa_22010178259:tgl,dg1,rkl,adl-s */
+                       if (DISPLAY_VER(dev_priv) == 12)
+                               intel_de_rmw(dev_priv, BW_BUDDY_CTL(i),
+                                            BW_BUDDY_TLB_REQ_TIMER_MASK,
+                                            BW_BUDDY_TLB_REQ_TIMER(0x8));
                }
        }
 }
@@ -5879,11 +5820,15 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
        if (DISPLAY_VER(dev_priv) >= 12)
                tgl_bw_buddy_init(dev_priv);
 
+       /* 8. Ensure PHYs have completed calibration and adaptation */
+       if (IS_DG2(dev_priv))
+               intel_snps_phy_wait_for_calibration(dev_priv);
+
        if (resume && intel_dmc_has_payload(dev_priv))
                intel_dmc_load_program(dev_priv);
 
-       /* Wa_14011508470 */
-       if (DISPLAY_VER(dev_priv) == 12) {
+       /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
+       if (DISPLAY_VER(dev_priv) >= 12) {
                val = DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
                      DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR;
                intel_uncore_rmw(&dev_priv->uncore, GEN11_CHICKEN_DCPR_2, 0, val);
@@ -6098,8 +6043,6 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
 
        if (DISPLAY_VER(i915) >= 11) {
                icl_display_core_init(i915, resume);
-       } else if (IS_CANNONLAKE(i915)) {
-               cnl_display_core_init(i915, resume);
        } else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) {
                bxt_display_core_init(i915, resume);
        } else if (DISPLAY_VER(i915) == 9) {
@@ -6259,8 +6202,6 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
 
        if (DISPLAY_VER(i915) >= 11)
                icl_display_core_uninit(i915);
-       else if (IS_CANNONLAKE(i915))
-               cnl_display_core_uninit(i915);
        else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
                bxt_display_core_uninit(i915);
        else if (DISPLAY_VER(i915) == 9)
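
Two behavioural changes in this file are easy to miss among the CNL removals: hsw_wait_for_power_well_enable() now takes a fixed-delay path on DG2 (sleep between fixed_enable_delay and twice that, instead of polling the status bit), and DG1 gets its own power-well table instead of sharing the TGL one. Below is a minimal userspace sketch of the wait policy only, with invented names (well_wait_enabled, poll_status) and usleep() standing in for usleep_range(); it is not the driver function.

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical descriptor: a non-zero fixed_enable_delay means
 * "do not poll for an ack, just sleep this many microseconds". */
struct well_desc {
	unsigned int fixed_enable_delay;	/* in us, 0 = poll status */
};

/* Stand-in for reading the hardware status bit. */
static bool poll_status(void)
{
	return true;	/* pretend the well acked immediately */
}

static int well_wait_enabled(const struct well_desc *d, bool is_dg2)
{
	if (is_dg2 && d->fixed_enable_delay) {
		/* Fixed-delay policy: no ack bit to watch, just wait. */
		usleep(d->fixed_enable_delay);
		return 0;
	}

	/* Default policy: poll the status bit with a timeout. */
	for (int tries = 0; tries < 20; tries++) {
		if (poll_status())
			return 0;
		usleep(1);
	}
	return -1;	/* timed out */
}

int main(void)
{
	struct well_desc aux_a = { .fixed_enable_delay = 600 };
	struct well_desc pw_1 = { .fixed_enable_delay = 0 };

	printf("AUX A (DG2): %d\n", well_wait_enabled(&aux_a, true));
	printf("PW 1: %d\n", well_wait_enabled(&pw_1, false));
	return 0;
}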
index 4f0917d..9785318 100644 (file)
@@ -76,7 +76,8 @@ enum intel_display_power_domain {
        POWER_DOMAIN_PORT_CRT,
        POWER_DOMAIN_PORT_OTHER,
        POWER_DOMAIN_VGA,
-       POWER_DOMAIN_AUDIO,
+       POWER_DOMAIN_AUDIO_MMIO,
+       POWER_DOMAIN_AUDIO_PLAYBACK,
        POWER_DOMAIN_AUX_A,
        POWER_DOMAIN_AUX_B,
        POWER_DOMAIN_AUX_C,
@@ -142,8 +143,6 @@ enum i915_power_well_id {
        SKL_DISP_PW_MISC_IO,
        SKL_DISP_PW_1,
        SKL_DISP_PW_2,
-       CNL_DISP_PW_DDI_F_IO,
-       CNL_DISP_PW_DDI_F_AUX,
        ICL_DISP_PW_3,
        SKL_DISP_DC_OFF,
        TGL_DISP_PW_TC_COLD_OFF,
@@ -223,6 +222,12 @@ struct i915_power_well_desc {
                        u8 idx;
                        /* Mask of pipes whose IRQ logic is backed by the pw */
                        u8 irq_pipe_mask;
+                       /*
+                        * Instead of waiting for the status bit to ack enables,
+                        * just wait a specific amount of time and then consider
+                        * the well enabled.
+                        */
+                       u16 fixed_enable_delay;
                        /* The pw is backing the VGA functionality */
                        bool has_vga:1;
                        bool has_fuses:1;
@@ -386,6 +391,10 @@ intel_display_power_put_all_in_set(struct drm_i915_private *i915,
        intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask);
 }
 
+/*
+ * FIXME: We should probably switch this to a 0-based scheme to be consistent
+ * with how we now name/number DBUF_CTL instances.
+ */
 enum dbuf_slice {
        DBUF_S1,
        DBUF_S2,
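
The AUDIO to AUDIO_MMIO plus AUDIO_PLAYBACK split in this header only stays manageable because the *_POWER_DOMAINS macros are plain BIT_ULL() ORs, so adding a domain to a well is a one-line mask change per platform. A self-contained sketch of how such masks compose, using a trimmed-down, illustrative domain enum rather than the real one:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n) (1ULL << (n))

/* Illustrative subset of the power-domain enum. */
enum power_domain {
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO_MMIO,
	POWER_DOMAIN_AUDIO_PLAYBACK,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_INIT,
};

/* A power well covers the union of the domains it backs. */
#define EXAMPLE_PW_DOMAINS (			\
	BIT_ULL(POWER_DOMAIN_VGA) |		\
	BIT_ULL(POWER_DOMAIN_AUDIO_MMIO) |	\
	BIT_ULL(POWER_DOMAIN_AUDIO_PLAYBACK) |	\
	BIT_ULL(POWER_DOMAIN_INIT))

int main(void)
{
	uint64_t mask = EXAMPLE_PW_DOMAINS;

	printf("mask = 0x%" PRIx64 "\n", mask);
	printf("covers AUX_A? %s\n",
	       (mask & BIT_ULL(POWER_DOMAIN_AUX_A)) ? "yes" : "no");
	return 0;
}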
index d94f361..6beeeeb 100644 (file)
@@ -196,6 +196,10 @@ struct intel_encoder {
        void (*update_complete)(struct intel_atomic_state *,
                                struct intel_encoder *,
                                struct intel_crtc *);
+       void (*pre_disable)(struct intel_atomic_state *,
+                           struct intel_encoder *,
+                           const struct intel_crtc_state *,
+                           const struct drm_connector_state *);
        void (*disable)(struct intel_atomic_state *,
                        struct intel_encoder *,
                        const struct intel_crtc_state *,
@@ -884,6 +888,18 @@ enum intel_output_format {
        INTEL_OUTPUT_FORMAT_YCBCR444,
 };
 
+struct intel_mpllb_state {
+       u32 clock; /* in KHz */
+       u32 ref_control;
+       u32 mpllb_cp;
+       u32 mpllb_div;
+       u32 mpllb_div2;
+       u32 mpllb_fracn1;
+       u32 mpllb_fracn2;
+       u32 mpllb_sscen;
+       u32 mpllb_sscstep;
+};
+
 struct intel_crtc_state {
        /*
         * uapi (drm) state. This is the software state shown to userspace.
@@ -1018,7 +1034,10 @@ struct intel_crtc_state {
        struct intel_shared_dpll *shared_dpll;
 
        /* Actual register state of the dpll, for shared dpll cross-checking. */
-       struct intel_dpll_hw_state dpll_hw_state;
+       union {
+               struct intel_dpll_hw_state dpll_hw_state;
+               struct intel_mpllb_state mpllb_state;
+       };
 
        /*
         * ICL reserved DPLLs for the CRTC/port. The active PLL is selected by
@@ -1612,6 +1631,7 @@ struct intel_dp {
 
        /* Display stream compression testing */
        bool force_dsc_en;
+       int force_dsc_bpp;
 
        bool hobl_failed;
        bool hobl_active;
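
Folding dpll_hw_state and the new mpllb_state into an anonymous union keeps intel_crtc_state from growing for platforms that will only ever use one of the two; the trade-off is that only the member matching the platform is meaningful at any time. A small sketch of that pattern with mocked-up types, not the real structures:

#include <stdint.h>
#include <stdio.h>

/* Mocked-up stand-ins for the two PLL state flavours. */
struct dpll_hw_state { uint32_t cfgcr0, cfgcr1; };
struct mpllb_state   { uint32_t clock, mpllb_div; };

struct crtc_state {
	int is_dg2;	/* picks which union member is valid */
	union {
		struct dpll_hw_state dpll;
		struct mpllb_state mpllb;
	};
};

static void program_pll(const struct crtc_state *s)
{
	/* Only read the member that matches the platform. */
	if (s->is_dg2)
		printf("MPLLB: clock=%u div=0x%x\n",
		       s->mpllb.clock, s->mpllb.mpllb_div);
	else
		printf("DPLL: cfgcr0=0x%x cfgcr1=0x%x\n",
		       s->dpll.cfgcr0, s->dpll.cfgcr1);
}

int main(void)
{
	struct crtc_state dg2 = { .is_dg2 = 1 };
	struct crtc_state icl = { .is_dg2 = 0 };

	dg2.mpllb = (struct mpllb_state){ .clock = 540000, .mpllb_div = 0x5 };
	icl.dpll = (struct dpll_hw_state){ .cfgcr0 = 0x1, .cfgcr1 = 0x2 };

	program_pll(&dg2);
	program_pll(&icl);
	return 0;
}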
index f8789d4..3c3c6cb 100644 (file)
@@ -57,12 +57,12 @@ MODULE_FIRMWARE(ADLS_DMC_PATH);
 #define DG1_DMC_VERSION_REQUIRED       DMC_VERSION(2, 2)
 MODULE_FIRMWARE(DG1_DMC_PATH);
 
-#define RKL_DMC_PATH                   DMC_PATH(rkl, 2, 02)
-#define RKL_DMC_VERSION_REQUIRED       DMC_VERSION(2, 2)
+#define RKL_DMC_PATH                   DMC_PATH(rkl, 2, 03)
+#define RKL_DMC_VERSION_REQUIRED       DMC_VERSION(2, 3)
 MODULE_FIRMWARE(RKL_DMC_PATH);
 
-#define TGL_DMC_PATH                   DMC_PATH(tgl, 2, 08)
-#define TGL_DMC_VERSION_REQUIRED       DMC_VERSION(2, 8)
+#define TGL_DMC_PATH                   DMC_PATH(tgl, 2, 12)
+#define TGL_DMC_VERSION_REQUIRED       DMC_VERSION(2, 12)
 MODULE_FIRMWARE(TGL_DMC_PATH);
 
 #define ICL_DMC_PATH                   DMC_PATH(icl, 1, 09)
@@ -70,11 +70,6 @@ MODULE_FIRMWARE(TGL_DMC_PATH);
 #define ICL_DMC_MAX_FW_SIZE            0x6000
 MODULE_FIRMWARE(ICL_DMC_PATH);
 
-#define CNL_DMC_PATH                   DMC_PATH(cnl, 1, 07)
-#define CNL_DMC_VERSION_REQUIRED       DMC_VERSION(1, 7)
-#define CNL_DMC_MAX_FW_SIZE            GLK_DMC_MAX_FW_SIZE
-MODULE_FIRMWARE(CNL_DMC_PATH);
-
 #define GLK_DMC_PATH                   DMC_PATH(glk, 1, 04)
 #define GLK_DMC_VERSION_REQUIRED       DMC_VERSION(1, 4)
 #define GLK_DMC_MAX_FW_SIZE            0x4000
@@ -247,50 +242,15 @@ bool intel_dmc_has_payload(struct drm_i915_private *i915)
        return i915->dmc.dmc_info[DMC_FW_MAIN].payload;
 }
 
-static const struct stepping_info skl_stepping_info[] = {
-       {'A', '0'}, {'B', '0'}, {'C', '0'},
-       {'D', '0'}, {'E', '0'}, {'F', '0'},
-       {'G', '0'}, {'H', '0'}, {'I', '0'},
-       {'J', '0'}, {'K', '0'}
-};
-
-static const struct stepping_info bxt_stepping_info[] = {
-       {'A', '0'}, {'A', '1'}, {'A', '2'},
-       {'B', '0'}, {'B', '1'}, {'B', '2'}
-};
-
-static const struct stepping_info icl_stepping_info[] = {
-       {'A', '0'}, {'A', '1'}, {'A', '2'},
-       {'B', '0'}, {'B', '2'},
-       {'C', '0'}
-};
-
-static const struct stepping_info no_stepping_info = { '*', '*' };
-
 static const struct stepping_info *
-intel_get_stepping_info(struct drm_i915_private *dev_priv)
+intel_get_stepping_info(struct drm_i915_private *i915,
+                       struct stepping_info *si)
 {
-       const struct stepping_info *si;
-       unsigned int size;
-
-       if (IS_ICELAKE(dev_priv)) {
-               size = ARRAY_SIZE(icl_stepping_info);
-               si = icl_stepping_info;
-       } else if (IS_SKYLAKE(dev_priv)) {
-               size = ARRAY_SIZE(skl_stepping_info);
-               si = skl_stepping_info;
-       } else if (IS_BROXTON(dev_priv)) {
-               size = ARRAY_SIZE(bxt_stepping_info);
-               si = bxt_stepping_info;
-       } else {
-               size = 0;
-               si = NULL;
-       }
-
-       if (INTEL_REVID(dev_priv) < size)
-               return si + INTEL_REVID(dev_priv);
+       const char *step_name = intel_step_name(RUNTIME_INFO(i915)->step.display_step);
 
-       return &no_stepping_info;
+       si->stepping = step_name[0];
+       si->substepping = step_name[1];
+       return si;
 }
 
 static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
@@ -616,7 +576,8 @@ static void parse_dmc_fw(struct drm_i915_private *dev_priv,
        struct intel_package_header *package_header;
        struct intel_dmc_header_base *dmc_header;
        struct intel_dmc *dmc = &dev_priv->dmc;
-       const struct stepping_info *si = intel_get_stepping_info(dev_priv);
+       struct stepping_info display_info = { '*', '*'};
+       const struct stepping_info *si = intel_get_stepping_info(dev_priv, &display_info);
        u32 readcount = 0;
        u32 r, offset;
        int id;
@@ -752,10 +713,6 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
                dmc->fw_path = ICL_DMC_PATH;
                dmc->required_version = ICL_DMC_VERSION_REQUIRED;
                dmc->max_fw_size = ICL_DMC_MAX_FW_SIZE;
-       } else if (IS_CANNONLAKE(dev_priv)) {
-               dmc->fw_path = CNL_DMC_PATH;
-               dmc->required_version = CNL_DMC_VERSION_REQUIRED;
-               dmc->max_fw_size = CNL_DMC_MAX_FW_SIZE;
        } else if (IS_GEMINILAKE(dev_priv)) {
                dmc->fw_path = GLK_DMC_PATH;
                dmc->required_version = GLK_DMC_VERSION_REQUIRED;
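
With the per-platform stepping tables gone, the DMC loader now asks the central stepping code for a name such as "B0" and splits it into the two characters that firmware entries are matched against, falling back to the '*' wildcards otherwise. A standalone sketch of that split, assuming a two-character step name as returned by intel_step_name(); the helper name here is illustrative:

#include <stdio.h>

struct stepping_info {
	char stepping;
	char substepping;
};

/*
 * Split a step name such as "B0" into the (stepping, substepping) pair
 * used to match DMC firmware entries. Falls back to '*' wildcards when
 * the name is unusable, mirroring the old no_stepping_info default.
 */
static struct stepping_info stepping_from_name(const char *step_name)
{
	struct stepping_info si = { '*', '*' };

	if (step_name && step_name[0] && step_name[1]) {
		si.stepping = step_name[0];
		si.substepping = step_name[1];
	}
	return si;
}

int main(void)
{
	struct stepping_info si = stepping_from_name("B0");

	printf("stepping=%c substepping=%c\n", si.stepping, si.substepping);
	return 0;
}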
index 5b52bea..75d4ebc 100644 (file)
@@ -222,29 +222,6 @@ bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
                 encoder->port != PORT_A);
 }
 
-static int cnl_max_source_rate(struct intel_dp *intel_dp)
-{
-       struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
-       struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
-       enum port port = dig_port->base.port;
-
-       u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
-
-       /* Low voltage SKUs are limited to max of 5.4G */
-       if (voltage == VOLTAGE_INFO_0_85V)
-               return 540000;
-
-       /* For this SKU 8.1G is supported in all ports */
-       if (IS_CNL_WITH_PORT_F(dev_priv))
-               return 810000;
-
-       /* For other SKUs, max rate on ports A and D is 5.4G */
-       if (port == PORT_A || port == PORT_D)
-               return 540000;
-
-       return 810000;
-}
-
 static int icl_max_source_rate(struct intel_dp *intel_dp)
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -270,7 +247,7 @@ static void
 intel_dp_set_source_rates(struct intel_dp *intel_dp)
 {
        /* The values must be in increasing order */
-       static const int cnl_rates[] = {
+       static const int icl_rates[] = {
                162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
        };
        static const int bxt_rates[] = {
@@ -295,12 +272,10 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
        drm_WARN_ON(&dev_priv->drm,
                    intel_dp->source_rates || intel_dp->num_source_rates);
 
-       if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) {
-               source_rates = cnl_rates;
-               size = ARRAY_SIZE(cnl_rates);
-               if (DISPLAY_VER(dev_priv) == 10)
-                       max_rate = cnl_max_source_rate(intel_dp);
-               else if (IS_JSL_EHL(dev_priv))
+       if (DISPLAY_VER(dev_priv) >= 11) {
+               source_rates = icl_rates;
+               size = ARRAY_SIZE(icl_rates);
+               if (IS_JSL_EHL(dev_priv))
                        max_rate = ehl_max_source_rate(intel_dp);
                else
                        max_rate = icl_max_source_rate(intel_dp);
@@ -1274,6 +1249,23 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
                                                               pipe_config->pipe_bpp);
                pipe_config->dsc.slice_count = dsc_dp_slice_count;
        }
+
+       /* As of today we support DSC only for RGB */
+       if (intel_dp->force_dsc_bpp) {
+               if (intel_dp->force_dsc_bpp >= 8 &&
+                   intel_dp->force_dsc_bpp < pipe_bpp) {
+                       drm_dbg_kms(&dev_priv->drm,
+                                   "DSC BPP forced to %d",
+                                   intel_dp->force_dsc_bpp);
+                       pipe_config->dsc.compressed_bpp =
+                                               intel_dp->force_dsc_bpp;
+               } else {
+                       drm_dbg_kms(&dev_priv->drm,
+                                   "Invalid DSC BPP %d",
+                                   intel_dp->force_dsc_bpp);
+               }
+       }
+
        /*
         * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
         * is greater than the maximum Cdclock and if slice count is even
@@ -3339,6 +3331,9 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
 
        intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
 
+       drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
+                         intel_dp->train_set, crtc_state->lane_count);
+
        drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
                                    link_status[DP_DPCD_REV]);
 }
index 7c048d2..f483f47 100644 (file)
@@ -158,7 +158,6 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
        /*
         * Max timeout values:
         * SKL-GLK: 1.6ms
-        * CNL: 3.2ms
         * ICL+: 4ms
         */
        ret = DP_AUX_CH_CTL_SEND_BUSY |
index 3661cd1..8d13d7b 100644 (file)
@@ -348,6 +348,16 @@ static void wait_for_act_sent(struct intel_encoder *encoder,
        drm_dp_check_act_status(&intel_dp->mst_mgr);
 }
 
+static void intel_mst_pre_disable_dp(struct intel_atomic_state *state,
+                                    struct intel_encoder *encoder,
+                                    const struct intel_crtc_state *old_crtc_state,
+                                    const struct drm_connector_state *old_conn_state)
+{
+       if (old_crtc_state->has_audio)
+               intel_audio_codec_disable(encoder, old_crtc_state,
+                                         old_conn_state);
+}
+
 static void intel_mst_disable_dp(struct intel_atomic_state *state,
                                 struct intel_encoder *encoder,
                                 const struct intel_crtc_state *old_crtc_state,
@@ -372,9 +382,6 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
        if (ret) {
                drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
        }
-       if (old_crtc_state->has_audio)
-               intel_audio_codec_disable(encoder,
-                                         old_crtc_state, old_conn_state);
 }
 
 static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
@@ -542,7 +549,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
        struct intel_digital_port *dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &dig_port->dp;
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-       u32 val;
+       enum transcoder trans = pipe_config->cpu_transcoder;
 
        drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
 
@@ -550,12 +557,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
 
        intel_ddi_enable_transcoder_func(encoder, pipe_config);
 
-       val = intel_de_read(dev_priv,
-                           TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
-       val |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
-       intel_de_write(dev_priv,
-                      TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder),
-                      val);
+       intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(trans), 0,
+                    TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
 
        drm_dbg_kms(&dev_priv->drm, "active links %d\n",
                    intel_dp->active_mst_links);
@@ -564,6 +567,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
 
        drm_dp_update_payload_part2(&intel_dp->mst_mgr);
 
+       if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable)
+               intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0,
+                            FECSTALL_DIS_DPTSTREAM_DPTTG);
+
        intel_enable_pipe(pipe_config);
 
        intel_crtc_vblank_on(pipe_config);
@@ -903,6 +910,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
 
        intel_encoder->compute_config = intel_dp_mst_compute_config;
        intel_encoder->compute_config_late = intel_dp_mst_compute_config_late;
+       intel_encoder->pre_disable = intel_mst_pre_disable_dp;
        intel_encoder->disable = intel_mst_disable_dp;
        intel_encoder->post_disable = intel_mst_post_disable_dp;
        intel_encoder->update_pipe = intel_ddi_update_pipe;
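
Moving the audio codec shutdown into the new pre_disable hook means it now runs before the MST payload is torn down in disable, simply because the atomic helpers invoke pre_disable first. A sketch of that ordering with plain function-pointer hooks; encoder_hooks and run_disable_sequence are illustrative names, not the drm helpers:

#include <stdio.h>

/* Illustrative encoder vtable: only the hooks relevant here. */
struct encoder_hooks {
	void (*pre_disable)(void);
	void (*disable)(void);
	void (*post_disable)(void);
};

static void mst_pre_disable(void)  { printf("1. audio codec disable\n"); }
static void mst_disable(void)      { printf("2. drop VC payload\n"); }
static void mst_post_disable(void) { printf("3. transcoder/port teardown\n"); }

/* The caller guarantees the pre_disable -> disable -> post_disable order. */
static void run_disable_sequence(const struct encoder_hooks *h)
{
	if (h->pre_disable)
		h->pre_disable();
	h->disable();
	h->post_disable();
}

int main(void)
{
	struct encoder_hooks mst = {
		.pre_disable = mst_pre_disable,
		.disable = mst_disable,
		.post_disable = mst_post_disable,
	};

	run_disable_sequence(&mst);
	return 0;
}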
index 89635da..14515e6 100644 (file)
@@ -11,6 +11,7 @@
 #include "intel_lvds.h"
 #include "intel_panel.h"
 #include "intel_sideband.h"
+#include "display/intel_snps_phy.h"
 
 struct intel_limit {
        struct {
@@ -923,12 +924,13 @@ static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        struct intel_atomic_state *state =
                to_intel_atomic_state(crtc_state->uapi.state);
+       struct intel_encoder *encoder =
+               intel_get_crtc_new_encoder(state, crtc_state);
 
-       if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
-           DISPLAY_VER(dev_priv) >= 11) {
-               struct intel_encoder *encoder =
-                       intel_get_crtc_new_encoder(state, crtc_state);
-
+       if (IS_DG2(dev_priv)) {
+               return intel_mpllb_calc_state(crtc_state, encoder);
+       } else if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
+                  DISPLAY_VER(dev_priv) >= 11) {
                if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
                        drm_dbg_kms(&dev_priv->drm,
                                    "failed to find PLL for pipe %c\n",
index 71ac576..5c91d12 100644 (file)
@@ -168,7 +168,7 @@ intel_combo_pll_enable_reg(struct drm_i915_private *i915,
        else if (IS_JSL_EHL(i915) && (pll->info->id == DPLL_ID_EHL_DPLL4))
                return MG_PLL_ENABLE(0);
 
-       return CNL_DPLL_ENABLE(pll->info->id);
+       return ICL_DPLL_ENABLE(pll->info->id);
 }
 
 static i915_reg_t
@@ -2346,160 +2346,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
        .dump_hw_state = bxt_dump_hw_state,
 };
 
-static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
-                              struct intel_shared_dpll *pll)
-{
-       const enum intel_dpll_id id = pll->info->id;
-       u32 val;
-
-       /* 1. Enable DPLL power in DPLL_ENABLE. */
-       val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
-       val |= PLL_POWER_ENABLE;
-       intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
-
-       /* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
-       if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
-                                 PLL_POWER_STATE, 5))
-               drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);
-
-       /*
-        * 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
-        * select DP mode, and set DP link rate.
-        */
-       val = pll->state.hw_state.cfgcr0;
-       intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);
-
-       /* 4. Read back to ensure writes completed */
-       intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));
-
-       /* 3. Configure DPLL_CFGCR0 */
-       /* Avoid touch CFGCR1 if HDMI mode is not enabled */
-       if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
-               val = pll->state.hw_state.cfgcr1;
-               intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
-               /* 4. Read back to ensure writes completed */
-               intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
-       }
-
-       /*
-        * 5. If the frequency will result in a change to the voltage
-        * requirement, follow the Display Voltage Frequency Switching
-        * Sequence Before Frequency Change
-        *
-        * Note: DVFS is actually handled via the cdclk code paths,
-        * hence we do nothing here.
-        */
-
-       /* 6. Enable DPLL in DPLL_ENABLE. */
-       val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
-       val |= PLL_ENABLE;
-       intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
-
-       /* 7. Wait for PLL lock status in DPLL_ENABLE. */
-       if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
-               drm_err(&dev_priv->drm, "PLL %d not locked\n", id);
-
-       /*
-        * 8. If the frequency will result in a change to the voltage
-        * requirement, follow the Display Voltage Frequency Switching
-        * Sequence After Frequency Change
-        *
-        * Note: DVFS is actually handled via the cdclk code paths,
-        * hence we do nothing here.
-        */
-
-       /*
-        * 9. turn on the clock for the DDI and map the DPLL to the DDI
-        * Done at intel_ddi_clk_select
-        */
-}
-
-static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
-                               struct intel_shared_dpll *pll)
-{
-       const enum intel_dpll_id id = pll->info->id;
-       u32 val;
-
-       /*
-        * 1. Configure DPCLKA_CFGCR0 to turn off the clock for the DDI.
-        * Done at intel_ddi_post_disable
-        */
-
-       /*
-        * 2. If the frequency will result in a change to the voltage
-        * requirement, follow the Display Voltage Frequency Switching
-        * Sequence Before Frequency Change
-        *
-        * Note: DVFS is actually handled via the cdclk code paths,
-        * hence we do nothing here.
-        */
-
-       /* 3. Disable DPLL through DPLL_ENABLE. */
-       val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
-       val &= ~PLL_ENABLE;
-       intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
-
-       /* 4. Wait for PLL not locked status in DPLL_ENABLE. */
-       if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
-               drm_err(&dev_priv->drm, "PLL %d locked\n", id);
-
-       /*
-        * 5. If the frequency will result in a change to the voltage
-        * requirement, follow the Display Voltage Frequency Switching
-        * Sequence After Frequency Change
-        *
-        * Note: DVFS is actually handled via the cdclk code paths,
-        * hence we do nothing here.
-        */
-
-       /* 6. Disable DPLL power in DPLL_ENABLE. */
-       val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
-       val &= ~PLL_POWER_ENABLE;
-       intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
-
-       /* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
-       if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
-                                   PLL_POWER_STATE, 5))
-               drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
-}
-
-static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
-                                    struct intel_shared_dpll *pll,
-                                    struct intel_dpll_hw_state *hw_state)
-{
-       const enum intel_dpll_id id = pll->info->id;
-       intel_wakeref_t wakeref;
-       u32 val;
-       bool ret;
-
-       wakeref = intel_display_power_get_if_enabled(dev_priv,
-                                                    POWER_DOMAIN_DISPLAY_CORE);
-       if (!wakeref)
-               return false;
-
-       ret = false;
-
-       val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
-       if (!(val & PLL_ENABLE))
-               goto out;
-
-       val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
-       hw_state->cfgcr0 = val;
-
-       /* avoid reading back stale values if HDMI mode is not enabled */
-       if (val & DPLL_CFGCR0_HDMI_MODE) {
-               hw_state->cfgcr1 = intel_de_read(dev_priv,
-                                                CNL_DPLL_CFGCR1(id));
-       }
-       ret = true;
-
-out:
-       intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
-
-       return ret;
-}
-
-static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
+static void icl_wrpll_get_multipliers(int bestdiv, int *pdiv,
                                      int *qdiv, int *kdiv)
 {
        /* even dividers */
@@ -2538,7 +2385,7 @@ static void cnl_wrpll_get_multipliers(int bestdiv, int *pdiv,
        }
 }
 
-static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
+static void icl_wrpll_params_populate(struct skl_wrpll_params *params,
                                      u32 dco_freq, u32 ref_freq,
                                      int pdiv, int qdiv, int kdiv)
 {
@@ -2586,349 +2433,19 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
        params->dco_fraction = dco & 0x7fff;
 }
 
-static bool
-__cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
-                         struct skl_wrpll_params *wrpll_params,
-                         int ref_clock)
-{
-       u32 afe_clock = crtc_state->port_clock * 5;
-       u32 dco_min = 7998000;
-       u32 dco_max = 10000000;
-       u32 dco_mid = (dco_min + dco_max) / 2;
-       static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
-                                        18, 20, 24, 28, 30, 32,  36,  40,
-                                        42, 44, 48, 50, 52, 54,  56,  60,
-                                        64, 66, 68, 70, 72, 76,  78,  80,
-                                        84, 88, 90, 92, 96, 98, 100, 102,
-                                         3,  5,  7,  9, 15, 21 };
-       u32 dco, best_dco = 0, dco_centrality = 0;
-       u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
-       int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
-
-       for (d = 0; d < ARRAY_SIZE(dividers); d++) {
-               dco = afe_clock * dividers[d];
-
-               if ((dco <= dco_max) && (dco >= dco_min)) {
-                       dco_centrality = abs(dco - dco_mid);
-
-                       if (dco_centrality < best_dco_centrality) {
-                               best_dco_centrality = dco_centrality;
-                               best_div = dividers[d];
-                               best_dco = dco;
-                       }
-               }
-       }
-
-       if (best_div == 0)
-               return false;
-
-       cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
-       cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
-                                 pdiv, qdiv, kdiv);
-
-       return true;
-}
-
-static bool
-cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
-                       struct skl_wrpll_params *wrpll_params)
-{
-       struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-
-       return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
-                                        i915->dpll.ref_clks.nssc);
-}
-
-static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
-{
-       u32 cfgcr0, cfgcr1;
-       struct skl_wrpll_params wrpll_params = { 0, };
-
-       cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
-
-       if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
-               return false;
-
-       cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
-               wrpll_params.dco_integer;
-
-       cfgcr1 = DPLL_CFGCR1_QDIV_RATIO(wrpll_params.qdiv_ratio) |
-               DPLL_CFGCR1_QDIV_MODE(wrpll_params.qdiv_mode) |
-               DPLL_CFGCR1_KDIV(wrpll_params.kdiv) |
-               DPLL_CFGCR1_PDIV(wrpll_params.pdiv) |
-               DPLL_CFGCR1_CENTRAL_FREQ;
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
-       crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
-       return true;
-}
-
 /*
- * Display WA #22010492432: ehl, tgl
+ * Display WA #22010492432: ehl, tgl, adl-p
  * Program half of the nominal DCO divider fraction value.
  */
 static bool
 ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915)
 {
        return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) &&
-                IS_JSL_EHL_REVID(i915, EHL_REVID_B0, REVID_FOREVER)) ||
-                IS_TIGERLAKE(i915)) &&
+                IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) ||
+                IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) &&
                 i915->dpll.ref_clks.nssc == 38400;
 }
 
-static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
-                                   const struct intel_shared_dpll *pll,
-                                   const struct intel_dpll_hw_state *pll_state,
-                                   int ref_clock)
-{
-       u32 dco_fraction;
-       u32 p0, p1, p2, dco_freq;
-
-       p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
-       p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
-
-       if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
-               p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
-                       DPLL_CFGCR1_QDIV_RATIO_SHIFT;
-       else
-               p1 = 1;
-
-
-       switch (p0) {
-       case DPLL_CFGCR1_PDIV_2:
-               p0 = 2;
-               break;
-       case DPLL_CFGCR1_PDIV_3:
-               p0 = 3;
-               break;
-       case DPLL_CFGCR1_PDIV_5:
-               p0 = 5;
-               break;
-       case DPLL_CFGCR1_PDIV_7:
-               p0 = 7;
-               break;
-       }
-
-       switch (p2) {
-       case DPLL_CFGCR1_KDIV_1:
-               p2 = 1;
-               break;
-       case DPLL_CFGCR1_KDIV_2:
-               p2 = 2;
-               break;
-       case DPLL_CFGCR1_KDIV_3:
-               p2 = 3;
-               break;
-       }
-
-       dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
-                  ref_clock;
-
-       dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
-                      DPLL_CFGCR0_DCO_FRACTION_SHIFT;
-
-       if (ehl_combo_pll_div_frac_wa_needed(dev_priv))
-               dco_fraction *= 2;
-
-       dco_freq += (dco_fraction * ref_clock) / 0x8000;
-
-       if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
-               return 0;
-
-       return dco_freq / (p0 * p1 * p2 * 5);
-}
-
-static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
-                                 const struct intel_shared_dpll *pll,
-                                 const struct intel_dpll_hw_state *pll_state)
-{
-       return __cnl_ddi_wrpll_get_freq(i915, pll, pll_state,
-                                       i915->dpll.ref_clks.nssc);
-}
-
-static bool
-cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
-{
-       u32 cfgcr0;
-
-       cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
-
-       switch (crtc_state->port_clock / 2) {
-       case 81000:
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
-               break;
-       case 135000:
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1350;
-               break;
-       case 270000:
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2700;
-               break;
-               /* eDP 1.4 rates */
-       case 162000:
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1620;
-               break;
-       case 108000:
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_1080;
-               break;
-       case 216000:
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_2160;
-               break;
-       case 324000:
-               /* Some SKUs may require elevated I/O voltage to support this */
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_3240;
-               break;
-       case 405000:
-               /* Some SKUs may require elevated I/O voltage to support this */
-               cfgcr0 |= DPLL_CFGCR0_LINK_RATE_4050;
-               break;
-       }
-
-       memset(&crtc_state->dpll_hw_state, 0,
-              sizeof(crtc_state->dpll_hw_state));
-
-       crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
-
-       return true;
-}
-
-static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
-                                 const struct intel_shared_dpll *pll,
-                                 const struct intel_dpll_hw_state *pll_state)
-{
-       int link_clock = 0;
-
-       switch (pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
-       case DPLL_CFGCR0_LINK_RATE_810:
-               link_clock = 81000;
-               break;
-       case DPLL_CFGCR0_LINK_RATE_1080:
-               link_clock = 108000;
-               break;
-       case DPLL_CFGCR0_LINK_RATE_1350:
-               link_clock = 135000;
-               break;
-       case DPLL_CFGCR0_LINK_RATE_1620:
-               link_clock = 162000;
-               break;
-       case DPLL_CFGCR0_LINK_RATE_2160:
-               link_clock = 216000;
-               break;
-       case DPLL_CFGCR0_LINK_RATE_2700:
-               link_clock = 270000;
-               break;
-       case DPLL_CFGCR0_LINK_RATE_3240:
-               link_clock = 324000;
-               break;
-       case DPLL_CFGCR0_LINK_RATE_4050:
-               link_clock = 405000;
-               break;
-       default:
-               drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
-               break;
-       }
-
-       return link_clock * 2;
-}
-
-static bool cnl_get_dpll(struct intel_atomic_state *state,
-                        struct intel_crtc *crtc,
-                        struct intel_encoder *encoder)
-{
-       struct intel_crtc_state *crtc_state =
-               intel_atomic_get_new_crtc_state(state, crtc);
-       struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
-       struct intel_shared_dpll *pll;
-       bool bret;
-
-       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
-               bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
-               if (!bret) {
-                       drm_dbg_kms(&i915->drm,
-                                   "Could not get HDMI pll dividers.\n");
-                       return false;
-               }
-       } else if (intel_crtc_has_dp_encoder(crtc_state)) {
-               bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
-               if (!bret) {
-                       drm_dbg_kms(&i915->drm,
-                                   "Could not set DP dpll HW state.\n");
-                       return false;
-               }
-       } else {
-               drm_dbg_kms(&i915->drm,
-                           "Skip DPLL setup for output_types 0x%x\n",
-                           crtc_state->output_types);
-               return false;
-       }
-
-       pll = intel_find_shared_dpll(state, crtc,
-                                    &crtc_state->dpll_hw_state,
-                                    BIT(DPLL_ID_SKL_DPLL2) |
-                                    BIT(DPLL_ID_SKL_DPLL1) |
-                                    BIT(DPLL_ID_SKL_DPLL0));
-       if (!pll) {
-               drm_dbg_kms(&i915->drm, "No PLL selected\n");
-               return false;
-       }
-
-       intel_reference_shared_dpll(state, crtc,
-                                   pll, &crtc_state->dpll_hw_state);
-
-       crtc_state->shared_dpll = pll;
-
-       return true;
-}
-
-static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
-                               const struct intel_shared_dpll *pll,
-                               const struct intel_dpll_hw_state *pll_state)
-{
-       if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
-               return cnl_ddi_wrpll_get_freq(i915, pll, pll_state);
-       else
-               return cnl_ddi_lcpll_get_freq(i915, pll, pll_state);
-}
-
-static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
-{
-       /* No SSC reference */
-       i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
-}
-
-static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
-                             const struct intel_dpll_hw_state *hw_state)
-{
-       drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
-                   "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
-                   hw_state->cfgcr0,
-                   hw_state->cfgcr1);
-}
-
-static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
-       .enable = cnl_ddi_pll_enable,
-       .disable = cnl_ddi_pll_disable,
-       .get_hw_state = cnl_ddi_pll_get_hw_state,
-       .get_freq = cnl_ddi_pll_get_freq,
-};
-
-static const struct dpll_info cnl_plls[] = {
-       { "DPLL 0", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL0, 0 },
-       { "DPLL 1", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
-       { "DPLL 2", &cnl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
-       { },
-};
-
-static const struct intel_dpll_mgr cnl_pll_mgr = {
-       .dpll_info = cnl_plls,
-       .get_dplls = cnl_get_dpll,
-       .put_dplls = intel_put_dpll,
-       .update_ref_clks = cnl_update_dpll_ref_clks,
-       .dump_hw_state = cnl_dump_hw_state,
-};
-
 struct icl_combo_pll_params {
        int clock;
        struct skl_wrpll_params wrpll;
@@ -3105,17 +2622,104 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state,
               struct skl_wrpll_params *wrpll_params)
 {
        struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+       int ref_clock = icl_wrpll_ref_clock(i915);
+       u32 afe_clock = crtc_state->port_clock * 5;
+       u32 dco_min = 7998000;
+       u32 dco_max = 10000000;
+       u32 dco_mid = (dco_min + dco_max) / 2;
+       static const int dividers[] = {  2,  4,  6,  8, 10, 12,  14,  16,
+                                        18, 20, 24, 28, 30, 32,  36,  40,
+                                        42, 44, 48, 50, 52, 54,  56,  60,
+                                        64, 66, 68, 70, 72, 76,  78,  80,
+                                        84, 88, 90, 92, 96, 98, 100, 102,
+                                         3,  5,  7,  9, 15, 21 };
+       u32 dco, best_dco = 0, dco_centrality = 0;
+       u32 best_dco_centrality = U32_MAX; /* Spec meaning of 999999 MHz */
+       int d, best_div = 0, pdiv = 0, qdiv = 0, kdiv = 0;
 
-       return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
-                                        icl_wrpll_ref_clock(i915));
+       for (d = 0; d < ARRAY_SIZE(dividers); d++) {
+               dco = afe_clock * dividers[d];
+
+               if (dco <= dco_max && dco >= dco_min) {
+                       dco_centrality = abs(dco - dco_mid);
+
+                       if (dco_centrality < best_dco_centrality) {
+                               best_dco_centrality = dco_centrality;
+                               best_div = dividers[d];
+                               best_dco = dco;
+                       }
+               }
+       }
+
+       if (best_div == 0)
+               return false;
+
+       icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
+       icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
+                                 pdiv, qdiv, kdiv);
+
+       return true;
 }
 
 static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
                                      const struct intel_shared_dpll *pll,
                                      const struct intel_dpll_hw_state *pll_state)
 {
-       return __cnl_ddi_wrpll_get_freq(i915, pll, pll_state,
-                                       icl_wrpll_ref_clock(i915));
+       int ref_clock = icl_wrpll_ref_clock(i915);
+       u32 dco_fraction;
+       u32 p0, p1, p2, dco_freq;
+
+       p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+       p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+
+       if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+               p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+                       DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+       else
+               p1 = 1;
+
+       switch (p0) {
+       case DPLL_CFGCR1_PDIV_2:
+               p0 = 2;
+               break;
+       case DPLL_CFGCR1_PDIV_3:
+               p0 = 3;
+               break;
+       case DPLL_CFGCR1_PDIV_5:
+               p0 = 5;
+               break;
+       case DPLL_CFGCR1_PDIV_7:
+               p0 = 7;
+               break;
+       }
+
+       switch (p2) {
+       case DPLL_CFGCR1_KDIV_1:
+               p2 = 1;
+               break;
+       case DPLL_CFGCR1_KDIV_2:
+               p2 = 2;
+               break;
+       case DPLL_CFGCR1_KDIV_3:
+               p2 = 3;
+               break;
+       }
+
+       dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
+                  ref_clock;
+
+       dco_fraction = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+                      DPLL_CFGCR0_DCO_FRACTION_SHIFT;
+
+       if (ehl_combo_pll_div_frac_wa_needed(i915))
+               dco_fraction *= 2;
+
+       dco_freq += (dco_fraction * ref_clock) / 0x8000;
+
+       if (drm_WARN_ON(&i915->drm, p0 == 0 || p1 == 0 || p2 == 0))
+               return 0;
+
+       return dco_freq / (p0 * p1 * p2 * 5);
 }
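A worked example of the readback formula above, with illustrative numbers chosen only to be self-consistent (and ignoring the fraction-doubling workaround): for ref_clock = 38400 kHz, dco_integer = 226, dco_fraction = 0x4000, pdiv = 2, qdiv disabled (p1 = 1) and kdiv = 2, dco_freq = 226 * 38400 + (0x4000 * 38400) / 0x8000 = 8697600 kHz, which sits inside the 7998000-10000000 kHz DCO range used above, and the reported port clock is 8697600 / (2 * 1 * 2 * 5) = 434880 kHz.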
 
 static void icl_calc_dpll_state(struct drm_i915_private *i915,
@@ -4131,6 +3735,31 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
                drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
 }
 
+static void adlp_cmtg_clock_gating_wa(struct drm_i915_private *i915, struct intel_shared_dpll *pll)
+{
+       u32 val;
+
+       if (!IS_ADLP_DISPLAY_STEP(i915, STEP_A0, STEP_B0) ||
+           pll->info->id != DPLL_ID_ICL_DPLL0)
+               return;
+       /*
+        * Wa_16011069516:adl-p[a0]
+        *
+        * All CMTG regs are unreliable until CMTG clock gating is disabled,
+        * so we can only assume the default TRANS_CMTG_CHICKEN reg value and
+        * sanity check this assumption with a double read, which presumably
+        * returns the correct value even with clock gating on.
+        *
+        * Instead of the usual place for workarounds we apply this one here,
+        * since TRANS_CMTG_CHICKEN is only accessible while DPLL0 is enabled.
+        */
+       val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
+       val = intel_de_read(i915, TRANS_CMTG_CHICKEN);
+       intel_de_write(i915, TRANS_CMTG_CHICKEN, DISABLE_DPT_CLK_GATING);
+       if (drm_WARN_ON(&i915->drm, val & ~DISABLE_DPT_CLK_GATING))
+               drm_dbg_kms(&i915->drm, "Unexpected flags in TRANS_CMTG_CHICKEN: %08x\n", val);
+}
+
 static void combo_pll_enable(struct drm_i915_private *dev_priv,
                             struct intel_shared_dpll *pll)
 {
@@ -4160,6 +3789,8 @@ static void combo_pll_enable(struct drm_i915_private *dev_priv,
 
        icl_pll_enable(dev_priv, pll, enable_reg);
 
+       adlp_cmtg_clock_gating_wa(dev_priv, pll);
+
        /* DVFS post sequence would be here. See the comment above. */
 }
 
@@ -4462,7 +4093,10 @@ void intel_shared_dpll_init(struct drm_device *dev)
        const struct dpll_info *dpll_info;
        int i;
 
-       if (IS_ALDERLAKE_P(dev_priv))
+       if (IS_DG2(dev_priv))
+               /* No shared DPLLs on DG2; port PLLs are part of the PHY */
+               dpll_mgr = NULL;
+       else if (IS_ALDERLAKE_P(dev_priv))
                dpll_mgr = &adlp_pll_mgr;
        else if (IS_ALDERLAKE_S(dev_priv))
                dpll_mgr = &adls_pll_mgr;
@@ -4476,8 +4110,6 @@ void intel_shared_dpll_init(struct drm_device *dev)
                dpll_mgr = &ehl_pll_mgr;
        else if (DISPLAY_VER(dev_priv) >= 11)
                dpll_mgr = &icl_pll_mgr;
-       else if (IS_CANNONLAKE(dev_priv))
-               dpll_mgr = &cnl_pll_mgr;
        else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                dpll_mgr = &bxt_pll_mgr;
        else if (DISPLAY_VER(dev_priv) == 9)
@@ -4668,7 +4300,12 @@ void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
 static void sanitize_dpll_state(struct drm_i915_private *i915,
                                struct intel_shared_dpll *pll)
 {
-       if (!pll->on || pll->active_mask)
+       if (!pll->on)
+               return;
+
+       adlp_cmtg_clock_gating_wa(i915, pll);
+
+       if (pll->active_mask)
                return;
 
        drm_dbg_kms(&i915->drm,
index 7fd031a..30e0aa5 100644 (file)
@@ -204,9 +204,8 @@ struct intel_dpll_hw_state {
        /* HDMI only, 0 when used for DP */
        u32 cfgcr1, cfgcr2;
 
-       /* cnl */
+       /* icl */
        u32 cfgcr0;
-       /* CNL also uses cfgcr1 */
 
        /* bxt */
        u32 ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10, pcsdw12;
index 82effb6..ddfc17e 100644 (file)
@@ -912,11 +912,11 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
        }
 
        /*
-        * Tigerlake is not supporting FBC with PSR2.
+        * Display 12+ does not support FBC with PSR2.
         * Recommendation is to keep this combination disabled
         * Bspec: 50422 HSD: 14010260002
         */
-       if (fbc->state_cache.psr2_active && IS_TIGERLAKE(dev_priv)) {
+       if (fbc->state_cache.psr2_active && DISPLAY_VER(dev_priv) >= 12) {
                fbc->no_fbc_reason = "not supported with PSR2";
                return false;
        }
index fcf47f9..ceb1bf8 100644 (file)
@@ -600,7 +600,7 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
        int i = 0, inc, try = 0;
        int ret = 0;
 
-       /* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
+       /* Display WA #0868: skl,bxt,kbl,cfl,glk */
        if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                bxt_gmbus_clock_gating(dev_priv, false);
        else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
@@ -713,7 +713,7 @@ timeout:
        ret = -EAGAIN;
 
 out:
-       /* Display WA #0868: skl,bxt,kbl,cfl,glk,cnl */
+       /* Display WA #0868: skl,bxt,kbl,cfl,glk */
        if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
                bxt_gmbus_clock_gating(dev_priv, true);
        else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_CNP(dev_priv))
index 852af2b..b04685b 100644 (file)
@@ -51,6 +51,7 @@
 #include "intel_hdmi.h"
 #include "intel_lspcon.h"
 #include "intel_panel.h"
+#include "intel_snps_phy.h"
 
 static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
 {
@@ -1850,6 +1851,16 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
        if (IS_CHERRYVIEW(dev_priv) && clock > 216000 && clock < 240000)
                return MODE_CLOCK_RANGE;
 
+       /*
+        * SNPS PHYs' MPLLB table-based programming can only handle a fixed
+        * set of link rates.
+        *
+        * FIXME: We will hopefully get an algorithmic way of programming
+        * the MPLLB for HDMI in the future.
+        */
+       if (IS_DG2(dev_priv))
+               return intel_snps_phy_check_hdmi_link_rate(clock);
+
        return MODE_OK;
 }
 
index dfd724e..3855fba 100644 (file)
@@ -1078,6 +1078,9 @@ void intel_opregion_resume(struct drm_i915_private *i915)
                opregion->asle->ardy = ASLE_ARDY_READY;
        }
 
+       /* Some platforms abuse the _DSM to enable MUX */
+       intel_dsm_get_bios_data_funcs_supported(i915);
+
        intel_opregion_notify_adapter(i915, PCI_D0);
 }
 
index 9643624..1b0daf6 100644 (file)
@@ -32,6 +32,7 @@
 #include "intel_dp_aux.h"
 #include "intel_hdmi.h"
 #include "intel_psr.h"
+#include "intel_snps_phy.h"
 #include "intel_sprite.h"
 #include "skl_universal_plane.h"
 
@@ -534,19 +535,21 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
 static void hsw_activate_psr2(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-       u32 val;
+       u32 val = EDP_PSR2_ENABLE;
+
+       val |= psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
 
-       val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
+       if (!IS_ALDERLAKE_P(dev_priv))
+               val |= EDP_SU_TRACK_ENABLE;
 
-       val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
        if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
                val |= EDP_Y_COORDINATE_ENABLE;
 
        val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
        val |= intel_psr2_get_tp_time(intel_dp);
 
-       /* Wa_22012278275:adlp */
-       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_D1)) {
+       /* Wa_22012278275:adl-p */
+       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
                static const u8 map[] = {
                        2, /* 5 lines */
                        1, /* 6 lines */
@@ -592,9 +595,8 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
                val |= EDP_PSR2_SU_SDP_SCANLINE;
 
        if (intel_dp->psr.psr2_sel_fetch_enabled) {
-               /* WA 1408330847 */
-               if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) ||
-                   IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0))
+               /* Wa_1408330847 */
+               if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
                        intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
                                     DIS_RAM_BYPASS_PSR2_MAN_TRACK,
                                     DIS_RAM_BYPASS_PSR2_MAN_TRACK);
@@ -733,8 +735,8 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
        if (!dc3co_is_pipe_port_compatible(intel_dp, crtc_state))
                return;
 
-       /* Wa_16011303918:adlp */
-       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
+       /* Wa_16011303918:adl-p */
+       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
                return;
 
        /*
@@ -781,7 +783,7 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
        }
 
        /* Wa_14010254185 Wa_14010103792 */
-       if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B1)) {
+       if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
                drm_dbg_kms(&dev_priv->drm,
                            "PSR2 sel fetch not enabled, missing the implementation of WAs\n");
                return false;
@@ -793,6 +795,7 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
 static bool psr2_granularity_check(struct intel_dp *intel_dp,
                                   struct intel_crtc_state *crtc_state)
 {
+       struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
        const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
        const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
        u16 y_granularity = 0;
@@ -809,10 +812,13 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
                return intel_dp->psr.su_y_granularity == 4;
 
        /*
-        * For SW tracking we can adjust the y to match sink requirement if
-        * multiple of 4
+        * adl-p has 1 line granularity. For other platforms with SW tracking we
+        * can adjust the y coordinates to match the sink requirement if it is a
+        * multiple of 4.
         */
-       if (intel_dp->psr.su_y_granularity <= 2)
+       if (IS_ALDERLAKE_P(dev_priv))
+               y_granularity = intel_dp->psr.su_y_granularity;
+       else if (intel_dp->psr.su_y_granularity <= 2)
                y_granularity = 4;
        else if ((intel_dp->psr.su_y_granularity % 4) == 0)
                y_granularity = intel_dp->psr.su_y_granularity;
@@ -865,7 +871,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
        }
 
        /* Wa_16011181250 */
-       if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv)) {
+       if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
+           IS_DG2(dev_priv)) {
                drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
                return false;
        }
@@ -940,7 +947,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 
        /* Wa_2209313811 */
        if (!crtc_state->enable_psr2_sel_fetch &&
-           IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B1)) {
+           IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
                drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
                return false;
        }
@@ -965,9 +972,9 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
                return false;
        }
 
-       /* Wa_16011303918:adlp */
+       /* Wa_16011303918:adl-p */
        if (crtc_state->vrr.enable &&
-           IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0)) {
+           IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
                drm_dbg_kms(&dev_priv->drm,
                            "PSR2 not enabled, not compatible with HW stepping + VRR\n");
                return false;
@@ -1160,8 +1167,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp)
                             intel_dp->psr.psr2_sel_fetch_enabled ?
                             IGNORE_PSR2_HW_TRACKING : 0);
 
-       /* Wa_16011168373:adlp */
-       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) &&
+       /* Wa_16011168373:adl-p */
+       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) &&
            intel_dp->psr.psr2_enabled)
                intel_de_rmw(dev_priv,
                             TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
@@ -1206,6 +1213,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
 {
        struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
        struct intel_encoder *encoder = &dig_port->base;
        u32 val;
 
@@ -1231,6 +1239,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
        intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
                                     &intel_dp->psr.vsc);
        intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
+       intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
        intel_psr_enable_sink(intel_dp);
        intel_psr_enable_source(intel_dp);
        intel_dp->psr.enabled = true;
@@ -1327,6 +1336,8 @@ static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
 static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 {
        struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+       enum phy phy = intel_port_to_phy(dev_priv,
+                                        dp_to_dig_port(intel_dp)->base.port);
 
        lockdep_assert_held(&intel_dp->psr.lock);
 
@@ -1339,20 +1350,21 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
        intel_psr_exit(intel_dp);
        intel_psr_wait_exit_locked(intel_dp);
 
-       /* W1408330847 */
+       /* Wa_1408330847 */
        if (intel_dp->psr.psr2_sel_fetch_enabled &&
-           (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) ||
-            IS_RKL_REVID(dev_priv, RKL_REVID_A0, RKL_REVID_A0)))
+           IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
                intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
                             DIS_RAM_BYPASS_PSR2_MAN_TRACK, 0);
 
-       /* Wa_16011168373:adlp */
-       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0) &&
+       /* Wa_16011168373:adl-p */
+       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0) &&
            intel_dp->psr.psr2_enabled)
                intel_de_rmw(dev_priv,
                             TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
                             TRANS_SET_CONTEXT_LATENCY_MASK, 0);
 
+       intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
+
        /* Disable PSR on Sink */
        drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
 
@@ -1525,21 +1537,32 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
 static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
                                  struct drm_rect *clip, bool full_update)
 {
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+       struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        u32 val = PSR2_MAN_TRK_CTL_ENABLE;
 
        if (full_update) {
-               val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
+               if (IS_ALDERLAKE_P(dev_priv))
+                       val |= ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
+               else
+                       val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
+
                goto exit;
        }
 
        if (clip->y1 == -1)
                goto exit;
 
-       drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
+       if (IS_ALDERLAKE_P(dev_priv)) {
+               val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
+               val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2);
+       } else {
+               drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
 
-       val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
-       val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
-       val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
+               val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
+               val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1);
+               val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1);
+       }
 exit:
        crtc_state->psr2_man_track_ctl = val;
 }
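For example, using the formulas above: a selective-update clip of y1 = 8, y2 = 16 is programmed on pre-adl-p hardware in 4-line blocks, so START = 8 / 4 + 1 = 3 and END = 16 / 4 + 1 = 5, while adl-p takes the raw scanlines 8 and 16 directly.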
@@ -1563,11 +1586,15 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
 static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *crtc_state,
                                                struct drm_rect *pipe_clip)
 {
+       struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
        const u16 y_alignment = crtc_state->su_y_granularity;
 
        pipe_clip->y1 -= pipe_clip->y1 % y_alignment;
        if (pipe_clip->y2 % y_alignment)
                pipe_clip->y2 = ((pipe_clip->y2 / y_alignment) + 1) * y_alignment;
+
+       if (IS_ALDERLAKE_P(dev_priv) && crtc_state->dsc.compression_enable)
+               drm_warn(&dev_priv->drm, "Missing PSR2 sel fetch alignment with DSC\n");
 }
 
 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
@@ -1699,6 +1726,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
                sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
                sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
                sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
+               crtc_state->update_planes |= BIT(plane->id);
        }
 
 skip_sel_fetch_set_loop:
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
new file mode 100644 (file)
index 0000000..18b52b6
--- /dev/null
@@ -0,0 +1,862 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/util_macros.h>
+
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_snps_phy.h"
+
+/**
+ * DOC: Synopsys PHY support
+ *
+ * Synopsys PHYs are primarily programmed by looking up magic register values
+ * in tables rather than calculating the necessary values at runtime.
+ *
+ * Of special note is that the SNPS PHYs include a dedicated port PLL, known as
+ * an "MPLLB."  The MPLLB replaces the shared DPLL functionality used on other
+ * platforms and must be programmed directly during the modeset sequence,
+ * since it is not handled by the shared DPLL framework.
+ */
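The register tables that follow are NULL-terminated arrays of struct intel_mpllb_state keyed by link clock; the actual selection logic lives in intel_mpllb_calc_state(), whose start appears truncated at the end of this excerpt. A minimal sketch of that lookup pattern, assuming only exact clock matches are programmable (the helper name is hypothetical and not part of this patch):

static const struct intel_mpllb_state *
lookup_mpllb_state(const struct intel_mpllb_state * const *tables, int clock)
{
        int i;

        /* Tables are NULL-terminated; only exact rate matches are usable. */
        for (i = 0; tables[i]; i++) {
                if (tables[i]->clock == clock)
                        return tables[i];
        }

        return NULL;
}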
+
+void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv)
+{
+       enum phy phy;
+
+       for_each_phy_masked(phy, ~0) {
+               if (!intel_phy_is_snps(dev_priv, phy))
+                       continue;
+
+               if (intel_de_wait_for_clear(dev_priv, ICL_PHY_MISC(phy),
+                                           DG2_PHY_DP_TX_ACK_MASK, 25))
+                       DRM_ERROR("SNPS PHY %c failed to calibrate after 25ms.\n",
+                                 phy);
+       }
+}
+
+void intel_snps_phy_update_psr_power_state(struct drm_i915_private *dev_priv,
+                                          enum phy phy, bool enable)
+{
+       u32 val;
+
+       if (!intel_phy_is_snps(dev_priv, phy))
+               return;
+
+       val = REG_FIELD_PREP(SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR,
+                            enable ? 2 : 3);
+       intel_uncore_rmw(&dev_priv->uncore, SNPS_PHY_TX_REQ(phy),
+                        SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR, val);
+}
+
+static const u32 dg2_ddi_translations[] = {
+       /* VS 0, pre-emph 0 */
+       REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 26),
+
+       /* VS 0, pre-emph 1 */
+       REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 33) |
+               REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 6),
+
+       /* VS 0, pre-emph 2 */
+       REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 38) |
+               REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 12),
+
+       /* VS 0, pre-emph 3 */
+       REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 43) |
+               REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 19),
+
+       /* VS 1, pre-emph 0 */
+       REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 39),
+
+       /* VS 1, pre-emph 1 */
+       REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 44) |
+               REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 8),
+
+       /* VS 1, pre-emph 2 */
+       REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 47) |
+               REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 15),
+
+       /* VS 2, pre-emph 0 */
+       REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 52),
+
+       /* VS 2, pre-emph 1 */
+       REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 51) |
+               REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 10),
+
+       /* VS 3, pre-emph 0 */
+       REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 62),
+};
+
+void intel_snps_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
+                                       u32 level)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+       int n_entries, ln;
+
+       n_entries = ARRAY_SIZE(dg2_ddi_translations);
+       if (level >= n_entries)
+               level = n_entries - 1;
+
+       for (ln = 0; ln < 4; ln++)
+               intel_de_write(dev_priv, SNPS_PHY_TX_EQ(ln, phy),
+                              dg2_ddi_translations[level]);
+}
+
+/*
+ * Basic DP link rates with 100 MHz reference clock.
+ */
+
+static const struct intel_mpllb_state dg2_dp_rbr_100 = {
+       .clock = 162000,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 226),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 3),
+};
+
+static const struct intel_mpllb_state dg2_dp_hbr1_100 = {
+       .clock = 270000,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 184),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+};
+
+static const struct intel_mpllb_state dg2_dp_hbr2_100 = {
+       .clock = 540000,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 184),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+};
+
+static const struct intel_mpllb_state dg2_dp_hbr3_100 = {
+       .clock = 810000,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 19) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 292),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+};
+
+static const struct intel_mpllb_state *dg2_dp_100_tables[] = {
+       &dg2_dp_rbr_100,
+       &dg2_dp_hbr1_100,
+       &dg2_dp_hbr2_100,
+       &dg2_dp_hbr3_100,
+       NULL,
+};
+
+/*
+ * Basic DP link rates with 38.4 MHz reference clock.
+ */
+
+static const struct intel_mpllb_state dg2_dp_rbr_38_4 = {
+       .clock = 162000,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 25) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 304),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 49152),
+};
+
+static const struct intel_mpllb_state dg2_dp_hbr1_38_4 = {
+       .clock = 270000,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 25) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 248),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 40960),
+};
+
+static const struct intel_mpllb_state dg2_dp_hbr2_38_4 = {
+       .clock = 540000,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 25) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 248),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 40960),
+};
+
+static const struct intel_mpllb_state dg2_dp_hbr3_38_4 = {
+       .clock = 810000,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 26) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 388),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 61440),
+};
+
+static const struct intel_mpllb_state *dg2_dp_38_4_tables[] = {
+       &dg2_dp_rbr_38_4,
+       &dg2_dp_hbr1_38_4,
+       &dg2_dp_hbr2_38_4,
+       &dg2_dp_hbr3_38_4,
+       NULL,
+};
+
+/*
+ * eDP link rates with 100 MHz reference clock.
+ */
+
+static const struct intel_mpllb_state dg2_edp_r216 = {
+       .clock = 216000,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 19) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 312),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 4),
+       .mpllb_sscen =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 50961),
+       .mpllb_sscstep =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 65752),
+};
+
+static const struct intel_mpllb_state dg2_edp_r243 = {
+       .clock = 243000,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 356),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2),
+       .mpllb_sscen =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 57331),
+       .mpllb_sscstep =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 73971),
+};
+
+static const struct intel_mpllb_state dg2_edp_r324 = {
+       .clock = 324000,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 20) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 226),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 39321) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 3),
+       .mpllb_sscen =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 38221),
+       .mpllb_sscstep =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 49314),
+};
+
+static const struct intel_mpllb_state dg2_edp_r432 = {
+       .clock = 432000,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 19) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 312),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 52428) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 4),
+       .mpllb_sscen =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 50961),
+       .mpllb_sscstep =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 65752),
+};
+
+static const struct intel_mpllb_state *dg2_edp_tables[] = {
+       &dg2_dp_rbr_100,
+       &dg2_edp_r216,
+       &dg2_edp_r243,
+       &dg2_dp_hbr1_100,
+       &dg2_edp_r324,
+       &dg2_edp_r432,
+       &dg2_dp_hbr2_100,
+       &dg2_dp_hbr3_100,
+       NULL,
+};
+
+/*
+ * HDMI link rates with 100 MHz reference clock.
+ */
+
+static const struct intel_mpllb_state dg2_hdmi_25_175 = {
+       .clock = 25175,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 128) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 143),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36663) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 71),
+       .mpllb_sscen =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_27_0 = {
+       .clock = 27000,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 5) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 140) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2),
+       .mpllb_sscen =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_74_25 = {
+       .clock = 74250,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 3) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2),
+       .mpllb_sscen =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_148_5 = {
+       .clock = 148500,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2),
+       .mpllb_sscen =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state dg2_hdmi_594 = {
+       .clock = 594000,
+       .ref_control =
+               REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+       .mpllb_cp =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 15) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124),
+       .mpllb_div =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3),
+       .mpllb_div2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 86) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1),
+       .mpllb_fracn1 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 5),
+       .mpllb_fracn2 =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 26214) |
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2),
+       .mpllb_sscen =
+               REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
+};
+
+static const struct intel_mpllb_state *dg2_hdmi_tables[] = {
+       &dg2_hdmi_25_175,
+       &dg2_hdmi_27_0,
+       &dg2_hdmi_74_25,
+       &dg2_hdmi_148_5,
+       &dg2_hdmi_594,
+       NULL,
+};
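+
+/*
+ * dg2_hdmi_tables[] is NULL terminated and sorted by ascending .clock;
+ * intel_mpllb_calc_state() below relies on the termination to bound its
+ * walk and on the ordering to pick the first entry that can carry the
+ * requested port clock.
+ */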
+
+static const struct intel_mpllb_state **
+intel_mpllb_tables_get(struct intel_crtc_state *crtc_state,
+                      struct intel_encoder *encoder)
+{
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
+               return dg2_edp_tables;
+       } else if (intel_crtc_has_dp_encoder(crtc_state)) {
+               /*
+                * FIXME: Initially we're just enabling the "combo" outputs on
+                * port A-D.  The MPLLB for those ports takes an input from the
+                * "Display Filter PLL" which always has an output frequency
+                * of 100 MHz, hence the use of the _100 tables below.
+                *
+                * Once we enable port TC1 it will either use the same 100 MHz
+                * "Display Filter PLL" (when strapped to support a native
+                * display connection) or different 38.4 MHz "Filter PLL" when
+                * strapped to support a USB connection, so we'll need to check
+                * that to determine which table to use.
+                */
+               if (0)
+                       return dg2_dp_38_4_tables;
+               else
+                       return dg2_dp_100_tables;
+       } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+               return dg2_hdmi_tables;
+       }
+
+       MISSING_CASE(encoder->type);
+       return NULL;
+}
+
+int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
+                          struct intel_encoder *encoder)
+{
+       const struct intel_mpllb_state **tables;
+       int i;
+
+       if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+               if (intel_snps_phy_check_hdmi_link_rate(crtc_state->port_clock)
+                   != MODE_OK) {
+                       /*
+                        * FIXME: Can only support fixed HDMI frequencies
+                        * until we have a proper algorithm under a valid
+                        * license.
+                        */
+                       DRM_DEBUG_KMS("Can't support HDMI link rate %d\n",
+                                     crtc_state->port_clock);
+                       return -EINVAL;
+               }
+       }
+
+       tables = intel_mpllb_tables_get(crtc_state, encoder);
+       if (!tables)
+               return -EINVAL;
+
+       for (i = 0; tables[i]; i++) {
+               if (crtc_state->port_clock <= tables[i]->clock) {
+                       crtc_state->mpllb_state = *tables[i];
+                       return 0;
+               }
+       }
+
+       return -EINVAL;
+}
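+
+/*
+ * For example, an HDMI port clock of 148500 kHz passes the exact-match
+ * check in intel_snps_phy_check_hdmi_link_rate() and the loop above then
+ * copies dg2_hdmi_148_5 into crtc_state->mpllb_state, the first entry
+ * whose .clock is not below the requested rate.
+ */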
+
+void intel_mpllb_enable(struct intel_encoder *encoder,
+                       const struct intel_crtc_state *crtc_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       const struct intel_mpllb_state *pll_state = &crtc_state->mpllb_state;
+       enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+       i915_reg_t enable_reg = (phy <= PHY_D ?
+                                DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0));
+
+       /*
+        * 3. Software programs the following PLL registers for the desired
+        * frequency.
+        */
+       intel_de_write(dev_priv, SNPS_PHY_MPLLB_CP(phy), pll_state->mpllb_cp);
+       intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV(phy), pll_state->mpllb_div);
+       intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV2(phy), pll_state->mpllb_div2);
+       intel_de_write(dev_priv, SNPS_PHY_MPLLB_SSCEN(phy), pll_state->mpllb_sscen);
+       intel_de_write(dev_priv, SNPS_PHY_MPLLB_SSCSTEP(phy), pll_state->mpllb_sscstep);
+       intel_de_write(dev_priv, SNPS_PHY_MPLLB_FRACN1(phy), pll_state->mpllb_fracn1);
+       intel_de_write(dev_priv, SNPS_PHY_MPLLB_FRACN2(phy), pll_state->mpllb_fracn2);
+
+       /*
+        * 4. If the frequency will result in a change to the voltage
+        * requirement, follow the Display Voltage Frequency Switching -
+        * Sequence Before Frequency Change.
+        *
+        * We handle this step in bxt_set_cdclk().
+        */
+
+       /* 5. Software sets DPLL_ENABLE [PLL Enable] to "1". */
+       intel_uncore_rmw(&dev_priv->uncore, enable_reg, 0, PLL_ENABLE);
+
+       /*
+        * 9. Software sets SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "1". This
+        * will keep the PLL running during the DDI lane programming and any
+        * typeC DP cable disconnect. Do not set the force before enabling the
+        * PLL because that will start the PLL before it has sampled the
+        * divider values.
+        */
+       intel_de_write(dev_priv, SNPS_PHY_MPLLB_DIV(phy),
+                      pll_state->mpllb_div | SNPS_PHY_MPLLB_FORCE_EN);
+
+       /*
+        * 10. Software polls on register DPLL_ENABLE [PLL Lock] to confirm PLL
+        * is locked at new settings. This register bit samples the PHY
+        * dp_mpllb_state interface signal.
+        */
+       if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 5))
+               DRM_ERROR("Port %c PLL not locked\n", phy_name(phy));
+
+       /*
+        * 11. If the frequency will result in a change to the voltage
+        * requirement, follow the Display Voltage Frequency Switching -
+        * Sequence After Frequency Change.
+        *
+        * We handle this step in bxt_set_cdclk().
+        */
+}
+
+void intel_mpllb_disable(struct intel_encoder *encoder)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+       i915_reg_t enable_reg = (phy <= PHY_D ?
+                                DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0));
+
+       /*
+        * 1. If the frequency will result in a change to the voltage
+        * requirement, follow the Display Voltage Frequency Switching -
+        * Sequence Before Frequency Change.
+        *
+        * We handle this step in bxt_set_cdclk().
+        */
+
+       /* 2. Software programs DPLL_ENABLE [PLL Enable] to "0" */
+       intel_uncore_rmw(&dev_priv->uncore, enable_reg, PLL_ENABLE, 0);
+
+       /*
+        * 4. Software programs SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "0".
+        * This will allow the PLL to stop running.
+        */
+       intel_uncore_rmw(&dev_priv->uncore, SNPS_PHY_MPLLB_DIV(phy),
+                        SNPS_PHY_MPLLB_FORCE_EN, 0);
+
+       /*
+        * 5. Software polls DPLL_ENABLE [PLL Lock] for PHY acknowledgment
+        * (dp_txX_ack) that the new transmitter setting request is completed.
+        */
+       if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 5))
+               DRM_ERROR("Port %c PLL still locked\n", phy_name(phy));
+
+       /*
+        * 6. If the frequency will result in a change to the voltage
+        * requirement, follow the Display Voltage Frequency Switching -
+        * Sequence After Frequency Change.
+        *
+        * We handle this step in bxt_set_cdclk().
+        */
+}
+
+int intel_mpllb_calc_port_clock(struct intel_encoder *encoder,
+                               const struct intel_mpllb_state *pll_state)
+{
+       unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
+       unsigned int multiplier, tx_clk_div, refclk;
+       bool frac_en;
+
+       if (0)
+               refclk = 38400;
+       else
+               refclk = 100000;
+
+       refclk >>= REG_FIELD_GET(SNPS_PHY_MPLLB_REF_CLK_DIV, pll_state->mpllb_div2) - 1;
+
+       frac_en = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_EN, pll_state->mpllb_fracn1);
+
+       if (frac_en) {
+               frac_quot = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_QUOT, pll_state->mpllb_fracn2);
+               frac_rem = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_REM, pll_state->mpllb_fracn2);
+               frac_den = REG_FIELD_GET(SNPS_PHY_MPLLB_FRACN_DEN, pll_state->mpllb_fracn1);
+       }
+
+       multiplier = REG_FIELD_GET(SNPS_PHY_MPLLB_MULTIPLIER, pll_state->mpllb_div2) / 2 + 16;
+
+       tx_clk_div = REG_FIELD_GET(SNPS_PHY_MPLLB_TX_CLK_DIV, pll_state->mpllb_div);
+
+       return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) +
+                                    DIV_ROUND_CLOSEST(refclk * frac_rem, frac_den),
+                                    10 << (tx_clk_div + 16));
+}
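+
+/*
+ * Example: for the dg2_hdmi_148_5 state, refclk stays at 100000 kHz
+ * (REF_CLK_DIV == 1), multiplier = 86 / 2 + 16 = 59, frac_quot = 26214,
+ * frac_rem = 2, frac_den = 5 and tx_clk_div = 2, so the result is
+ * (100000 * (59 * 65536 + 26214) + 100000 * 2 / 5) / (10 << 18) =
+ * 389283840000 / 2621440 = 148500 kHz, matching the table's .clock.
+ */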
+
+void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
+                                 struct intel_mpllb_state *pll_state)
+{
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+
+       pll_state->mpllb_cp = intel_de_read(dev_priv, SNPS_PHY_MPLLB_CP(phy));
+       pll_state->mpllb_div = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV(phy));
+       pll_state->mpllb_div2 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_DIV2(phy));
+       pll_state->mpllb_sscen = intel_de_read(dev_priv, SNPS_PHY_MPLLB_SSCEN(phy));
+       pll_state->mpllb_sscstep = intel_de_read(dev_priv, SNPS_PHY_MPLLB_SSCSTEP(phy));
+       pll_state->mpllb_fracn1 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_FRACN1(phy));
+       pll_state->mpllb_fracn2 = intel_de_read(dev_priv, SNPS_PHY_MPLLB_FRACN2(phy));
+
+       /*
+        * REF_CONTROL is under firmware control and never programmed by the
+        * driver; we read it only for sanity checking purposes.  The bspec
+        * only tells us the expected value for one field in this register,
+        * so we'll only read out those specific bits here.
+        */
+       pll_state->ref_control = intel_de_read(dev_priv, SNPS_PHY_REF_CONTROL(phy)) &
+               SNPS_PHY_REF_CONTROL_REF_RANGE;
+
+       /*
+        * MPLLB_DIV is programmed twice, once with the software-computed
+        * state, then again with the MPLLB_FORCE_EN bit added.  Drop that
+        * extra bit during readout so that we return the actual expected
+        * software state.
+        */
+       pll_state->mpllb_div &= ~SNPS_PHY_MPLLB_FORCE_EN;
+}
+
+int intel_snps_phy_check_hdmi_link_rate(int clock)
+{
+       const struct intel_mpllb_state **tables = dg2_hdmi_tables;
+       int i;
+
+       for (i = 0; tables[i]; i++) {
+               if (clock == tables[i]->clock)
+                       return MODE_OK;
+       }
+
+       return MODE_CLOCK_RANGE;
+}
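+
+/*
+ * E.g. 74250 kHz matches dg2_hdmi_74_25 and returns MODE_OK, while a
+ * rate such as 75000 kHz returns MODE_CLOCK_RANGE: only the fixed
+ * frequencies in dg2_hdmi_tables[] are supported for HDMI (see the
+ * FIXME in intel_mpllb_calc_state()).
+ */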
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h
new file mode 100644 (file)
index 0000000..6261ff8
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_SNPS_PHY_H__
+#define __INTEL_SNPS_PHY_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_encoder;
+struct intel_crtc_state;
+struct intel_mpllb_state;
+enum phy;
+
+void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv);
+void intel_snps_phy_update_psr_power_state(struct drm_i915_private *dev_priv,
+                                          enum phy phy, bool enable);
+
+int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
+                          struct intel_encoder *encoder);
+void intel_mpllb_enable(struct intel_encoder *encoder,
+                       const struct intel_crtc_state *crtc_state);
+void intel_mpllb_disable(struct intel_encoder *encoder);
+void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
+                                 struct intel_mpllb_state *pll_state);
+int intel_mpllb_calc_port_clock(struct intel_encoder *encoder,
+                               const struct intel_mpllb_state *pll_state);
+
+int intel_snps_phy_check_hdmi_link_rate(int clock);
+void intel_snps_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
+                                       u32 level);
+
+#endif /* __INTEL_SNPS_PHY_H__ */
index 4ae9a74..08116f4 100644 (file)
@@ -1856,7 +1856,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
        zpos = sprite + 1;
        drm_plane_create_zpos_immutable_property(&plane->base, zpos);
 
-       drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+       intel_plane_helper_add(plane);
 
        return plane;
 
index dbe24d7..330077c 100644 (file)
@@ -456,7 +456,7 @@ struct child_device_config {
        u16 dp_gpio_pin_num;                                    /* 195 */
        u8 dp_iboost_level:4;                                   /* 196 */
        u8 hdmi_iboost_level:4;                                 /* 196 */
-       u8 dp_max_link_rate:3;                                  /* 216/230 CNL+ */
+       u8 dp_max_link_rate:3;                                  /* 216/230 GLK+ */
        u8 dp_max_link_rate_reserved:5;                         /* 216/230 */
 } __packed;
 
index 8574937..df3286a 100644 (file)
@@ -348,7 +348,10 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
        if (DISPLAY_VER(i915) >= 12)
                return true;
 
-       if ((DISPLAY_VER(i915) >= 11 || IS_CANNONLAKE(i915)) && (pipe != PIPE_A || (cpu_transcoder == TRANSCODER_EDP || cpu_transcoder == TRANSCODER_DSI_0 || cpu_transcoder == TRANSCODER_DSI_1)))
+       if (DISPLAY_VER(i915) >= 11 &&
+           (pipe != PIPE_A || cpu_transcoder == TRANSCODER_EDP ||
+            cpu_transcoder == TRANSCODER_DSI_0 ||
+            cpu_transcoder == TRANSCODER_DSI_1))
                return true;
 
        return false;
index 911a113..37eabef 100644 (file)
@@ -294,12 +294,12 @@ int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
        return 0;
 }
 
-static int cnl_coef_tap(int i)
+static int glk_coef_tap(int i)
 {
        return i % 7;
 }
 
-static u16 cnl_nearest_filter_coef(int t)
+static u16 glk_nearest_filter_coef(int t)
 {
        return t == 3 ? 0x0800 : 0x3000;
 }
@@ -341,29 +341,29 @@ static u16 cnl_nearest_filter_coef(int t)
  *
  */
 
-static void cnl_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
+static void glk_program_nearest_filter_coefs(struct drm_i915_private *dev_priv,
                                             enum pipe pipe, int id, int set)
 {
        int i;
 
-       intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set),
+       intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set),
                          PS_COEE_INDEX_AUTO_INC);
 
        for (i = 0; i < 17 * 7; i += 2) {
                u32 tmp;
                int t;
 
-               t = cnl_coef_tap(i);
-               tmp = cnl_nearest_filter_coef(t);
+               t = glk_coef_tap(i);
+               tmp = glk_nearest_filter_coef(t);
 
-               t = cnl_coef_tap(i + 1);
-               tmp |= cnl_nearest_filter_coef(t) << 16;
+               t = glk_coef_tap(i + 1);
+               tmp |= glk_nearest_filter_coef(t) << 16;
 
-               intel_de_write_fw(dev_priv, CNL_PS_COEF_DATA_SET(pipe, id, set),
+               intel_de_write_fw(dev_priv, GLK_PS_COEF_DATA_SET(pipe, id, set),
                                  tmp);
        }
 
-       intel_de_write_fw(dev_priv, CNL_PS_COEF_INDEX_SET(pipe, id, set), 0);
+       intel_de_write_fw(dev_priv, GLK_PS_COEF_INDEX_SET(pipe, id, set), 0);
 }
 
 static u32 skl_scaler_get_filter_select(enum drm_scaling_filter filter, int set)
@@ -386,7 +386,7 @@ static void skl_scaler_setup_filter(struct drm_i915_private *dev_priv, enum pipe
        case DRM_SCALING_FILTER_DEFAULT:
                break;
        case DRM_SCALING_FILTER_NEAREST_NEIGHBOR:
-               cnl_program_nearest_filter_coefs(dev_priv, pipe, id, set);
+               glk_program_nearest_filter_coefs(dev_priv, pipe, id, set);
                break;
        default:
                MISSING_CASE(filter);
index 92a4fd5..724e7b0 100644 (file)
@@ -835,7 +835,7 @@ static u32 skl_plane_ctl_rotate(unsigned int rotate)
        return 0;
 }
 
-static u32 cnl_plane_ctl_flip(unsigned int reflect)
+static u32 icl_plane_ctl_flip(unsigned int reflect)
 {
        switch (reflect) {
        case 0:
@@ -917,8 +917,8 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
        plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
        plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
 
-       if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
-               plane_ctl |= cnl_plane_ctl_flip(rotation &
+       if (DISPLAY_VER(dev_priv) >= 11)
+               plane_ctl |= icl_plane_ctl_flip(rotation &
                                                DRM_MODE_REFLECT_MASK);
 
        if (key->flags & I915_SET_COLORKEY_DESTINATION)
@@ -926,7 +926,7 @@ static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
        else if (key->flags & I915_SET_COLORKEY_SOURCE)
                plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
 
-       /* Wa_22012358565:adlp */
+       /* Wa_22012358565:adl-p */
        if (DISPLAY_VER(dev_priv) == 13)
                plane_ctl |= adlp_plane_ctl_arb_slots(plane_state);
 
@@ -1270,7 +1270,7 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
        int pipe_src_w = crtc_state->pipe_src_w;
 
        /*
-        * Display WA #1175: cnl,glk
+        * Display WA #1175: glk
         * Planes other than the cursor may cause FIFO underflow and display
         * corruption if starting less than 4 pixels from the right edge of
         * the screen.
@@ -1828,7 +1828,7 @@ static bool skl_plane_has_ccs(struct drm_i915_private *dev_priv,
        if (plane_id == PLANE_CURSOR)
                return false;
 
-       if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
+       if (DISPLAY_VER(dev_priv) >= 11)
                return true;
 
        if (IS_GEMINILAKE(dev_priv))
@@ -1910,11 +1910,11 @@ static bool gen12_plane_supports_mc_ccs(struct drm_i915_private *dev_priv,
 {
        /* Wa_14010477008:tgl[a0..c0],rkl[all],dg1[all] */
        if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
-           IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
+           IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_D0))
                return false;
 
        /* Wa_22011186057 */
-       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
+       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
                return false;
 
        return plane_id < PLANE_SPRITE4;
@@ -1938,7 +1938,7 @@ static bool gen12_plane_format_mod_supported(struct drm_plane *_plane,
        case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
        case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
                /* Wa_22011186057 */
-               if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
+               if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
                        return false;
                break;
        default:
@@ -1995,7 +1995,7 @@ static const u64 *gen12_get_plane_modifiers(struct drm_i915_private *dev_priv,
                                            enum plane_id plane_id)
 {
        /* Wa_22011186057 */
-       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
+       if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
                return adlp_step_a_plane_format_modifiers;
        else if (gen12_plane_supports_mc_ccs(dev_priv, plane_id))
                return gen12_plane_format_modifiers_mc_ccs;
@@ -2144,7 +2144,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
                        DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
                        DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
 
-       if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
+       if (DISPLAY_VER(dev_priv) >= 11)
                supported_rotations |= DRM_MODE_REFLECT_X;
 
        drm_plane_create_rotation_property(&plane->base,
@@ -2174,12 +2174,12 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
        if (DISPLAY_VER(dev_priv) >= 12)
                drm_plane_enable_fb_damage_clips(&plane->base);
 
-       if (DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv))
+       if (DISPLAY_VER(dev_priv) >= 11)
                drm_plane_create_scaling_filter_property(&plane->base,
                                                BIT(DRM_SCALING_FILTER_DEFAULT) |
                                                BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
 
-       drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
+       intel_plane_helper_add(plane);
 
        return plane;
 
@@ -2295,7 +2295,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
                break;
        }
 
-       if ((DISPLAY_VER(dev_priv) >= 11 || IS_CANNONLAKE(dev_priv)) && val & PLANE_CTL_FLIP_HORIZONTAL)
+       if (DISPLAY_VER(dev_priv) >= 11 && val & PLANE_CTL_FLIP_HORIZONTAL)
                plane_config->rotation |= DRM_MODE_REFLECT_X;
 
        /* 90/270 degree rotation would require extra work */
index daf9284..f0435c6 100644 (file)
@@ -24,13 +24,11 @@ static void __do_clflush(struct drm_i915_gem_object *obj)
        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
 }
 
-static int clflush_work(struct dma_fence_work *base)
+static void clflush_work(struct dma_fence_work *base)
 {
        struct clflush *clflush = container_of(base, typeof(*clflush), base);
 
        __do_clflush(clflush->obj);
-
-       return 0;
 }
 
 static void clflush_release(struct dma_fence_work *base)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
deleted file mode 100644 (file)
index 44821d9..0000000
+++ /dev/null
@@ -1,355 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#include "i915_drv.h"
-#include "gt/intel_context.h"
-#include "gt/intel_engine_pm.h"
-#include "i915_gem_client_blt.h"
-#include "i915_gem_object_blt.h"
-
-struct i915_sleeve {
-       struct i915_vma *vma;
-       struct drm_i915_gem_object *obj;
-       struct sg_table *pages;
-       struct i915_page_sizes page_sizes;
-};
-
-static int vma_set_pages(struct i915_vma *vma)
-{
-       struct i915_sleeve *sleeve = vma->private;
-
-       vma->pages = sleeve->pages;
-       vma->page_sizes = sleeve->page_sizes;
-
-       return 0;
-}
-
-static void vma_clear_pages(struct i915_vma *vma)
-{
-       GEM_BUG_ON(!vma->pages);
-       vma->pages = NULL;
-}
-
-static void vma_bind(struct i915_address_space *vm,
-                    struct i915_vm_pt_stash *stash,
-                    struct i915_vma *vma,
-                    enum i915_cache_level cache_level,
-                    u32 flags)
-{
-       vm->vma_ops.bind_vma(vm, stash, vma, cache_level, flags);
-}
-
-static void vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
-{
-       vm->vma_ops.unbind_vma(vm, vma);
-}
-
-static const struct i915_vma_ops proxy_vma_ops = {
-       .set_pages = vma_set_pages,
-       .clear_pages = vma_clear_pages,
-       .bind_vma = vma_bind,
-       .unbind_vma = vma_unbind,
-};
-
-static struct i915_sleeve *create_sleeve(struct i915_address_space *vm,
-                                        struct drm_i915_gem_object *obj,
-                                        struct sg_table *pages,
-                                        struct i915_page_sizes *page_sizes)
-{
-       struct i915_sleeve *sleeve;
-       struct i915_vma *vma;
-       int err;
-
-       sleeve = kzalloc(sizeof(*sleeve), GFP_KERNEL);
-       if (!sleeve)
-               return ERR_PTR(-ENOMEM);
-
-       vma = i915_vma_instance(obj, vm, NULL);
-       if (IS_ERR(vma)) {
-               err = PTR_ERR(vma);
-               goto err_free;
-       }
-
-       vma->private = sleeve;
-       vma->ops = &proxy_vma_ops;
-
-       sleeve->vma = vma;
-       sleeve->pages = pages;
-       sleeve->page_sizes = *page_sizes;
-
-       return sleeve;
-
-err_free:
-       kfree(sleeve);
-       return ERR_PTR(err);
-}
-
-static void destroy_sleeve(struct i915_sleeve *sleeve)
-{
-       kfree(sleeve);
-}
-
-struct clear_pages_work {
-       struct dma_fence dma;
-       struct dma_fence_cb cb;
-       struct i915_sw_fence wait;
-       struct work_struct work;
-       struct irq_work irq_work;
-       struct i915_sleeve *sleeve;
-       struct intel_context *ce;
-       u32 value;
-};
-
-static const char *clear_pages_work_driver_name(struct dma_fence *fence)
-{
-       return DRIVER_NAME;
-}
-
-static const char *clear_pages_work_timeline_name(struct dma_fence *fence)
-{
-       return "clear";
-}
-
-static void clear_pages_work_release(struct dma_fence *fence)
-{
-       struct clear_pages_work *w = container_of(fence, typeof(*w), dma);
-
-       destroy_sleeve(w->sleeve);
-
-       i915_sw_fence_fini(&w->wait);
-
-       BUILD_BUG_ON(offsetof(typeof(*w), dma));
-       dma_fence_free(&w->dma);
-}
-
-static const struct dma_fence_ops clear_pages_work_ops = {
-       .get_driver_name = clear_pages_work_driver_name,
-       .get_timeline_name = clear_pages_work_timeline_name,
-       .release = clear_pages_work_release,
-};
-
-static void clear_pages_signal_irq_worker(struct irq_work *work)
-{
-       struct clear_pages_work *w = container_of(work, typeof(*w), irq_work);
-
-       dma_fence_signal(&w->dma);
-       dma_fence_put(&w->dma);
-}
-
-static void clear_pages_dma_fence_cb(struct dma_fence *fence,
-                                    struct dma_fence_cb *cb)
-{
-       struct clear_pages_work *w = container_of(cb, typeof(*w), cb);
-
-       if (fence->error)
-               dma_fence_set_error(&w->dma, fence->error);
-
-       /*
-        * Push the signalling of the fence into yet another worker to avoid
-        * the nightmare locking around the fence spinlock.
-        */
-       irq_work_queue(&w->irq_work);
-}
-
-static void clear_pages_worker(struct work_struct *work)
-{
-       struct clear_pages_work *w = container_of(work, typeof(*w), work);
-       struct drm_i915_gem_object *obj = w->sleeve->vma->obj;
-       struct i915_vma *vma = w->sleeve->vma;
-       struct i915_gem_ww_ctx ww;
-       struct i915_request *rq;
-       struct i915_vma *batch;
-       int err = w->dma.error;
-
-       if (unlikely(err))
-               goto out_signal;
-
-       if (obj->cache_dirty) {
-               if (i915_gem_object_has_struct_page(obj))
-                       drm_clflush_sg(w->sleeve->pages);
-               obj->cache_dirty = false;
-       }
-       obj->read_domains = I915_GEM_GPU_DOMAINS;
-       obj->write_domain = 0;
-
-       i915_gem_ww_ctx_init(&ww, false);
-       intel_engine_pm_get(w->ce->engine);
-retry:
-       err = intel_context_pin_ww(w->ce, &ww);
-       if (err)
-               goto out_signal;
-
-       batch = intel_emit_vma_fill_blt(w->ce, vma, &ww, w->value);
-       if (IS_ERR(batch)) {
-               err = PTR_ERR(batch);
-               goto out_ctx;
-       }
-
-       rq = i915_request_create(w->ce);
-       if (IS_ERR(rq)) {
-               err = PTR_ERR(rq);
-               goto out_batch;
-       }
-
-       /* There's no way the fence has signalled */
-       if (dma_fence_add_callback(&rq->fence, &w->cb,
-                                  clear_pages_dma_fence_cb))
-               GEM_BUG_ON(1);
-
-       err = intel_emit_vma_mark_active(batch, rq);
-       if (unlikely(err))
-               goto out_request;
-
-       /*
-        * w->dma is already exported via (vma|obj)->resv we need only
-        * keep track of the GPU activity within this vma/request, and
-        * propagate the signal from the request to w->dma.
-        */
-       err = __i915_vma_move_to_active(vma, rq);
-       if (err)
-               goto out_request;
-
-       if (rq->engine->emit_init_breadcrumb) {
-               err = rq->engine->emit_init_breadcrumb(rq);
-               if (unlikely(err))
-                       goto out_request;
-       }
-
-       err = rq->engine->emit_bb_start(rq,
-                                       batch->node.start, batch->node.size,
-                                       0);
-out_request:
-       if (unlikely(err)) {
-               i915_request_set_error_once(rq, err);
-               err = 0;
-       }
-
-       i915_request_add(rq);
-out_batch:
-       intel_emit_vma_release(w->ce, batch);
-out_ctx:
-       intel_context_unpin(w->ce);
-out_signal:
-       if (err == -EDEADLK) {
-               err = i915_gem_ww_ctx_backoff(&ww);
-               if (!err)
-                       goto retry;
-       }
-       i915_gem_ww_ctx_fini(&ww);
-
-       i915_vma_unpin(w->sleeve->vma);
-       intel_engine_pm_put(w->ce->engine);
-
-       if (unlikely(err)) {
-               dma_fence_set_error(&w->dma, err);
-               dma_fence_signal(&w->dma);
-               dma_fence_put(&w->dma);
-       }
-}
-
-static int pin_wait_clear_pages_work(struct clear_pages_work *w,
-                                    struct intel_context *ce)
-{
-       struct i915_vma *vma = w->sleeve->vma;
-       struct i915_gem_ww_ctx ww;
-       int err;
-
-       i915_gem_ww_ctx_init(&ww, false);
-retry:
-       err = i915_gem_object_lock(vma->obj, &ww);
-       if (err)
-               goto out;
-
-       err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
-       if (unlikely(err))
-               goto out;
-
-       err = i915_sw_fence_await_reservation(&w->wait,
-                                             vma->obj->base.resv, NULL,
-                                             true, 0, I915_FENCE_GFP);
-       if (err)
-               goto err_unpin_vma;
-
-       dma_resv_add_excl_fence(vma->obj->base.resv, &w->dma);
-
-err_unpin_vma:
-       if (err)
-               i915_vma_unpin(vma);
-out:
-       if (err == -EDEADLK) {
-               err = i915_gem_ww_ctx_backoff(&ww);
-               if (!err)
-                       goto retry;
-       }
-       i915_gem_ww_ctx_fini(&ww);
-       return err;
-}
-
-static int __i915_sw_fence_call
-clear_pages_work_notify(struct i915_sw_fence *fence,
-                       enum i915_sw_fence_notify state)
-{
-       struct clear_pages_work *w = container_of(fence, typeof(*w), wait);
-
-       switch (state) {
-       case FENCE_COMPLETE:
-               schedule_work(&w->work);
-               break;
-
-       case FENCE_FREE:
-               dma_fence_put(&w->dma);
-               break;
-       }
-
-       return NOTIFY_DONE;
-}
-
-static DEFINE_SPINLOCK(fence_lock);
-
-/* XXX: better name please */
-int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
-                                    struct intel_context *ce,
-                                    struct sg_table *pages,
-                                    struct i915_page_sizes *page_sizes,
-                                    u32 value)
-{
-       struct clear_pages_work *work;
-       struct i915_sleeve *sleeve;
-       int err;
-
-       sleeve = create_sleeve(ce->vm, obj, pages, page_sizes);
-       if (IS_ERR(sleeve))
-               return PTR_ERR(sleeve);
-
-       work = kmalloc(sizeof(*work), GFP_KERNEL);
-       if (!work) {
-               destroy_sleeve(sleeve);
-               return -ENOMEM;
-       }
-
-       work->value = value;
-       work->sleeve = sleeve;
-       work->ce = ce;
-
-       INIT_WORK(&work->work, clear_pages_worker);
-
-       init_irq_work(&work->irq_work, clear_pages_signal_irq_worker);
-
-       dma_fence_init(&work->dma, &clear_pages_work_ops, &fence_lock, 0, 0);
-       i915_sw_fence_init(&work->wait, clear_pages_work_notify);
-
-       err = pin_wait_clear_pages_work(work, ce);
-       if (err < 0)
-               dma_fence_set_error(&work->dma, err);
-
-       dma_fence_get(&work->dma);
-       i915_sw_fence_commit(&work->wait);
-
-       return err;
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/i915_gem_client_blt.c"
-#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.h
deleted file mode 100644 (file)
index 3dbd28c..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-#ifndef __I915_GEM_CLIENT_BLT_H__
-#define __I915_GEM_CLIENT_BLT_H__
-
-#include <linux/types.h>
-
-struct drm_i915_gem_object;
-struct i915_page_sizes;
-struct intel_context;
-struct sg_table;
-
-int i915_gem_schedule_fill_pages_blt(struct drm_i915_gem_object *obj,
-                                    struct intel_context *ce,
-                                    struct sg_table *pages,
-                                    struct i915_page_sizes *page_sizes,
-                                    u32 value);
-
-#endif
index 7720b8c..cff7267 100644 (file)
 #include <linux/log2.h>
 #include <linux/nospec.h>
 
+#include <drm/drm_syncobj.h>
+
 #include "gt/gen6_ppgtt.h"
 #include "gt/intel_context.h"
 #include "gt/intel_context_param.h"
 #include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_engine_user.h"
-#include "gt/intel_execlists_submission.h" /* virtual_engine */
 #include "gt/intel_gpu_commands.h"
 #include "gt/intel_ring.h"
 
 #include "i915_gem_context.h"
-#include "i915_globals.h"
 #include "i915_trace.h"
 #include "i915_user_extensions.h"
 
 #define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
 
-static struct i915_global_gem_context {
-       struct i915_global base;
-       struct kmem_cache *slab_luts;
-} global;
+static struct kmem_cache *slab_luts;
 
 struct i915_lut_handle *i915_lut_handle_alloc(void)
 {
-       return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
+       return kmem_cache_alloc(slab_luts, GFP_KERNEL);
 }
 
 void i915_lut_handle_free(struct i915_lut_handle *lut)
 {
-       return kmem_cache_free(global.slab_luts, lut);
+       return kmem_cache_free(slab_luts, lut);
 }
 
 static void lut_close(struct i915_gem_context *ctx)
@@ -167,1228 +164,1445 @@ lookup_user_engine(struct i915_gem_context *ctx,
        return i915_gem_context_get_engine(ctx, idx);
 }
 
-static struct i915_address_space *
-context_get_vm_rcu(struct i915_gem_context *ctx)
+static int validate_priority(struct drm_i915_private *i915,
+                            const struct drm_i915_gem_context_param *args)
 {
-       GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
+       s64 priority = args->value;
 
-       do {
-               struct i915_address_space *vm;
+       if (args->size)
+               return -EINVAL;
+
+       if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+               return -ENODEV;
+
+       if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
+           priority < I915_CONTEXT_MIN_USER_PRIORITY)
+               return -EINVAL;
+
+       if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
+           !capable(CAP_SYS_NICE))
+               return -EPERM;
+
+       return 0;
+}
+
+static void proto_context_close(struct i915_gem_proto_context *pc)
+{
+       int i;
+
+       if (pc->vm)
+               i915_vm_put(pc->vm);
+       if (pc->user_engines) {
+               for (i = 0; i < pc->num_user_engines; i++)
+                       kfree(pc->user_engines[i].siblings);
+               kfree(pc->user_engines);
+       }
+       kfree(pc);
+}
 
+static int proto_context_set_persistence(struct drm_i915_private *i915,
+                                        struct i915_gem_proto_context *pc,
+                                        bool persist)
+{
+       if (persist) {
                /*
-                * We do not allow downgrading from full-ppgtt [to a shared
-                * global gtt], so ctx->vm cannot become NULL.
+                * Only contexts that are short-lived [that will expire or be
+                * reset] are allowed to survive past termination. We require
+                * hangcheck to ensure that the persistent requests are healthy.
                 */
-               vm = rcu_dereference(ctx->vm);
-               if (!kref_get_unless_zero(&vm->ref))
-                       continue;
+               if (!i915->params.enable_hangcheck)
+                       return -EINVAL;
+
+               pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
+       } else {
+               /* To cancel a context we use "preempt-to-idle" */
+               if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+                       return -ENODEV;
 
                /*
-                * This ppgtt may have be reallocated between
-                * the read and the kref, and reassigned to a third
-                * context. In order to avoid inadvertent sharing
-                * of this ppgtt with that third context (and not
-                * src), we have to confirm that we have the same
-                * ppgtt after passing through the strong memory
-                * barrier implied by a successful
-                * kref_get_unless_zero().
+                * If the cancel fails, we then need to reset, cleanly!
                 *
-                * Once we have acquired the current ppgtt of ctx,
-                * we no longer care if it is released from ctx, as
-                * it cannot be reallocated elsewhere.
+                * If the per-engine reset fails, all hope is lost! We resort
+                * to a full GPU reset in that unlikely case, but realistically
+                * if the engine could not reset, the full reset does not fare
+                * much better. The damage has been done.
+                *
+                * However, if we cannot reset an engine by itself, we cannot
+                * cleanup a hanging persistent context without causing
+                * collateral damage, and we should not pretend we can by
+                * exposing the interface.
                 */
+               if (!intel_has_reset_engine(&i915->gt))
+                       return -ENODEV;
 
-               if (vm == rcu_access_pointer(ctx->vm))
-                       return rcu_pointer_handoff(vm);
+               pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
+       }
 
-               i915_vm_put(vm);
-       } while (1);
+       return 0;
 }
 
-static void intel_context_set_gem(struct intel_context *ce,
-                                 struct i915_gem_context *ctx)
+static struct i915_gem_proto_context *
+proto_context_create(struct drm_i915_private *i915, unsigned int flags)
 {
-       GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
-       RCU_INIT_POINTER(ce->gem_context, ctx);
+       struct i915_gem_proto_context *pc, *err;
 
-       if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
-               ce->ring = __intel_context_ring_size(SZ_16K);
-
-       if (rcu_access_pointer(ctx->vm)) {
-               struct i915_address_space *vm;
+       pc = kzalloc(sizeof(*pc), GFP_KERNEL);
+       if (!pc)
+               return ERR_PTR(-ENOMEM);
 
-               rcu_read_lock();
-               vm = context_get_vm_rcu(ctx); /* hmm */
-               rcu_read_unlock();
+       pc->num_user_engines = -1;
+       pc->user_engines = NULL;
+       pc->user_flags = BIT(UCONTEXT_BANNABLE) |
+                        BIT(UCONTEXT_RECOVERABLE);
+       if (i915->params.enable_hangcheck)
+               pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
+       pc->sched.priority = I915_PRIORITY_NORMAL;
 
-               i915_vm_put(ce->vm);
-               ce->vm = vm;
+       if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
+               if (!HAS_EXECLISTS(i915)) {
+                       err = ERR_PTR(-EINVAL);
+                       goto proto_close;
+               }
+               pc->single_timeline = true;
        }
 
-       GEM_BUG_ON(ce->timeline);
-       if (ctx->timeline)
-               ce->timeline = intel_timeline_get(ctx->timeline);
-
-       if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
-           intel_engine_has_timeslices(ce->engine))
-               __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
+       return pc;
 
-       intel_context_set_watchdog_us(ce, ctx->watchdog.timeout_us);
+proto_close:
+       proto_context_close(pc);
+       return err;
 }
 
-static void __free_engines(struct i915_gem_engines *e, unsigned int count)
+static int proto_context_register_locked(struct drm_i915_file_private *fpriv,
+                                        struct i915_gem_proto_context *pc,
+                                        u32 *id)
 {
-       while (count--) {
-               if (!e->engines[count])
-                       continue;
+       int ret;
+       void *old;
 
-               intel_context_put(e->engines[count]);
+       lockdep_assert_held(&fpriv->proto_context_lock);
+
+       ret = xa_alloc(&fpriv->context_xa, id, NULL, xa_limit_32b, GFP_KERNEL);
+       if (ret)
+               return ret;
+
+       old = xa_store(&fpriv->proto_context_xa, *id, pc, GFP_KERNEL);
+       if (xa_is_err(old)) {
+               xa_erase(&fpriv->context_xa, *id);
+               return xa_err(old);
        }
-       kfree(e);
-}
+       WARN_ON(old);
 
-static void free_engines(struct i915_gem_engines *e)
-{
-       __free_engines(e, e->num_engines);
+       return 0;
 }
 
-static void free_engines_rcu(struct rcu_head *rcu)
+static int proto_context_register(struct drm_i915_file_private *fpriv,
+                                 struct i915_gem_proto_context *pc,
+                                 u32 *id)
 {
-       struct i915_gem_engines *engines =
-               container_of(rcu, struct i915_gem_engines, rcu);
+       int ret;
 
-       i915_sw_fence_fini(&engines->fence);
-       free_engines(engines);
+       mutex_lock(&fpriv->proto_context_lock);
+       ret = proto_context_register_locked(fpriv, pc, id);
+       mutex_unlock(&fpriv->proto_context_lock);
+
+       return ret;
 }
 
-static int __i915_sw_fence_call
-engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+static int set_proto_ctx_vm(struct drm_i915_file_private *fpriv,
+                           struct i915_gem_proto_context *pc,
+                           const struct drm_i915_gem_context_param *args)
 {
-       struct i915_gem_engines *engines =
-               container_of(fence, typeof(*engines), fence);
+       struct drm_i915_private *i915 = fpriv->dev_priv;
+       struct i915_address_space *vm;
 
-       switch (state) {
-       case FENCE_COMPLETE:
-               if (!list_empty(&engines->link)) {
-                       struct i915_gem_context *ctx = engines->ctx;
-                       unsigned long flags;
+       if (args->size)
+               return -EINVAL;
 
-                       spin_lock_irqsave(&ctx->stale.lock, flags);
-                       list_del(&engines->link);
-                       spin_unlock_irqrestore(&ctx->stale.lock, flags);
-               }
-               i915_gem_context_put(engines->ctx);
-               break;
+       if (!HAS_FULL_PPGTT(i915))
+               return -ENODEV;
 
-       case FENCE_FREE:
-               init_rcu_head(&engines->rcu);
-               call_rcu(&engines->rcu, free_engines_rcu);
-               break;
-       }
+       if (upper_32_bits(args->value))
+               return -ENOENT;
 
-       return NOTIFY_DONE;
+       vm = i915_gem_vm_lookup(fpriv, args->value);
+       if (!vm)
+               return -ENOENT;
+
+       if (pc->vm)
+               i915_vm_put(pc->vm);
+       pc->vm = vm;
+
+       return 0;
 }
 
-static struct i915_gem_engines *alloc_engines(unsigned int count)
+struct set_proto_ctx_engines {
+       struct drm_i915_private *i915;
+       unsigned num_engines;
+       struct i915_gem_proto_engine *engines;
+};
+
+static int
+set_proto_ctx_engines_balance(struct i915_user_extension __user *base,
+                             void *data)
 {
-       struct i915_gem_engines *e;
+       struct i915_context_engines_load_balance __user *ext =
+               container_of_user(base, typeof(*ext), base);
+       const struct set_proto_ctx_engines *set = data;
+       struct drm_i915_private *i915 = set->i915;
+       struct intel_engine_cs **siblings;
+       u16 num_siblings, idx;
+       unsigned int n;
+       int err;
 
-       e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
-       if (!e)
-               return NULL;
+       if (!HAS_EXECLISTS(i915))
+               return -ENODEV;
 
-       i915_sw_fence_init(&e->fence, engines_notify);
-       return e;
-}
+       if (get_user(idx, &ext->engine_index))
+               return -EFAULT;
 
-static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
-{
-       const struct intel_gt *gt = &ctx->i915->gt;
-       struct intel_engine_cs *engine;
-       struct i915_gem_engines *e;
-       enum intel_engine_id id;
+       if (idx >= set->num_engines) {
+               drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
+                       idx, set->num_engines);
+               return -EINVAL;
+       }
 
-       e = alloc_engines(I915_NUM_ENGINES);
-       if (!e)
-               return ERR_PTR(-ENOMEM);
+       idx = array_index_nospec(idx, set->num_engines);
+       if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_INVALID) {
+               drm_dbg(&i915->drm,
+                       "Invalid placement[%d], already occupied\n", idx);
+               return -EEXIST;
+       }
 
-       for_each_engine(engine, gt, id) {
-               struct intel_context *ce;
+       if (get_user(num_siblings, &ext->num_siblings))
+               return -EFAULT;
 
-               if (engine->legacy_idx == INVALID_ENGINE)
-                       continue;
+       err = check_user_mbz(&ext->flags);
+       if (err)
+               return err;
 
-               GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
-               GEM_BUG_ON(e->engines[engine->legacy_idx]);
+       err = check_user_mbz(&ext->mbz64);
+       if (err)
+               return err;
 
-               ce = intel_context_create(engine);
-               if (IS_ERR(ce)) {
-                       __free_engines(e, e->num_engines + 1);
-                       return ERR_CAST(ce);
+       if (num_siblings == 0)
+               return 0;
+
+       siblings = kmalloc_array(num_siblings, sizeof(*siblings), GFP_KERNEL);
+       if (!siblings)
+               return -ENOMEM;
+
+       for (n = 0; n < num_siblings; n++) {
+               struct i915_engine_class_instance ci;
+
+               if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
+                       err = -EFAULT;
+                       goto err_siblings;
                }
 
-               intel_context_set_gem(ce, ctx);
+               siblings[n] = intel_engine_lookup_user(i915,
+                                                      ci.engine_class,
+                                                      ci.engine_instance);
+               if (!siblings[n]) {
+                       drm_dbg(&i915->drm,
+                               "Invalid sibling[%d]: { class:%d, inst:%d }\n",
+                               n, ci.engine_class, ci.engine_instance);
+                       err = -EINVAL;
+                       goto err_siblings;
+               }
+       }
 
-               e->engines[engine->legacy_idx] = ce;
-               e->num_engines = max(e->num_engines, engine->legacy_idx);
+       if (num_siblings == 1) {
+               set->engines[idx].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
+               set->engines[idx].engine = siblings[0];
+               kfree(siblings);
+       } else {
+               set->engines[idx].type = I915_GEM_ENGINE_TYPE_BALANCED;
+               set->engines[idx].num_siblings = num_siblings;
+               set->engines[idx].siblings = siblings;
        }
-       e->num_engines++;
 
-       return e;
+       return 0;
+
+err_siblings:
+       kfree(siblings);
+
+       return err;
 }
 
-void i915_gem_context_release(struct kref *ref)
+static int
+set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
 {
-       struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
+       struct i915_context_engines_bond __user *ext =
+               container_of_user(base, typeof(*ext), base);
+       const struct set_proto_ctx_engines *set = data;
+       struct drm_i915_private *i915 = set->i915;
+       struct i915_engine_class_instance ci;
+       struct intel_engine_cs *master;
+       u16 idx, num_bonds;
+       int err, n;
 
-       trace_i915_context_free(ctx);
-       GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+       if (get_user(idx, &ext->virtual_index))
+               return -EFAULT;
 
-       mutex_destroy(&ctx->engines_mutex);
-       mutex_destroy(&ctx->lut_mutex);
+       if (idx >= set->num_engines) {
+               drm_dbg(&i915->drm,
+                       "Invalid index for virtual engine: %d >= %d\n",
+                       idx, set->num_engines);
+               return -EINVAL;
+       }
 
-       if (ctx->timeline)
-               intel_timeline_put(ctx->timeline);
+       idx = array_index_nospec(idx, set->num_engines);
+       if (set->engines[idx].type == I915_GEM_ENGINE_TYPE_INVALID) {
+               drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
+               return -EINVAL;
+       }
 
-       put_pid(ctx->pid);
-       mutex_destroy(&ctx->mutex);
+       if (set->engines[idx].type != I915_GEM_ENGINE_TYPE_PHYSICAL) {
+               drm_dbg(&i915->drm,
+                       "Bonding with virtual engines not allowed\n");
+               return -EINVAL;
+       }
 
-       kfree_rcu(ctx, rcu);
-}
+       err = check_user_mbz(&ext->flags);
+       if (err)
+               return err;
 
-static inline struct i915_gem_engines *
-__context_engines_static(const struct i915_gem_context *ctx)
-{
-       return rcu_dereference_protected(ctx->engines, true);
-}
-
-static void __reset_context(struct i915_gem_context *ctx,
-                           struct intel_engine_cs *engine)
-{
-       intel_gt_handle_error(engine->gt, engine->mask, 0,
-                             "context closure in %s", ctx->name);
-}
-
-static bool __cancel_engine(struct intel_engine_cs *engine)
-{
-       /*
-        * Send a "high priority pulse" down the engine to cause the
-        * current request to be momentarily preempted. (If it fails to
-        * be preempted, it will be reset). As we have marked our context
-        * as banned, any incomplete request, including any running, will
-        * be skipped following the preemption.
-        *
-        * If there is no hangchecking (one of the reasons why we try to
-        * cancel the context) and no forced preemption, there may be no
-        * means by which we reset the GPU and evict the persistent hog.
-        * Ergo if we are unable to inject a preemptive pulse that can
-        * kill the banned context, we fallback to doing a local reset
-        * instead.
-        */
-       return intel_engine_pulse(engine) == 0;
-}
+       for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
+               err = check_user_mbz(&ext->mbz64[n]);
+               if (err)
+                       return err;
+       }
 
-static struct intel_engine_cs *active_engine(struct intel_context *ce)
-{
-       struct intel_engine_cs *engine = NULL;
-       struct i915_request *rq;
+       if (copy_from_user(&ci, &ext->master, sizeof(ci)))
+               return -EFAULT;
 
-       if (intel_context_has_inflight(ce))
-               return intel_context_inflight(ce);
+       master = intel_engine_lookup_user(i915,
+                                         ci.engine_class,
+                                         ci.engine_instance);
+       if (!master) {
+               drm_dbg(&i915->drm,
+                       "Unrecognised master engine: { class:%u, instance:%u }\n",
+                       ci.engine_class, ci.engine_instance);
+               return -EINVAL;
+       }
 
-       if (!ce->timeline)
-               return NULL;
+       if (intel_engine_uses_guc(master)) {
+               DRM_DEBUG("bonding extension not supported with GuC submission");
+               return -ENODEV;
+       }
 
-       /*
-        * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
-        * to the request to prevent it being transferred to a new timeline
-        * (and onto a new timeline->requests list).
-        */
-       rcu_read_lock();
-       list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
-               bool found;
+       if (get_user(num_bonds, &ext->num_bonds))
+               return -EFAULT;
 
-               /* timeline is already completed upto this point? */
-               if (!i915_request_get_rcu(rq))
-                       break;
+       for (n = 0; n < num_bonds; n++) {
+               struct intel_engine_cs *bond;
 
-               /* Check with the backend if the request is inflight */
-               found = true;
-               if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
-                       found = i915_request_active_engine(rq, &engine);
+               if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
+                       return -EFAULT;
 
-               i915_request_put(rq);
-               if (found)
-                       break;
+               bond = intel_engine_lookup_user(i915,
+                                               ci.engine_class,
+                                               ci.engine_instance);
+               if (!bond) {
+                       drm_dbg(&i915->drm,
+                               "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
+                               n, ci.engine_class, ci.engine_instance);
+                       return -EINVAL;
+               }
        }
-       rcu_read_unlock();
 
-       return engine;
+       return 0;
 }
 
-static void kill_engines(struct i915_gem_engines *engines, bool ban)
+static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
+       [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
+       [I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
+};
+
+static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
+                                struct i915_gem_proto_context *pc,
+                                const struct drm_i915_gem_context_param *args)
 {
-       struct i915_gem_engines_iter it;
-       struct intel_context *ce;
+       struct drm_i915_private *i915 = fpriv->dev_priv;
+       struct set_proto_ctx_engines set = { .i915 = i915 };
+       struct i915_context_param_engines __user *user =
+               u64_to_user_ptr(args->value);
+       unsigned int n;
+       u64 extensions;
+       int err;
 
-       /*
-        * Map the user's engine back to the actual engines; one virtual
-        * engine will be mapped to multiple engines, and using ctx->engine[]
-        * the same engine may be have multiple instances in the user's map.
-        * However, we only care about pending requests, so only include
-        * engines on which there are incomplete requests.
-        */
-       for_each_gem_engine(ce, engines, it) {
-               struct intel_engine_cs *engine;
+       if (pc->num_user_engines >= 0) {
+               drm_dbg(&i915->drm, "Cannot set engines twice");
+               return -EINVAL;
+       }
 
-               if (ban && intel_context_set_banned(ce))
-                       continue;
+       if (args->size < sizeof(*user) ||
+           !IS_ALIGNED(args->size - sizeof(*user), sizeof(*user->engines))) {
+               drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
+                       args->size);
+               return -EINVAL;
+       }
 
-               /*
-                * Check the current active state of this context; if we
-                * are currently executing on the GPU we need to evict
-                * ourselves. On the other hand, if we haven't yet been
-                * submitted to the GPU or if everything is complete,
-                * we have nothing to do.
-                */
-               engine = active_engine(ce);
+       set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
+       /* RING_MASK has no shift so we can use it directly here */
+       if (set.num_engines > I915_EXEC_RING_MASK + 1)
+               return -EINVAL;
 
-               /* First attempt to gracefully cancel the context */
-               if (engine && !__cancel_engine(engine) && ban)
-                       /*
-                        * If we are unable to send a preemptive pulse to bump
-                        * the context from the GPU, we have to resort to a full
-                        * reset. We hope the collateral damage is worth it.
-                        */
-                       __reset_context(engines->ctx, engine);
-       }
-}
+       set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL);
+       if (!set.engines)
+               return -ENOMEM;
 
-static void kill_context(struct i915_gem_context *ctx)
-{
-       bool ban = (!i915_gem_context_is_persistent(ctx) ||
-                   !ctx->i915->params.enable_hangcheck);
-       struct i915_gem_engines *pos, *next;
+       for (n = 0; n < set.num_engines; n++) {
+               struct i915_engine_class_instance ci;
+               struct intel_engine_cs *engine;
 
-       spin_lock_irq(&ctx->stale.lock);
-       GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
-       list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
-               if (!i915_sw_fence_await(&pos->fence)) {
-                       list_del_init(&pos->link);
-                       continue;
+               if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
+                       kfree(set.engines);
+                       return -EFAULT;
                }
 
-               spin_unlock_irq(&ctx->stale.lock);
+               memset(&set.engines[n], 0, sizeof(set.engines[n]));
 
-               kill_engines(pos, ban);
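+               /*
+                * An (INVALID, NONE) class/instance pair marks this slot as
+                * unused, allowing userspace to leave holes in the engine map.
+                */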
+               if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
+                   ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE)
+                       continue;
 
-               spin_lock_irq(&ctx->stale.lock);
-               GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
-               list_safe_reset_next(pos, next, link);
-               list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
+               engine = intel_engine_lookup_user(i915,
+                                                 ci.engine_class,
+                                                 ci.engine_instance);
+               if (!engine) {
+                       drm_dbg(&i915->drm,
+                               "Invalid engine[%d]: { class:%d, instance:%d }\n",
+                               n, ci.engine_class, ci.engine_instance);
+                       kfree(set.engines);
+                       return -ENOENT;
+               }
 
-               i915_sw_fence_complete(&pos->fence);
+               set.engines[n].type = I915_GEM_ENGINE_TYPE_PHYSICAL;
+               set.engines[n].engine = engine;
        }
-       spin_unlock_irq(&ctx->stale.lock);
+
+       err = -EFAULT;
+       if (!get_user(extensions, &user->extensions))
+               err = i915_user_extensions(u64_to_user_ptr(extensions),
+                                          set_proto_ctx_engines_extensions,
+                                          ARRAY_SIZE(set_proto_ctx_engines_extensions),
+                                          &set);
+       if (err) {
+               kfree(set.engines);
+               return err;
+       }
+
+       pc->num_user_engines = set.num_engines;
+       pc->user_engines = set.engines;
+
+       return 0;
 }
 
-static void engines_idle_release(struct i915_gem_context *ctx,
-                                struct i915_gem_engines *engines)
+static int set_proto_ctx_sseu(struct drm_i915_file_private *fpriv,
+                             struct i915_gem_proto_context *pc,
+                             struct drm_i915_gem_context_param *args)
 {
-       struct i915_gem_engines_iter it;
-       struct intel_context *ce;
+       struct drm_i915_private *i915 = fpriv->dev_priv;
+       struct drm_i915_gem_context_param_sseu user_sseu;
+       struct intel_sseu *sseu;
+       int ret;
 
-       INIT_LIST_HEAD(&engines->link);
+       if (args->size < sizeof(user_sseu))
+               return -EINVAL;
 
-       engines->ctx = i915_gem_context_get(ctx);
+       if (GRAPHICS_VER(i915) != 11)
+               return -ENODEV;
 
-       for_each_gem_engine(ce, engines, it) {
-               int err;
+       if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
+                          sizeof(user_sseu)))
+               return -EFAULT;
 
-               /* serialises with execbuf */
-               set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
-               if (!intel_context_pin_if_active(ce))
-                       continue;
+       if (user_sseu.rsvd)
+               return -EINVAL;
 
-               /* Wait until context is finally scheduled out and retired */
-               err = i915_sw_fence_await_active(&engines->fence,
-                                                &ce->active,
-                                                I915_ACTIVE_AWAIT_BARRIER);
-               intel_context_unpin(ce);
-               if (err)
-                       goto kill;
-       }
+       if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
+               return -EINVAL;
 
-       spin_lock_irq(&ctx->stale.lock);
-       if (!i915_gem_context_is_closed(ctx))
-               list_add_tail(&engines->link, &ctx->stale.engines);
-       spin_unlock_irq(&ctx->stale.lock);
+       if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0))
+               return -EINVAL;
 
-kill:
-       if (list_empty(&engines->link)) /* raced, already closed */
-               kill_engines(engines, true);
+       if (pc->num_user_engines >= 0) {
+               int idx = user_sseu.engine.engine_instance;
+               struct i915_gem_proto_engine *pe;
 
-       i915_sw_fence_commit(&engines->fence);
-}
+               if (idx >= pc->num_user_engines)
+                       return -EINVAL;
 
-static void set_closed_name(struct i915_gem_context *ctx)
-{
-       char *s;
+               pe = &pc->user_engines[idx];
 
-       /* Replace '[]' with '<>' to indicate closed in debug prints */
+               /* Only render engine supports RPCS configuration. */
+               if (pe->engine->class != RENDER_CLASS)
+                       return -EINVAL;
 
-       s = strrchr(ctx->name, '[');
-       if (!s)
-               return;
+               sseu = &pe->sseu;
+       } else {
+               /* Only render engine supports RPCS configuration. */
+               if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER)
+                       return -EINVAL;
 
-       *s = '<';
+               /* There is only one render engine */
+               if (user_sseu.engine.engine_instance != 0)
+                       return -EINVAL;
 
-       s = strchr(s + 1, ']');
-       if (s)
-               *s = '>';
+               sseu = &pc->legacy_rcs_sseu;
+       }
+
+       ret = i915_gem_user_to_context_sseu(&i915->gt, &user_sseu, sseu);
+       if (ret)
+               return ret;
+
+       args->size = sizeof(user_sseu);
+
+       return 0;
 }
 
-static void context_close(struct i915_gem_context *ctx)
+static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
+                              struct i915_gem_proto_context *pc,
+                              struct drm_i915_gem_context_param *args)
 {
-       struct i915_address_space *vm;
+       int ret = 0;
 
-       /* Flush any concurrent set_engines() */
-       mutex_lock(&ctx->engines_mutex);
-       engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
-       i915_gem_context_set_closed(ctx);
-       mutex_unlock(&ctx->engines_mutex);
+       switch (args->param) {
+       case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+               if (args->size)
+                       ret = -EINVAL;
+               else if (args->value)
+                       pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE);
+               else
+                       pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE);
+               break;
 
-       mutex_lock(&ctx->mutex);
+       case I915_CONTEXT_PARAM_BANNABLE:
+               if (args->size)
+                       ret = -EINVAL;
+               else if (!capable(CAP_SYS_ADMIN) && !args->value)
+                       ret = -EPERM;
+               else if (args->value)
+                       pc->user_flags |= BIT(UCONTEXT_BANNABLE);
+               else
+                       pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
+               break;
 
-       set_closed_name(ctx);
+       case I915_CONTEXT_PARAM_RECOVERABLE:
+               if (args->size)
+                       ret = -EINVAL;
+               else if (args->value)
+                       pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
+               else
+                       pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
+               break;
 
-       vm = i915_gem_context_vm(ctx);
-       if (vm)
-               i915_vm_close(vm);
+       case I915_CONTEXT_PARAM_PRIORITY:
+               ret = validate_priority(fpriv->dev_priv, args);
+               if (!ret)
+                       pc->sched.priority = args->value;
+               break;
 
-       ctx->file_priv = ERR_PTR(-EBADF);
+       case I915_CONTEXT_PARAM_SSEU:
+               ret = set_proto_ctx_sseu(fpriv, pc, args);
+               break;
 
-       /*
-        * The LUT uses the VMA as a backpointer to unref the object,
-        * so we need to clear the LUT before we close all the VMA (inside
-        * the ppgtt).
-        */
-       lut_close(ctx);
+       case I915_CONTEXT_PARAM_VM:
+               ret = set_proto_ctx_vm(fpriv, pc, args);
+               break;
 
-       spin_lock(&ctx->i915->gem.contexts.lock);
-       list_del(&ctx->link);
-       spin_unlock(&ctx->i915->gem.contexts.lock);
+       case I915_CONTEXT_PARAM_ENGINES:
+               ret = set_proto_ctx_engines(fpriv, pc, args);
+               break;
 
-       mutex_unlock(&ctx->mutex);
+       case I915_CONTEXT_PARAM_PERSISTENCE:
+               if (args->size)
+                       ret = -EINVAL;
+               else
+                       ret = proto_context_set_persistence(fpriv->dev_priv,
+                                                           pc, args->value);
+               break;
 
-       /*
-        * If the user has disabled hangchecking, we can not be sure that
-        * the batches will ever complete after the context is closed,
-        * keeping the context and all resources pinned forever. So in this
-        * case we opt to forcibly kill off all remaining requests on
-        * context close.
-        */
-       kill_context(ctx);
+       case I915_CONTEXT_PARAM_NO_ZEROMAP:
+       case I915_CONTEXT_PARAM_BAN_PERIOD:
+       case I915_CONTEXT_PARAM_RINGSIZE:
+       default:
+               ret = -EINVAL;
+               break;
+       }
 
-       i915_gem_context_put(ctx);
+       return ret;
 }
 
-static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
+static struct i915_address_space *
+context_get_vm_rcu(struct i915_gem_context *ctx)
 {
-       if (i915_gem_context_is_persistent(ctx) == state)
-               return 0;
+       GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
+
+       do {
+               struct i915_address_space *vm;
 
-       if (state) {
                /*
-                * Only contexts that are short-lived [that will expire or be
-                * reset] are allowed to survive past termination. We require
-                * hangcheck to ensure that the persistent requests are healthy.
+                * We do not allow downgrading from full-ppgtt [to a shared
+                * global gtt], so ctx->vm cannot become NULL.
                 */
-               if (!ctx->i915->params.enable_hangcheck)
-                       return -EINVAL;
-
-               i915_gem_context_set_persistence(ctx);
-       } else {
-               /* To cancel a context we use "preempt-to-idle" */
-               if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
-                       return -ENODEV;
+               vm = rcu_dereference(ctx->vm);
+               if (!kref_get_unless_zero(&vm->ref))
+                       continue;
 
                /*
-                * If the cancel fails, we then need to reset, cleanly!
-                *
-                * If the per-engine reset fails, all hope is lost! We resort
-                * to a full GPU reset in that unlikely case, but realistically
-                * if the engine could not reset, the full reset does not fare
-                * much better. The damage has been done.
+                * This ppgtt may have been reallocated between
+                * the read and the kref, and reassigned to a third
+                * context. In order to avoid inadvertent sharing
+                * of this ppgtt with that third context (and not
+                * ctx), we have to confirm that we have the same
+                * ppgtt after passing through the strong memory
+                * barrier implied by a successful
+                * kref_get_unless_zero().
                 *
-                * However, if we cannot reset an engine by itself, we cannot
-                * cleanup a hanging persistent context without causing
-                * collateral damage, and we should not pretend we can by
-                * exposing the interface.
+                * Once we have acquired the current ppgtt of ctx,
+                * we no longer care if it is released from ctx, as
+                * it cannot be reallocated elsewhere.
                 */
-               if (!intel_has_reset_engine(&ctx->i915->gt))
-                       return -ENODEV;
 
-               i915_gem_context_clear_persistence(ctx);
-       }
+               if (vm == rcu_access_pointer(ctx->vm))
+                       return rcu_pointer_handoff(vm);
 
-       return 0;
+               i915_vm_put(vm);
+       } while (1);
 }
 
-static struct i915_gem_context *
-__create_context(struct drm_i915_private *i915)
+static int intel_context_set_gem(struct intel_context *ce,
+                                struct i915_gem_context *ctx,
+                                struct intel_sseu sseu)
 {
-       struct i915_gem_context *ctx;
-       struct i915_gem_engines *e;
-       int err;
-       int i;
+       int ret = 0;
 
-       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-       if (!ctx)
-               return ERR_PTR(-ENOMEM);
+       GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
+       RCU_INIT_POINTER(ce->gem_context, ctx);
 
-       kref_init(&ctx->ref);
-       ctx->i915 = i915;
-       ctx->sched.priority = I915_PRIORITY_NORMAL;
-       mutex_init(&ctx->mutex);
-       INIT_LIST_HEAD(&ctx->link);
+       ce->ring_size = SZ_16K;
 
-       spin_lock_init(&ctx->stale.lock);
-       INIT_LIST_HEAD(&ctx->stale.engines);
+       if (rcu_access_pointer(ctx->vm)) {
+               struct i915_address_space *vm;
 
-       mutex_init(&ctx->engines_mutex);
-       e = default_engines(ctx);
-       if (IS_ERR(e)) {
-               err = PTR_ERR(e);
-               goto err_free;
-       }
-       RCU_INIT_POINTER(ctx->engines, e);
+               rcu_read_lock();
+               vm = context_get_vm_rcu(ctx); /* hmm */
+               rcu_read_unlock();
 
-       INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
-       mutex_init(&ctx->lut_mutex);
+               i915_vm_put(ce->vm);
+               ce->vm = vm;
+       }
 
-       /* NB: Mark all slices as needing a remap so that when the context first
-        * loads it will restore whatever remap state already exists. If there
-        * is no remap info, it will be a NOP. */
-       ctx->remap_slice = ALL_L3_SLICES(i915);
+       if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
+           intel_engine_has_timeslices(ce->engine) &&
+           intel_engine_has_semaphores(ce->engine))
+               __set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
 
-       i915_gem_context_set_bannable(ctx);
-       i915_gem_context_set_recoverable(ctx);
-       __context_set_persistence(ctx, true /* cgroup hook? */);
+       if (IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) &&
+           ctx->i915->params.request_timeout_ms) {
+               unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
 
-       for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
-               ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+               intel_context_set_watchdog_us(ce, (u64)timeout_ms * 1000);
+       }
 
-       return ctx;
+       /* A valid SSEU has no zero fields */
+       if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
+               ret = intel_context_reconfigure_sseu(ce, sseu);
 
-err_free:
-       kfree(ctx);
-       return ERR_PTR(err);
+       return ret;
 }
 
-static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx,
-                       bool *user_engines)
+static void __free_engines(struct i915_gem_engines *e, unsigned int count)
 {
-       struct i915_gem_engines *engines;
-
-       rcu_read_lock();
-       do {
-               engines = rcu_dereference(ctx->engines);
-               GEM_BUG_ON(!engines);
-
-               if (user_engines)
-                       *user_engines = i915_gem_context_user_engines(ctx);
-
-               /* successful await => strong mb */
-               if (unlikely(!i915_sw_fence_await(&engines->fence)))
+       while (count--) {
+               if (!e->engines[count])
                        continue;
 
-               if (likely(engines == rcu_access_pointer(ctx->engines)))
-                       break;
-
-               i915_sw_fence_complete(&engines->fence);
-       } while (1);
-       rcu_read_unlock();
-
-       return engines;
+               intel_context_put(e->engines[count]);
+       }
+       kfree(e);
 }
 
-static int
-context_apply_all(struct i915_gem_context *ctx,
-                 int (*fn)(struct intel_context *ce, void *data),
-                 void *data)
+static void free_engines(struct i915_gem_engines *e)
 {
-       struct i915_gem_engines_iter it;
-       struct i915_gem_engines *e;
-       struct intel_context *ce;
-       int err = 0;
-
-       e = __context_engines_await(ctx, NULL);
-       for_each_gem_engine(ce, e, it) {
-               err = fn(ce, data);
-               if (err)
-                       break;
-       }
-       i915_sw_fence_complete(&e->fence);
-
-       return err;
+       __free_engines(e, e->num_engines);
 }
 
-static int __apply_ppgtt(struct intel_context *ce, void *vm)
+static void free_engines_rcu(struct rcu_head *rcu)
 {
-       i915_vm_put(ce->vm);
-       ce->vm = i915_vm_get(vm);
-       return 0;
+       struct i915_gem_engines *engines =
+               container_of(rcu, struct i915_gem_engines, rcu);
+
+       i915_sw_fence_fini(&engines->fence);
+       free_engines(engines);
 }
 
-static struct i915_address_space *
-__set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm)
+static int __i915_sw_fence_call
+engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
-       struct i915_address_space *old;
-
-       old = rcu_replace_pointer(ctx->vm,
-                                 i915_vm_open(vm),
-                                 lockdep_is_held(&ctx->mutex));
-       GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old));
+       struct i915_gem_engines *engines =
+               container_of(fence, typeof(*engines), fence);
 
-       context_apply_all(ctx, __apply_ppgtt, vm);
+       switch (state) {
+       case FENCE_COMPLETE:
+               if (!list_empty(&engines->link)) {
+                       struct i915_gem_context *ctx = engines->ctx;
+                       unsigned long flags;
 
-       return old;
-}
+                       spin_lock_irqsave(&ctx->stale.lock, flags);
+                       list_del(&engines->link);
+                       spin_unlock_irqrestore(&ctx->stale.lock, flags);
+               }
+               i915_gem_context_put(engines->ctx);
+               break;
 
-static void __assign_ppgtt(struct i915_gem_context *ctx,
-                          struct i915_address_space *vm)
-{
-       if (vm == rcu_access_pointer(ctx->vm))
-               return;
+       case FENCE_FREE:
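+               /* ctx->engines is read under RCU, so defer the actual free */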
+               init_rcu_head(&engines->rcu);
+               call_rcu(&engines->rcu, free_engines_rcu);
+               break;
+       }
 
-       vm = __set_ppgtt(ctx, vm);
-       if (vm)
-               i915_vm_close(vm);
+       return NOTIFY_DONE;
 }
 
-static void __set_timeline(struct intel_timeline **dst,
-                          struct intel_timeline *src)
+static struct i915_gem_engines *alloc_engines(unsigned int count)
 {
-       struct intel_timeline *old = *dst;
+       struct i915_gem_engines *e;
 
-       *dst = src ? intel_timeline_get(src) : NULL;
+       e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
+       if (!e)
+               return NULL;
 
-       if (old)
-               intel_timeline_put(old);
+       i915_sw_fence_init(&e->fence, engines_notify);
+       return e;
 }
 
-static int __apply_timeline(struct intel_context *ce, void *timeline)
+static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx,
+                                               struct intel_sseu rcs_sseu)
 {
-       __set_timeline(&ce->timeline, timeline);
-       return 0;
-}
+       const struct intel_gt *gt = &ctx->i915->gt;
+       struct intel_engine_cs *engine;
+       struct i915_gem_engines *e, *err;
+       enum intel_engine_id id;
 
-static void __assign_timeline(struct i915_gem_context *ctx,
-                             struct intel_timeline *timeline)
-{
-       __set_timeline(&ctx->timeline, timeline);
-       context_apply_all(ctx, __apply_timeline, timeline);
-}
+       e = alloc_engines(I915_NUM_ENGINES);
+       if (!e)
+               return ERR_PTR(-ENOMEM);
 
-static int __apply_watchdog(struct intel_context *ce, void *timeout_us)
-{
-       return intel_context_set_watchdog_us(ce, (uintptr_t)timeout_us);
-}
+       for_each_engine(engine, gt, id) {
+               struct intel_context *ce;
+               struct intel_sseu sseu = {};
+               int ret;
 
-static int
-__set_watchdog(struct i915_gem_context *ctx, unsigned long timeout_us)
-{
-       int ret;
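+               /* Only include engines that have a legacy uABI slot */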
+               if (engine->legacy_idx == INVALID_ENGINE)
+                       continue;
 
-       ret = context_apply_all(ctx, __apply_watchdog,
-                               (void *)(uintptr_t)timeout_us);
-       if (!ret)
-               ctx->watchdog.timeout_us = timeout_us;
+               GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES);
+               GEM_BUG_ON(e->engines[engine->legacy_idx]);
 
-       return ret;
-}
+               ce = intel_context_create(engine);
+               if (IS_ERR(ce)) {
+                       err = ERR_CAST(ce);
+                       goto free_engines;
+               }
 
-static void __set_default_fence_expiry(struct i915_gem_context *ctx)
-{
-       struct drm_i915_private *i915 = ctx->i915;
-       int ret;
+               e->engines[engine->legacy_idx] = ce;
+               e->num_engines = max(e->num_engines, engine->legacy_idx + 1);
 
-       if (!IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) ||
-           !i915->params.request_timeout_ms)
-               return;
+               if (engine->class == RENDER_CLASS)
+                       sseu = rcs_sseu;
 
-       /* Default expiry for user fences. */
-       ret = __set_watchdog(ctx, i915->params.request_timeout_ms * 1000);
-       if (ret)
-               drm_notice(&i915->drm,
-                          "Failed to configure default fence expiry! (%d)",
-                          ret);
+               ret = intel_context_set_gem(ce, ctx, sseu);
+               if (ret) {
+                       err = ERR_PTR(ret);
+                       goto free_engines;
+               }
+
+       }
+
+       return e;
+
+free_engines:
+       free_engines(e);
+       return err;
 }
 
-static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
+static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
+                                            unsigned int num_engines,
+                                            struct i915_gem_proto_engine *pe)
 {
-       struct i915_gem_context *ctx;
+       struct i915_gem_engines *e, *err;
+       unsigned int n;
 
-       if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
-           !HAS_EXECLISTS(i915))
-               return ERR_PTR(-EINVAL);
+       e = alloc_engines(num_engines);
+       if (!e)
+               return ERR_PTR(-ENOMEM);
+
+       for (n = 0; n < num_engines; n++) {
+               struct intel_context *ce;
+               int ret;
 
-       ctx = __create_context(i915);
-       if (IS_ERR(ctx))
-               return ctx;
+               switch (pe[n].type) {
+               case I915_GEM_ENGINE_TYPE_PHYSICAL:
+                       ce = intel_context_create(pe[n].engine);
+                       break;
 
-       if (HAS_FULL_PPGTT(i915)) {
-               struct i915_ppgtt *ppgtt;
+               case I915_GEM_ENGINE_TYPE_BALANCED:
+                       ce = intel_engine_create_virtual(pe[n].siblings,
+                                                        pe[n].num_siblings);
+                       break;
 
-               ppgtt = i915_ppgtt_create(&i915->gt);
-               if (IS_ERR(ppgtt)) {
-                       drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
-                               PTR_ERR(ppgtt));
-                       context_close(ctx);
-                       return ERR_CAST(ppgtt);
+               case I915_GEM_ENGINE_TYPE_INVALID:
+               default:
+                       GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID);
+                       continue;
+               }
+
+               if (IS_ERR(ce)) {
+                       err = ERR_CAST(ce);
+                       goto free_engines;
                }
 
-               mutex_lock(&ctx->mutex);
-               __assign_ppgtt(ctx, &ppgtt->vm);
-               mutex_unlock(&ctx->mutex);
+               e->engines[n] = ce;
 
-               i915_vm_put(&ppgtt->vm);
+               ret = intel_context_set_gem(ce, ctx, pe[n].sseu);
+               if (ret) {
+                       err = ERR_PTR(ret);
+                       goto free_engines;
+               }
        }
+       e->num_engines = num_engines;
 
-       if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
-               struct intel_timeline *timeline;
+       return e;
 
-               timeline = intel_timeline_create(&i915->gt);
-               if (IS_ERR(timeline)) {
-                       context_close(ctx);
-                       return ERR_CAST(timeline);
-               }
+free_engines:
+       free_engines(e);
+       return err;
+}
 
-               __assign_timeline(ctx, timeline);
-               intel_timeline_put(timeline);
-       }
+void i915_gem_context_release(struct kref *ref)
+{
+       struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
 
-       __set_default_fence_expiry(ctx);
+       trace_i915_context_free(ctx);
+       GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
-       trace_i915_context_create(ctx);
+       mutex_destroy(&ctx->engines_mutex);
+       mutex_destroy(&ctx->lut_mutex);
 
-       return ctx;
+       put_pid(ctx->pid);
+       mutex_destroy(&ctx->mutex);
+
+       kfree_rcu(ctx, rcu);
 }
 
-static void init_contexts(struct i915_gem_contexts *gc)
+static inline struct i915_gem_engines *
+__context_engines_static(const struct i915_gem_context *ctx)
 {
-       spin_lock_init(&gc->lock);
-       INIT_LIST_HEAD(&gc->list);
+       return rcu_dereference_protected(ctx->engines, true);
 }
 
-void i915_gem_init__contexts(struct drm_i915_private *i915)
+static void __reset_context(struct i915_gem_context *ctx,
+                           struct intel_engine_cs *engine)
 {
-       init_contexts(&i915->gem.contexts);
+       intel_gt_handle_error(engine->gt, engine->mask, 0,
+                             "context closure in %s", ctx->name);
 }
 
-static int gem_context_register(struct i915_gem_context *ctx,
-                               struct drm_i915_file_private *fpriv,
-                               u32 *id)
+static bool __cancel_engine(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *i915 = ctx->i915;
-       struct i915_address_space *vm;
-       int ret;
+       /*
+        * Send a "high priority pulse" down the engine to cause the
+        * current request to be momentarily preempted. (If it fails to
+        * be preempted, it will be reset). As we have marked our context
+        * as banned, any incomplete request, including any running, will
+        * be skipped following the preemption.
+        *
+        * If there is no hangchecking (one of the reasons why we try to
+        * cancel the context) and no forced preemption, there may be no
+        * means by which we reset the GPU and evict the persistent hog.
+        * Ergo if we are unable to inject a preemptive pulse that can
+        * kill the banned context, we fall back to doing a local reset
+        * instead.
+        */
+       return intel_engine_pulse(engine) == 0;
+}
 
-       ctx->file_priv = fpriv;
+static struct intel_engine_cs *active_engine(struct intel_context *ce)
+{
+       struct intel_engine_cs *engine = NULL;
+       struct i915_request *rq;
 
-       mutex_lock(&ctx->mutex);
-       vm = i915_gem_context_vm(ctx);
-       if (vm)
-               WRITE_ONCE(vm->file, fpriv); /* XXX */
-       mutex_unlock(&ctx->mutex);
+       if (intel_context_has_inflight(ce))
+               return intel_context_inflight(ce);
 
-       ctx->pid = get_task_pid(current, PIDTYPE_PID);
-       snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
-                current->comm, pid_nr(ctx->pid));
+       if (!ce->timeline)
+               return NULL;
 
-       /* And finally expose ourselves to userspace via the idr */
-       ret = xa_alloc(&fpriv->context_xa, id, ctx, xa_limit_32b, GFP_KERNEL);
-       if (ret)
-               goto err_pid;
+       /*
+        * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference
+        * to the request to prevent it being transferred to a new timeline
+        * (and onto a new timeline->requests list).
+        */
+       rcu_read_lock();
+       list_for_each_entry_reverse(rq, &ce->timeline->requests, link) {
+               bool found;
 
-       spin_lock(&i915->gem.contexts.lock);
-       list_add_tail(&ctx->link, &i915->gem.contexts.list);
-       spin_unlock(&i915->gem.contexts.lock);
+               /* timeline is already completed up to this point? */
+               if (!i915_request_get_rcu(rq))
+                       break;
 
-       return 0;
+               /* Check with the backend if the request is inflight */
+               found = true;
+               if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
+                       found = i915_request_active_engine(rq, &engine);
 
-err_pid:
-       put_pid(fetch_and_zero(&ctx->pid));
-       return ret;
+               i915_request_put(rq);
+               if (found)
+                       break;
+       }
+       rcu_read_unlock();
+
+       return engine;
 }
 
-int i915_gem_context_open(struct drm_i915_private *i915,
-                         struct drm_file *file)
+static void kill_engines(struct i915_gem_engines *engines, bool ban)
 {
-       struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct i915_gem_context *ctx;
-       int err;
-       u32 id;
+       struct i915_gem_engines_iter it;
+       struct intel_context *ce;
 
-       xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
+       /*
+        * Map the user's engine back to the actual engines; one virtual
+        * engine will be mapped to multiple engines, and using ctx->engine[]
+        * the same engine may have multiple instances in the user's map.
+        * However, we only care about pending requests, so only include
+        * engines on which there are incomplete requests.
+        */
+       for_each_gem_engine(ce, engines, it) {
+               struct intel_engine_cs *engine;
 
-       /* 0 reserved for invalid/unassigned ppgtt */
-       xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
+               if (ban && intel_context_ban(ce, NULL))
+                       continue;
 
-       ctx = i915_gem_create_context(i915, 0);
-       if (IS_ERR(ctx)) {
-               err = PTR_ERR(ctx);
-               goto err;
+               /*
+                * Check the current active state of this context; if we
+                * are currently executing on the GPU we need to evict
+                * ourselves. On the other hand, if we haven't yet been
+                * submitted to the GPU or if everything is complete,
+                * we have nothing to do.
+                */
+               engine = active_engine(ce);
+
+               /* First attempt to gracefully cancel the context */
+               if (engine && !__cancel_engine(engine) && ban)
+                       /*
+                        * If we are unable to send a preemptive pulse to bump
+                        * the context from the GPU, we have to resort to a full
+                        * reset. We hope the collateral damage is worth it.
+                        */
+                       __reset_context(engines->ctx, engine);
        }
+}
 
-       err = gem_context_register(ctx, file_priv, &id);
-       if (err < 0)
-               goto err_ctx;
+static void kill_context(struct i915_gem_context *ctx)
+{
+       bool ban = (!i915_gem_context_is_persistent(ctx) ||
+                   !ctx->i915->params.enable_hangcheck);
+       struct i915_gem_engines *pos, *next;
 
-       GEM_BUG_ON(id);
-       return 0;
+       spin_lock_irq(&ctx->stale.lock);
+       GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+       list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
+               if (!i915_sw_fence_await(&pos->fence)) {
+                       list_del_init(&pos->link);
+                       continue;
+               }
 
-err_ctx:
-       context_close(ctx);
-err:
-       xa_destroy(&file_priv->vm_xa);
-       xa_destroy(&file_priv->context_xa);
-       return err;
-}
+               spin_unlock_irq(&ctx->stale.lock);
 
-void i915_gem_context_close(struct drm_file *file)
-{
-       struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct i915_address_space *vm;
-       struct i915_gem_context *ctx;
-       unsigned long idx;
+               kill_engines(pos, ban);
 
-       xa_for_each(&file_priv->context_xa, idx, ctx)
-               context_close(ctx);
-       xa_destroy(&file_priv->context_xa);
+               spin_lock_irq(&ctx->stale.lock);
+               GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
+               list_safe_reset_next(pos, next, link);
+               list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
 
-       xa_for_each(&file_priv->vm_xa, idx, vm)
-               i915_vm_put(vm);
-       xa_destroy(&file_priv->vm_xa);
+               i915_sw_fence_complete(&pos->fence);
+       }
+       spin_unlock_irq(&ctx->stale.lock);
 }
 
-int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
-                            struct drm_file *file)
+static void engines_idle_release(struct i915_gem_context *ctx,
+                                struct i915_gem_engines *engines)
 {
-       struct drm_i915_private *i915 = to_i915(dev);
-       struct drm_i915_gem_vm_control *args = data;
-       struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct i915_ppgtt *ppgtt;
-       u32 id;
-       int err;
+       struct i915_gem_engines_iter it;
+       struct intel_context *ce;
 
-       if (!HAS_FULL_PPGTT(i915))
-               return -ENODEV;
+       INIT_LIST_HEAD(&engines->link);
 
-       if (args->flags)
-               return -EINVAL;
+       engines->ctx = i915_gem_context_get(ctx);
 
-       ppgtt = i915_ppgtt_create(&i915->gt);
-       if (IS_ERR(ppgtt))
-               return PTR_ERR(ppgtt);
+       for_each_gem_engine(ce, engines, it) {
+               int err;
 
-       ppgtt->vm.file = file_priv;
+               /* serialises with execbuf */
+               set_bit(CONTEXT_CLOSED_BIT, &ce->flags);
+               if (!intel_context_pin_if_active(ce))
+                       continue;
 
-       if (args->extensions) {
-               err = i915_user_extensions(u64_to_user_ptr(args->extensions),
-                                          NULL, 0,
-                                          ppgtt);
+               /* Wait until context is finally scheduled out and retired */
+               err = i915_sw_fence_await_active(&engines->fence,
+                                                &ce->active,
+                                                I915_ACTIVE_AWAIT_BARRIER);
+               intel_context_unpin(ce);
                if (err)
-                       goto err_put;
+                       goto kill;
        }
 
-       err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
-                      xa_limit_32b, GFP_KERNEL);
-       if (err)
-               goto err_put;
+       spin_lock_irq(&ctx->stale.lock);
+       if (!i915_gem_context_is_closed(ctx))
+               list_add_tail(&engines->link, &ctx->stale.engines);
+       spin_unlock_irq(&ctx->stale.lock);
 
-       GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
-       args->vm_id = id;
-       return 0;
+kill:
+       if (list_empty(&engines->link)) /* raced, already closed */
+               kill_engines(engines, true);
 
-err_put:
-       i915_vm_put(&ppgtt->vm);
-       return err;
+       i915_sw_fence_commit(&engines->fence);
 }
 
-int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
-                             struct drm_file *file)
+static void set_closed_name(struct i915_gem_context *ctx)
 {
-       struct drm_i915_file_private *file_priv = file->driver_priv;
-       struct drm_i915_gem_vm_control *args = data;
-       struct i915_address_space *vm;
+       char *s;
 
-       if (args->flags)
-               return -EINVAL;
+       /* Replace '[]' with '<>' to indicate closed in debug prints */
 
-       if (args->extensions)
-               return -EINVAL;
+       s = strrchr(ctx->name, '[');
+       if (!s)
+               return;
 
-       vm = xa_erase(&file_priv->vm_xa, args->vm_id);
-       if (!vm)
-               return -ENOENT;
+       *s = '<';
 
-       i915_vm_put(vm);
-       return 0;
+       s = strchr(s + 1, ']');
+       if (s)
+               *s = '>';
 }
 
-struct context_barrier_task {
-       struct i915_active base;
-       void (*task)(void *data);
-       void *data;
-};
-
-static void cb_retire(struct i915_active *base)
+static void context_close(struct i915_gem_context *ctx)
 {
-       struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
-
-       if (cb->task)
-               cb->task(cb->data);
+       struct i915_address_space *vm;
 
-       i915_active_fini(&cb->base);
-       kfree(cb);
-}
+       /* Flush any concurrent set_engines() */
+       mutex_lock(&ctx->engines_mutex);
+       engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
+       i915_gem_context_set_closed(ctx);
+       mutex_unlock(&ctx->engines_mutex);
 
-I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
-static int context_barrier_task(struct i915_gem_context *ctx,
-                               intel_engine_mask_t engines,
-                               bool (*skip)(struct intel_context *ce, void *data),
-                               int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data),
-                               int (*emit)(struct i915_request *rq, void *data),
-                               void (*task)(void *data),
-                               void *data)
-{
-       struct context_barrier_task *cb;
-       struct i915_gem_engines_iter it;
-       struct i915_gem_engines *e;
-       struct i915_gem_ww_ctx ww;
-       struct intel_context *ce;
-       int err = 0;
+       mutex_lock(&ctx->mutex);
 
-       GEM_BUG_ON(!task);
+       set_closed_name(ctx);
 
-       cb = kmalloc(sizeof(*cb), GFP_KERNEL);
-       if (!cb)
-               return -ENOMEM;
+       vm = i915_gem_context_vm(ctx);
+       if (vm)
+               i915_vm_close(vm);
 
-       i915_active_init(&cb->base, NULL, cb_retire, 0);
-       err = i915_active_acquire(&cb->base);
-       if (err) {
-               kfree(cb);
-               return err;
-       }
+       if (ctx->syncobj)
+               drm_syncobj_put(ctx->syncobj);
 
-       e = __context_engines_await(ctx, NULL);
-       if (!e) {
-               i915_active_release(&cb->base);
-               return -ENOENT;
-       }
+       ctx->file_priv = ERR_PTR(-EBADF);
 
-       for_each_gem_engine(ce, e, it) {
-               struct i915_request *rq;
+       /*
+        * The LUT uses the VMA as a backpointer to unref the object,
+        * so we need to clear the LUT before we close all the VMA (inside
+        * the ppgtt).
+        */
+       lut_close(ctx);
 
-               if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
-                                      ce->engine->mask)) {
-                       err = -ENXIO;
-                       break;
-               }
+       spin_lock(&ctx->i915->gem.contexts.lock);
+       list_del(&ctx->link);
+       spin_unlock(&ctx->i915->gem.contexts.lock);
 
-               if (!(ce->engine->mask & engines))
-                       continue;
+       mutex_unlock(&ctx->mutex);
 
-               if (skip && skip(ce, data))
-                       continue;
+       /*
+        * If the user has disabled hangchecking, we can not be sure that
+        * the batches will ever complete after the context is closed,
+        * keeping the context and all resources pinned forever. So in this
+        * case we opt to forcibly kill off all remaining requests on
+        * context close.
+        */
+       kill_context(ctx);
 
-               i915_gem_ww_ctx_init(&ww, true);
-retry:
-               err = intel_context_pin_ww(ce, &ww);
-               if (err)
-                       goto err;
+       i915_gem_context_put(ctx);
+}
 
-               if (pin)
-                       err = pin(ce, &ww, data);
-               if (err)
-                       goto err_unpin;
+static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
+{
+       if (i915_gem_context_is_persistent(ctx) == state)
+               return 0;
 
-               rq = i915_request_create(ce);
-               if (IS_ERR(rq)) {
-                       err = PTR_ERR(rq);
-                       goto err_unpin;
-               }
+       if (state) {
+               /*
+                * Only contexts that are short-lived [that will expire or be
+                * reset] are allowed to survive past termination. We require
+                * hangcheck to ensure that the persistent requests are healthy.
+                */
+               if (!ctx->i915->params.enable_hangcheck)
+                       return -EINVAL;
 
-               err = 0;
-               if (emit)
-                       err = emit(rq, data);
-               if (err == 0)
-                       err = i915_active_add_request(&cb->base, rq);
+               i915_gem_context_set_persistence(ctx);
+       } else {
+               /* To cancel a context we use "preempt-to-idle" */
+               if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+                       return -ENODEV;
 
-               i915_request_add(rq);
-err_unpin:
-               intel_context_unpin(ce);
-err:
-               if (err == -EDEADLK) {
-                       err = i915_gem_ww_ctx_backoff(&ww);
-                       if (!err)
-                               goto retry;
-               }
-               i915_gem_ww_ctx_fini(&ww);
+               /*
+                * If the cancel fails, we then need to reset, cleanly!
+                *
+                * If the per-engine reset fails, all hope is lost! We resort
+                * to a full GPU reset in that unlikely case, but realistically
+                * if the engine could not reset, the full reset does not fare
+                * much better. The damage has been done.
+                *
+                * However, if we cannot reset an engine by itself, we cannot
+                * cleanup a hanging persistent context without causing
+                * collateral damage, and we should not pretend we can by
+                * exposing the interface.
+                */
+               if (!intel_has_reset_engine(&ctx->i915->gt))
+                       return -ENODEV;
 
-               if (err)
-                       break;
+               i915_gem_context_clear_persistence(ctx);
        }
-       i915_sw_fence_complete(&e->fence);
-
-       cb->task = err ? NULL : task; /* caller needs to unwind instead */
-       cb->data = data;
-
-       i915_active_release(&cb->base);
 
-       return err;
+       return 0;
 }
 
-static int get_ppgtt(struct drm_i915_file_private *file_priv,
-                    struct i915_gem_context *ctx,
-                    struct drm_i915_gem_context_param *args)
+static inline struct i915_gem_engines *
+__context_engines_await(const struct i915_gem_context *ctx,
+                       bool *user_engines)
 {
-       struct i915_address_space *vm;
-       int err;
-       u32 id;
-
-       if (!rcu_access_pointer(ctx->vm))
-               return -ENODEV;
+       struct i915_gem_engines *engines;
 
        rcu_read_lock();
-       vm = context_get_vm_rcu(ctx);
-       rcu_read_unlock();
-       if (!vm)
-               return -ENODEV;
-
-       err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
-       if (err)
-               goto err_put;
-
-       i915_vm_open(vm);
+       do {
+               engines = rcu_dereference(ctx->engines);
+               GEM_BUG_ON(!engines);
 
-       GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
-       args->value = id;
-       args->size = 0;
+               if (user_engines)
+                       *user_engines = i915_gem_context_user_engines(ctx);
 
-err_put:
-       i915_vm_put(vm);
-       return err;
-}
+               /* successful await => strong mb */
+               if (unlikely(!i915_sw_fence_await(&engines->fence)))
+                       continue;
 
-static void set_ppgtt_barrier(void *data)
-{
-       struct i915_address_space *old = data;
+               if (likely(engines == rcu_access_pointer(ctx->engines)))
+                       break;
 
-       if (GRAPHICS_VER(old->i915) < 8)
-               gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old));
+               i915_sw_fence_complete(&engines->fence);
+       } while (1);
+       rcu_read_unlock();
 
-       i915_vm_close(old);
+       return engines;
 }
 
-static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data)
+static void
+context_apply_all(struct i915_gem_context *ctx,
+                 void (*fn)(struct intel_context *ce, void *data),
+                 void *data)
 {
-       struct i915_address_space *vm = ce->vm;
-
-       if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915))
-               /* ppGTT is not part of the legacy context image */
-               return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm), ww);
+       struct i915_gem_engines_iter it;
+       struct i915_gem_engines *e;
+       struct intel_context *ce;
 
-       return 0;
+       e = __context_engines_await(ctx, NULL);
+       for_each_gem_engine(ce, e, it)
+               fn(ce, data);
+       i915_sw_fence_complete(&e->fence);
 }
 
-static int emit_ppgtt_update(struct i915_request *rq, void *data)
+static struct i915_gem_context *
+i915_gem_create_context(struct drm_i915_private *i915,
+                       const struct i915_gem_proto_context *pc)
 {
-       struct i915_address_space *vm = rq->context->vm;
-       struct intel_engine_cs *engine = rq->engine;
-       u32 base = engine->mmio_base;
-       u32 *cs;
+       struct i915_gem_context *ctx;
+       struct i915_address_space *vm = NULL;
+       struct i915_gem_engines *e;
+       int err;
        int i;
 
-       if (i915_vm_is_4lvl(vm)) {
-               struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-               const dma_addr_t pd_daddr = px_dma(ppgtt->pd);
-
-               cs = intel_ring_begin(rq, 6);
-               if (IS_ERR(cs))
-                       return PTR_ERR(cs);
-
-               *cs++ = MI_LOAD_REGISTER_IMM(2);
-
-               *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
-               *cs++ = upper_32_bits(pd_daddr);
-               *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
-               *cs++ = lower_32_bits(pd_daddr);
-
-               *cs++ = MI_NOOP;
-               intel_ring_advance(rq, cs);
-       } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
-               struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-               int err;
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return ERR_PTR(-ENOMEM);
 
-               /* Magic required to prevent forcewake errors! */
-               err = engine->emit_flush(rq, EMIT_INVALIDATE);
-               if (err)
-                       return err;
+       kref_init(&ctx->ref);
+       ctx->i915 = i915;
+       ctx->sched = pc->sched;
+       mutex_init(&ctx->mutex);
+       INIT_LIST_HEAD(&ctx->link);
 
-               cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
-               if (IS_ERR(cs))
-                       return PTR_ERR(cs);
+       spin_lock_init(&ctx->stale.lock);
+       INIT_LIST_HEAD(&ctx->stale.engines);
 
-               *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
-               for (i = GEN8_3LVL_PDPES; i--; ) {
-                       const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+       if (pc->vm) {
+               vm = i915_vm_get(pc->vm);
+       } else if (HAS_FULL_PPGTT(i915)) {
+               struct i915_ppgtt *ppgtt;
 
-                       *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
-                       *cs++ = upper_32_bits(pd_daddr);
-                       *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
-                       *cs++ = lower_32_bits(pd_daddr);
+               ppgtt = i915_ppgtt_create(&i915->gt);
+               if (IS_ERR(ppgtt)) {
+                       drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
+                               PTR_ERR(ppgtt));
+                       err = PTR_ERR(ppgtt);
+                       goto err_ctx;
                }
-               *cs++ = MI_NOOP;
-               intel_ring_advance(rq, cs);
+               vm = &ppgtt->vm;
        }
+       if (vm) {
+               RCU_INIT_POINTER(ctx->vm, i915_vm_open(vm));
 
-       return 0;
-}
+               /* i915_vm_open() takes a reference */
+               i915_vm_put(vm);
+       }
 
-static bool skip_ppgtt_update(struct intel_context *ce, void *data)
-{
-       if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
-               return !ce->state;
-       else
-               return !atomic_read(&ce->pin_count);
-}
+       mutex_init(&ctx->engines_mutex);
+       if (pc->num_user_engines >= 0) {
+               i915_gem_context_set_user_engines(ctx);
+               e = user_engines(ctx, pc->num_user_engines, pc->user_engines);
+       } else {
+               i915_gem_context_clear_user_engines(ctx);
+               e = default_engines(ctx, pc->legacy_rcs_sseu);
+       }
+       if (IS_ERR(e)) {
+               err = PTR_ERR(e);
+               goto err_vm;
+       }
+       RCU_INIT_POINTER(ctx->engines, e);
 
-static int set_ppgtt(struct drm_i915_file_private *file_priv,
-                    struct i915_gem_context *ctx,
-                    struct drm_i915_gem_context_param *args)
+       INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
+       mutex_init(&ctx->lut_mutex);
+
+       /* NB: Mark all slices as needing a remap so that when the context first
+        * loads it will restore whatever remap state already exists. If there
+        * is no remap info, it will be a NOP. */
+       ctx->remap_slice = ALL_L3_SLICES(i915);
+
+       ctx->user_flags = pc->user_flags;
+
+       for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
+               ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+
+       if (pc->single_timeline) {
+               err = drm_syncobj_create(&ctx->syncobj,
+                                        DRM_SYNCOBJ_CREATE_SIGNALED,
+                                        NULL);
+               if (err)
+                       goto err_engines;
+       }
+
+       trace_i915_context_create(ctx);
+
+       return ctx;
+
+err_engines:
+       free_engines(e);
+err_vm:
+       if (ctx->vm)
+               i915_vm_close(ctx->vm);
+err_ctx:
+       kfree(ctx);
+       return ERR_PTR(err);
+}
+
+static void init_contexts(struct i915_gem_contexts *gc)
 {
-       struct i915_address_space *vm, *old;
-       int err;
+       spin_lock_init(&gc->lock);
+       INIT_LIST_HEAD(&gc->list);
+}
 
-       if (args->size)
-               return -EINVAL;
+void i915_gem_init__contexts(struct drm_i915_private *i915)
+{
+       init_contexts(&i915->gem.contexts);
+}
 
-       if (!rcu_access_pointer(ctx->vm))
-               return -ENODEV;
+static void gem_context_register(struct i915_gem_context *ctx,
+                                struct drm_i915_file_private *fpriv,
+                                u32 id)
+{
+       struct drm_i915_private *i915 = ctx->i915;
+       void *old;
 
-       if (upper_32_bits(args->value))
-               return -ENOENT;
+       ctx->file_priv = fpriv;
 
-       rcu_read_lock();
-       vm = xa_load(&file_priv->vm_xa, args->value);
-       if (vm && !kref_get_unless_zero(&vm->ref))
-               vm = NULL;
-       rcu_read_unlock();
-       if (!vm)
-               return -ENOENT;
+       ctx->pid = get_task_pid(current, PIDTYPE_PID);
+       snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
+                current->comm, pid_nr(ctx->pid));
 
-       err = mutex_lock_interruptible(&ctx->mutex);
-       if (err)
-               goto out;
+       /* And finally expose ourselves to userspace via the idr */
+       old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
+       WARN_ON(old);
 
-       if (i915_gem_context_is_closed(ctx)) {
-               err = -ENOENT;
-               goto unlock;
-       }
+       spin_lock(&i915->gem.contexts.lock);
+       list_add_tail(&ctx->link, &i915->gem.contexts.list);
+       spin_unlock(&i915->gem.contexts.lock);
+}
 
-       if (vm == rcu_access_pointer(ctx->vm))
-               goto unlock;
+int i915_gem_context_open(struct drm_i915_private *i915,
+                         struct drm_file *file)
+{
+       struct drm_i915_file_private *file_priv = file->driver_priv;
+       struct i915_gem_proto_context *pc;
+       struct i915_gem_context *ctx;
+       int err;
 
-       old = __set_ppgtt(ctx, vm);
+       mutex_init(&file_priv->proto_context_lock);
+       xa_init_flags(&file_priv->proto_context_xa, XA_FLAGS_ALLOC);
 
-       /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
-       lut_close(ctx);
+       /* 0 reserved for the default context */
+       xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC1);
 
-       /*
-        * We need to flush any requests using the current ppgtt before
-        * we release it as the requests do not hold a reference themselves,
-        * only indirectly through the context.
-        */
-       err = context_barrier_task(ctx, ALL_ENGINES,
-                                  skip_ppgtt_update,
-                                  pin_ppgtt_update,
-                                  emit_ppgtt_update,
-                                  set_ppgtt_barrier,
-                                  old);
-       if (err) {
-               i915_vm_close(__set_ppgtt(ctx, old));
-               i915_vm_close(old);
-               lut_close(ctx); /* force a rebuild of the old obj:vma cache */
+       /* 0 reserved for invalid/unassigned ppgtt */
+       xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
+
+       pc = proto_context_create(i915, 0);
+       if (IS_ERR(pc)) {
+               err = PTR_ERR(pc);
+               goto err;
        }
 
-unlock:
-       mutex_unlock(&ctx->mutex);
-out:
-       i915_vm_put(vm);
+       ctx = i915_gem_create_context(i915, pc);
+       proto_context_close(pc);
+       if (IS_ERR(ctx)) {
+               err = PTR_ERR(ctx);
+               goto err;
+       }
+
+       gem_context_register(ctx, file_priv, 0);
+
+       return 0;
+
+err:
+       xa_destroy(&file_priv->vm_xa);
+       xa_destroy(&file_priv->context_xa);
+       xa_destroy(&file_priv->proto_context_xa);
+       mutex_destroy(&file_priv->proto_context_lock);
        return err;
 }
 
-static int __apply_ringsize(struct intel_context *ce, void *sz)
+void i915_gem_context_close(struct drm_file *file)
 {
-       return intel_context_set_ring_size(ce, (unsigned long)sz);
+       struct drm_i915_file_private *file_priv = file->driver_priv;
+       struct i915_gem_proto_context *pc;
+       struct i915_address_space *vm;
+       struct i915_gem_context *ctx;
+       unsigned long idx;
+
+       xa_for_each(&file_priv->proto_context_xa, idx, pc)
+               proto_context_close(pc);
+       xa_destroy(&file_priv->proto_context_xa);
+       mutex_destroy(&file_priv->proto_context_lock);
+
+       xa_for_each(&file_priv->context_xa, idx, ctx)
+               context_close(ctx);
+       xa_destroy(&file_priv->context_xa);
+
+       xa_for_each(&file_priv->vm_xa, idx, vm)
+               i915_vm_put(vm);
+       xa_destroy(&file_priv->vm_xa);
 }
 
-static int set_ringsize(struct i915_gem_context *ctx,
-                       struct drm_i915_gem_context_param *args)
+int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
+                            struct drm_file *file)
 {
-       if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
+       struct drm_i915_private *i915 = to_i915(dev);
+       struct drm_i915_gem_vm_control *args = data;
+       struct drm_i915_file_private *file_priv = file->driver_priv;
+       struct i915_ppgtt *ppgtt;
+       u32 id;
+       int err;
+
+       if (!HAS_FULL_PPGTT(i915))
                return -ENODEV;
 
-       if (args->size)
+       if (args->flags)
                return -EINVAL;
 
-       if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE))
-               return -EINVAL;
+       ppgtt = i915_ppgtt_create(&i915->gt);
+       if (IS_ERR(ppgtt))
+               return PTR_ERR(ppgtt);
 
-       if (args->value < I915_GTT_PAGE_SIZE)
-               return -EINVAL;
+       if (args->extensions) {
+               err = i915_user_extensions(u64_to_user_ptr(args->extensions),
+                                          NULL, 0,
+                                          ppgtt);
+               if (err)
+                       goto err_put;
+       }
 
-       if (args->value > 128 * I915_GTT_PAGE_SIZE)
-               return -EINVAL;
+       err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
+                      xa_limit_32b, GFP_KERNEL);
+       if (err)
+               goto err_put;
 
-       return context_apply_all(ctx,
-                                __apply_ringsize,
-                                __intel_context_ring_size(args->value));
+       GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
+       args->vm_id = id;
+       return 0;
+
+err_put:
+       i915_vm_put(&ppgtt->vm);
+       return err;
 }
 
-static int __get_ringsize(struct intel_context *ce, void *arg)
+int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
+                             struct drm_file *file)
 {
-       long sz;
+       struct drm_i915_file_private *file_priv = file->driver_priv;
+       struct drm_i915_gem_vm_control *args = data;
+       struct i915_address_space *vm;
 
-       sz = intel_context_get_ring_size(ce);
-       GEM_BUG_ON(sz > INT_MAX);
+       if (args->flags)
+               return -EINVAL;
+
+       if (args->extensions)
+               return -EINVAL;
+
+       vm = xa_erase(&file_priv->vm_xa, args->vm_id);
+       if (!vm)
+               return -ENOENT;
 
-       return sz; /* stop on first engine */
+       i915_vm_put(vm);
+       return 0;
 }
 
-static int get_ringsize(struct i915_gem_context *ctx,
-                       struct drm_i915_gem_context_param *args)
+static int get_ppgtt(struct drm_i915_file_private *file_priv,
+                    struct i915_gem_context *ctx,
+                    struct drm_i915_gem_context_param *args)
 {
-       int sz;
+       struct i915_address_space *vm;
+       int err;
+       u32 id;
 
-       if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
+       if (!rcu_access_pointer(ctx->vm))
                return -ENODEV;
 
-       if (args->size)
-               return -EINVAL;
+       rcu_read_lock();
+       vm = context_get_vm_rcu(ctx);
+       rcu_read_unlock();
+       if (!vm)
+               return -ENODEV;
 
-       sz = context_apply_all(ctx, __get_ringsize, NULL);
-       if (sz < 0)
-               return sz;
+       err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
+       if (err)
+               goto err_put;
 
-       args->value = sz;
-       return 0;
+       i915_vm_open(vm);
+
+       GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
+       args->value = id;
+       args->size = 0;
+
+err_put:
+       i915_vm_put(vm);
+       return err;
 }
 
 int
@@ -1490,435 +1704,59 @@ i915_gem_user_to_context_sseu(struct intel_gt *gt,
        return 0;
 }
 
-static int set_sseu(struct i915_gem_context *ctx,
-                   struct drm_i915_gem_context_param *args)
-{
-       struct drm_i915_private *i915 = ctx->i915;
-       struct drm_i915_gem_context_param_sseu user_sseu;
-       struct intel_context *ce;
-       struct intel_sseu sseu;
-       unsigned long lookup;
-       int ret;
-
-       if (args->size < sizeof(user_sseu))
-               return -EINVAL;
-
-       if (GRAPHICS_VER(i915) != 11)
-               return -ENODEV;
-
-       if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
-                          sizeof(user_sseu)))
-               return -EFAULT;
-
-       if (user_sseu.rsvd)
-               return -EINVAL;
-
-       if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
-               return -EINVAL;
-
-       lookup = 0;
-       if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
-               lookup |= LOOKUP_USER_INDEX;
-
-       ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
-       if (IS_ERR(ce))
-               return PTR_ERR(ce);
-
-       /* Only render engine supports RPCS configuration. */
-       if (ce->engine->class != RENDER_CLASS) {
-               ret = -ENODEV;
-               goto out_ce;
-       }
-
-       ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
-       if (ret)
-               goto out_ce;
-
-       ret = intel_context_reconfigure_sseu(ce, sseu);
-       if (ret)
-               goto out_ce;
-
-       args->size = sizeof(user_sseu);
-
-out_ce:
-       intel_context_put(ce);
-       return ret;
-}
-
-struct set_engines {
-       struct i915_gem_context *ctx;
-       struct i915_gem_engines *engines;
-};
-
-static int
-set_engines__load_balance(struct i915_user_extension __user *base, void *data)
-{
-       struct i915_context_engines_load_balance __user *ext =
-               container_of_user(base, typeof(*ext), base);
-       const struct set_engines *set = data;
-       struct drm_i915_private *i915 = set->ctx->i915;
-       struct intel_engine_cs *stack[16];
-       struct intel_engine_cs **siblings;
-       struct intel_context *ce;
-       u16 num_siblings, idx;
-       unsigned int n;
-       int err;
-
-       if (!HAS_EXECLISTS(i915))
-               return -ENODEV;
-
-       if (intel_uc_uses_guc_submission(&i915->gt.uc))
-               return -ENODEV; /* not implement yet */
-
-       if (get_user(idx, &ext->engine_index))
-               return -EFAULT;
-
-       if (idx >= set->engines->num_engines) {
-               drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
-                       idx, set->engines->num_engines);
-               return -EINVAL;
-       }
-
-       idx = array_index_nospec(idx, set->engines->num_engines);
-       if (set->engines->engines[idx]) {
-               drm_dbg(&i915->drm,
-                       "Invalid placement[%d], already occupied\n", idx);
-               return -EEXIST;
-       }
-
-       if (get_user(num_siblings, &ext->num_siblings))
-               return -EFAULT;
-
-       err = check_user_mbz(&ext->flags);
-       if (err)
-               return err;
-
-       err = check_user_mbz(&ext->mbz64);
-       if (err)
-               return err;
-
-       siblings = stack;
-       if (num_siblings > ARRAY_SIZE(stack)) {
-               siblings = kmalloc_array(num_siblings,
-                                        sizeof(*siblings),
-                                        GFP_KERNEL);
-               if (!siblings)
-                       return -ENOMEM;
-       }
-
-       for (n = 0; n < num_siblings; n++) {
-               struct i915_engine_class_instance ci;
-
-               if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
-                       err = -EFAULT;
-                       goto out_siblings;
-               }
-
-               siblings[n] = intel_engine_lookup_user(i915,
-                                                      ci.engine_class,
-                                                      ci.engine_instance);
-               if (!siblings[n]) {
-                       drm_dbg(&i915->drm,
-                               "Invalid sibling[%d]: { class:%d, inst:%d }\n",
-                               n, ci.engine_class, ci.engine_instance);
-                       err = -EINVAL;
-                       goto out_siblings;
-               }
-       }
-
-       ce = intel_execlists_create_virtual(siblings, n);
-       if (IS_ERR(ce)) {
-               err = PTR_ERR(ce);
-               goto out_siblings;
-       }
-
-       intel_context_set_gem(ce, set->ctx);
-
-       if (cmpxchg(&set->engines->engines[idx], NULL, ce)) {
-               intel_context_put(ce);
-               err = -EEXIST;
-               goto out_siblings;
-       }
-
-out_siblings:
-       if (siblings != stack)
-               kfree(siblings);
-
-       return err;
-}
-
-static int
-set_engines__bond(struct i915_user_extension __user *base, void *data)
-{
-       struct i915_context_engines_bond __user *ext =
-               container_of_user(base, typeof(*ext), base);
-       const struct set_engines *set = data;
-       struct drm_i915_private *i915 = set->ctx->i915;
-       struct i915_engine_class_instance ci;
-       struct intel_engine_cs *virtual;
-       struct intel_engine_cs *master;
-       u16 idx, num_bonds;
-       int err, n;
-
-       if (get_user(idx, &ext->virtual_index))
-               return -EFAULT;
-
-       if (idx >= set->engines->num_engines) {
-               drm_dbg(&i915->drm,
-                       "Invalid index for virtual engine: %d >= %d\n",
-                       idx, set->engines->num_engines);
-               return -EINVAL;
-       }
-
-       idx = array_index_nospec(idx, set->engines->num_engines);
-       if (!set->engines->engines[idx]) {
-               drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
-               return -EINVAL;
-       }
-       virtual = set->engines->engines[idx]->engine;
-
-       err = check_user_mbz(&ext->flags);
-       if (err)
-               return err;
-
-       for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
-               err = check_user_mbz(&ext->mbz64[n]);
-               if (err)
-                       return err;
-       }
-
-       if (copy_from_user(&ci, &ext->master, sizeof(ci)))
-               return -EFAULT;
-
-       master = intel_engine_lookup_user(i915,
-                                         ci.engine_class, ci.engine_instance);
-       if (!master) {
-               drm_dbg(&i915->drm,
-                       "Unrecognised master engine: { class:%u, instance:%u }\n",
-                       ci.engine_class, ci.engine_instance);
-               return -EINVAL;
-       }
-
-       if (get_user(num_bonds, &ext->num_bonds))
-               return -EFAULT;
-
-       for (n = 0; n < num_bonds; n++) {
-               struct intel_engine_cs *bond;
-
-               if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
-                       return -EFAULT;
-
-               bond = intel_engine_lookup_user(i915,
-                                               ci.engine_class,
-                                               ci.engine_instance);
-               if (!bond) {
-                       drm_dbg(&i915->drm,
-                               "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
-                               n, ci.engine_class, ci.engine_instance);
-                       return -EINVAL;
-               }
-
-               /*
-                * A non-virtual engine has no siblings to choose between; and
-                * a submit fence will always be directed to the one engine.
-                */
-               if (intel_engine_is_virtual(virtual)) {
-                       err = intel_virtual_engine_attach_bond(virtual,
-                                                              master,
-                                                              bond);
-                       if (err)
-                               return err;
-               }
-       }
-
-       return 0;
-}
-
-static const i915_user_extension_fn set_engines__extensions[] = {
-       [I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_engines__load_balance,
-       [I915_CONTEXT_ENGINES_EXT_BOND] = set_engines__bond,
-};
-
-static int
-set_engines(struct i915_gem_context *ctx,
-           const struct drm_i915_gem_context_param *args)
-{
-       struct drm_i915_private *i915 = ctx->i915;
-       struct i915_context_param_engines __user *user =
-               u64_to_user_ptr(args->value);
-       struct set_engines set = { .ctx = ctx };
-       unsigned int num_engines, n;
-       u64 extensions;
-       int err;
-
-       if (!args->size) { /* switch back to legacy user_ring_map */
-               if (!i915_gem_context_user_engines(ctx))
-                       return 0;
-
-               set.engines = default_engines(ctx);
-               if (IS_ERR(set.engines))
-                       return PTR_ERR(set.engines);
-
-               goto replace;
-       }
-
-       BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
-       if (args->size < sizeof(*user) ||
-           !IS_ALIGNED(args->size, sizeof(*user->engines))) {
-               drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
-                       args->size);
-               return -EINVAL;
-       }
-
-       /*
-        * Note that I915_EXEC_RING_MASK limits execbuf to only using the
-        * first 64 engines defined here.
-        */
-       num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
-       set.engines = alloc_engines(num_engines);
-       if (!set.engines)
-               return -ENOMEM;
-
-       for (n = 0; n < num_engines; n++) {
-               struct i915_engine_class_instance ci;
-               struct intel_engine_cs *engine;
-               struct intel_context *ce;
-
-               if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
-                       __free_engines(set.engines, n);
-                       return -EFAULT;
-               }
-
-               if (ci.engine_class == (u16)I915_ENGINE_CLASS_INVALID &&
-                   ci.engine_instance == (u16)I915_ENGINE_CLASS_INVALID_NONE) {
-                       set.engines->engines[n] = NULL;
-                       continue;
-               }
-
-               engine = intel_engine_lookup_user(ctx->i915,
-                                                 ci.engine_class,
-                                                 ci.engine_instance);
-               if (!engine) {
-                       drm_dbg(&i915->drm,
-                               "Invalid engine[%d]: { class:%d, instance:%d }\n",
-                               n, ci.engine_class, ci.engine_instance);
-                       __free_engines(set.engines, n);
-                       return -ENOENT;
-               }
-
-               ce = intel_context_create(engine);
-               if (IS_ERR(ce)) {
-                       __free_engines(set.engines, n);
-                       return PTR_ERR(ce);
-               }
-
-               intel_context_set_gem(ce, ctx);
-
-               set.engines->engines[n] = ce;
-       }
-       set.engines->num_engines = num_engines;
-
-       err = -EFAULT;
-       if (!get_user(extensions, &user->extensions))
-               err = i915_user_extensions(u64_to_user_ptr(extensions),
-                                          set_engines__extensions,
-                                          ARRAY_SIZE(set_engines__extensions),
-                                          &set);
-       if (err) {
-               free_engines(set.engines);
-               return err;
-       }
-
-replace:
-       mutex_lock(&ctx->engines_mutex);
-       if (i915_gem_context_is_closed(ctx)) {
-               mutex_unlock(&ctx->engines_mutex);
-               free_engines(set.engines);
-               return -ENOENT;
-       }
-       if (args->size)
-               i915_gem_context_set_user_engines(ctx);
-       else
-               i915_gem_context_clear_user_engines(ctx);
-       set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
-       mutex_unlock(&ctx->engines_mutex);
-
-       /* Keep track of old engine sets for kill_context() */
-       engines_idle_release(ctx, set.engines);
-
-       return 0;
-}
-
-static int
-get_engines(struct i915_gem_context *ctx,
-           struct drm_i915_gem_context_param *args)
+static int set_sseu(struct i915_gem_context *ctx,
+                   struct drm_i915_gem_context_param *args)
 {
-       struct i915_context_param_engines __user *user;
-       struct i915_gem_engines *e;
-       size_t n, count, size;
-       bool user_engines;
-       int err = 0;
+       struct drm_i915_private *i915 = ctx->i915;
+       struct drm_i915_gem_context_param_sseu user_sseu;
+       struct intel_context *ce;
+       struct intel_sseu sseu;
+       unsigned long lookup;
+       int ret;
 
-       e = __context_engines_await(ctx, &user_engines);
-       if (!e)
-               return -ENOENT;
+       if (args->size < sizeof(user_sseu))
+               return -EINVAL;
 
-       if (!user_engines) {
-               i915_sw_fence_complete(&e->fence);
-               args->size = 0;
-               return 0;
-       }
+       if (GRAPHICS_VER(i915) != 11)
+               return -ENODEV;
 
-       count = e->num_engines;
+       if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
+                          sizeof(user_sseu)))
+               return -EFAULT;
 
-       /* Be paranoid in case we have an impedance mismatch */
-       if (!check_struct_size(user, engines, count, &size)) {
-               err = -EINVAL;
-               goto err_free;
-       }
-       if (overflows_type(size, args->size)) {
-               err = -EINVAL;
-               goto err_free;
-       }
+       if (user_sseu.rsvd)
+               return -EINVAL;
 
-       if (!args->size) {
-               args->size = size;
-               goto err_free;
-       }
+       if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX))
+               return -EINVAL;
 
-       if (args->size < size) {
-               err = -EINVAL;
-               goto err_free;
-       }
+       lookup = 0;
+       if (user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)
+               lookup |= LOOKUP_USER_INDEX;
 
-       user = u64_to_user_ptr(args->value);
-       if (put_user(0, &user->extensions)) {
-               err = -EFAULT;
-               goto err_free;
-       }
+       ce = lookup_user_engine(ctx, lookup, &user_sseu.engine);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
 
-       for (n = 0; n < count; n++) {
-               struct i915_engine_class_instance ci = {
-                       .engine_class = I915_ENGINE_CLASS_INVALID,
-                       .engine_instance = I915_ENGINE_CLASS_INVALID_NONE,
-               };
+       /* Only render engine supports RPCS configuration. */
+       if (ce->engine->class != RENDER_CLASS) {
+               ret = -ENODEV;
+               goto out_ce;
+       }
 
-               if (e->engines[n]) {
-                       ci.engine_class = e->engines[n]->engine->uabi_class;
-                       ci.engine_instance = e->engines[n]->engine->uabi_instance;
-               }
+       ret = i915_gem_user_to_context_sseu(ce->engine->gt, &user_sseu, &sseu);
+       if (ret)
+               goto out_ce;
 
-               if (copy_to_user(&user->engines[n], &ci, sizeof(ci))) {
-                       err = -EFAULT;
-                       goto err_free;
-               }
-       }
+       ret = intel_context_reconfigure_sseu(ce, sseu);
+       if (ret)
+               goto out_ce;
 
-       args->size = size;
+       args->size = sizeof(user_sseu);
 
-err_free:
-       i915_sw_fence_complete(&e->fence);
-       return err;
+out_ce:
+       intel_context_put(ce);
+       return ret;
 }
 
 static int
@@ -1931,41 +1769,30 @@ set_persistence(struct i915_gem_context *ctx,
        return __context_set_persistence(ctx, args->value);
 }
 
-static int __apply_priority(struct intel_context *ce, void *arg)
+static void __apply_priority(struct intel_context *ce, void *arg)
 {
        struct i915_gem_context *ctx = arg;
 
        if (!intel_engine_has_timeslices(ce->engine))
-               return 0;
+               return;
 
-       if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
+       if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
+           intel_engine_has_semaphores(ce->engine))
                intel_context_set_use_semaphores(ce);
        else
                intel_context_clear_use_semaphores(ce);
-
-       return 0;
 }
 
 static int set_priority(struct i915_gem_context *ctx,
                        const struct drm_i915_gem_context_param *args)
 {
-       s64 priority = args->value;
-
-       if (args->size)
-               return -EINVAL;
-
-       if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
-               return -ENODEV;
-
-       if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
-           priority < I915_CONTEXT_MIN_USER_PRIORITY)
-               return -EINVAL;
+       int err;
 
-       if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
-           !capable(CAP_SYS_NICE))
-               return -EPERM;
+       err = validate_priority(ctx->i915, args);
+       if (err)
+               return err;
 
-       ctx->sched.priority = priority;
+       ctx->sched.priority = args->value;
        context_apply_all(ctx, __apply_priority, ctx);
 
        return 0;
@@ -1978,15 +1805,6 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
        int ret = 0;
 
        switch (args->param) {
-       case I915_CONTEXT_PARAM_NO_ZEROMAP:
-               if (args->size)
-                       ret = -EINVAL;
-               else if (args->value)
-                       set_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
-               else
-                       clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
-               break;
-
        case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
                if (args->size)
                        ret = -EINVAL;
@@ -2024,23 +1842,15 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
                ret = set_sseu(ctx, args);
                break;
 
-       case I915_CONTEXT_PARAM_VM:
-               ret = set_ppgtt(fpriv, ctx, args);
-               break;
-
-       case I915_CONTEXT_PARAM_ENGINES:
-               ret = set_engines(ctx, args);
-               break;
-
        case I915_CONTEXT_PARAM_PERSISTENCE:
                ret = set_persistence(ctx, args);
                break;
 
-       case I915_CONTEXT_PARAM_RINGSIZE:
-               ret = set_ringsize(ctx, args);
-               break;
-
+       case I915_CONTEXT_PARAM_NO_ZEROMAP:
        case I915_CONTEXT_PARAM_BAN_PERIOD:
+       case I915_CONTEXT_PARAM_RINGSIZE:
+       case I915_CONTEXT_PARAM_VM:
+       case I915_CONTEXT_PARAM_ENGINES:
        default:
                ret = -EINVAL;
                break;
@@ -2050,7 +1860,7 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
 }
 
 struct create_ext {
-       struct i915_gem_context *ctx;
+       struct i915_gem_proto_context *pc;
        struct drm_i915_file_private *fpriv;
 };
 
@@ -2065,233 +1875,84 @@ static int create_setparam(struct i915_user_extension __user *ext, void *data)
        if (local.param.ctx_id)
                return -EINVAL;
 
-       return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
-}
-
-static int copy_ring_size(struct intel_context *dst,
-                         struct intel_context *src)
-{
-       long sz;
-
-       sz = intel_context_get_ring_size(src);
-       if (sz < 0)
-               return sz;
-
-       return intel_context_set_ring_size(dst, sz);
-}
-
-static int clone_engines(struct i915_gem_context *dst,
-                        struct i915_gem_context *src)
-{
-       struct i915_gem_engines *clone, *e;
-       bool user_engines;
-       unsigned long n;
-
-       e = __context_engines_await(src, &user_engines);
-       if (!e)
-               return -ENOENT;
-
-       clone = alloc_engines(e->num_engines);
-       if (!clone)
-               goto err_unlock;
-
-       for (n = 0; n < e->num_engines; n++) {
-               struct intel_engine_cs *engine;
-
-               if (!e->engines[n]) {
-                       clone->engines[n] = NULL;
-                       continue;
-               }
-               engine = e->engines[n]->engine;
-
-               /*
-                * Virtual engines are singletons; they can only exist
-                * inside a single context, because they embed their
-                * HW context... As each virtual context implies a single
-                * timeline (each engine can only dequeue a single request
-                * at any time), it would be surprising for two contexts
-                * to use the same engine. So let's create a copy of
-                * the virtual engine instead.
-                */
-               if (intel_engine_is_virtual(engine))
-                       clone->engines[n] =
-                               intel_execlists_clone_virtual(engine);
-               else
-                       clone->engines[n] = intel_context_create(engine);
-               if (IS_ERR_OR_NULL(clone->engines[n])) {
-                       __free_engines(clone, n);
-                       goto err_unlock;
-               }
-
-               intel_context_set_gem(clone->engines[n], dst);
-
-               /* Copy across the preferred ringsize */
-               if (copy_ring_size(clone->engines[n], e->engines[n])) {
-                       __free_engines(clone, n + 1);
-                       goto err_unlock;
-               }
-       }
-       clone->num_engines = n;
-       i915_sw_fence_complete(&e->fence);
-
-       /* Serialised by constructor */
-       engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
-       if (user_engines)
-               i915_gem_context_set_user_engines(dst);
-       else
-               i915_gem_context_clear_user_engines(dst);
-       return 0;
-
-err_unlock:
-       i915_sw_fence_complete(&e->fence);
-       return -ENOMEM;
-}
-
-static int clone_flags(struct i915_gem_context *dst,
-                      struct i915_gem_context *src)
-{
-       dst->user_flags = src->user_flags;
-       return 0;
+       return set_proto_ctx_param(arg->fpriv, arg->pc, &local.param);
 }
 
-static int clone_schedattr(struct i915_gem_context *dst,
-                          struct i915_gem_context *src)
+static int invalid_ext(struct i915_user_extension __user *ext, void *data)
 {
-       dst->sched = src->sched;
-       return 0;
+       return -EINVAL;
 }
 
-static int clone_sseu(struct i915_gem_context *dst,
-                     struct i915_gem_context *src)
-{
-       struct i915_gem_engines *e = i915_gem_context_lock_engines(src);
-       struct i915_gem_engines *clone;
-       unsigned long n;
-       int err;
-
-       /* no locking required; sole access under constructor*/
-       clone = __context_engines_static(dst);
-       if (e->num_engines != clone->num_engines) {
-               err = -EINVAL;
-               goto unlock;
-       }
-
-       for (n = 0; n < e->num_engines; n++) {
-               struct intel_context *ce = e->engines[n];
-
-               if (clone->engines[n]->engine->class != ce->engine->class) {
-                       /* Must have compatible engine maps! */
-                       err = -EINVAL;
-                       goto unlock;
-               }
-
-               /* serialises with set_sseu */
-               err = intel_context_lock_pinned(ce);
-               if (err)
-                       goto unlock;
-
-               clone->engines[n]->sseu = ce->sseu;
-               intel_context_unlock_pinned(ce);
-       }
-
-       err = 0;
-unlock:
-       i915_gem_context_unlock_engines(src);
-       return err;
-}
+static const i915_user_extension_fn create_extensions[] = {
+       [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
+       [I915_CONTEXT_CREATE_EXT_CLONE] = invalid_ext,
+};
 
-static int clone_timeline(struct i915_gem_context *dst,
-                         struct i915_gem_context *src)
+static bool client_is_banned(struct drm_i915_file_private *file_priv)
 {
-       if (src->timeline)
-               __assign_timeline(dst, src->timeline);
-
-       return 0;
+       return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
 }
 
-static int clone_vm(struct i915_gem_context *dst,
-                   struct i915_gem_context *src)
+static inline struct i915_gem_context *
+__context_lookup(struct drm_i915_file_private *file_priv, u32 id)
 {
-       struct i915_address_space *vm;
-       int err = 0;
-
-       if (!rcu_access_pointer(src->vm))
-               return 0;
+       struct i915_gem_context *ctx;
 
        rcu_read_lock();
-       vm = context_get_vm_rcu(src);
+       ctx = xa_load(&file_priv->context_xa, id);
+       if (ctx && !kref_get_unless_zero(&ctx->ref))
+               ctx = NULL;
        rcu_read_unlock();
 
-       if (!mutex_lock_interruptible(&dst->mutex)) {
-               __assign_ppgtt(dst, vm);
-               mutex_unlock(&dst->mutex);
-       } else {
-               err = -EINTR;
-       }
-
-       i915_vm_put(vm);
-       return err;
+       return ctx;
 }
 
-static int create_clone(struct i915_user_extension __user *ext, void *data)
-{
-       static int (* const fn[])(struct i915_gem_context *dst,
-                                 struct i915_gem_context *src) = {
-#define MAP(x, y) [ilog2(I915_CONTEXT_CLONE_##x)] = y
-               MAP(ENGINES, clone_engines),
-               MAP(FLAGS, clone_flags),
-               MAP(SCHEDATTR, clone_schedattr),
-               MAP(SSEU, clone_sseu),
-               MAP(TIMELINE, clone_timeline),
-               MAP(VM, clone_vm),
-#undef MAP
-       };
-       struct drm_i915_gem_context_create_ext_clone local;
-       const struct create_ext *arg = data;
-       struct i915_gem_context *dst = arg->ctx;
-       struct i915_gem_context *src;
-       int err, bit;
+static struct i915_gem_context *
+finalize_create_context_locked(struct drm_i915_file_private *file_priv,
+                              struct i915_gem_proto_context *pc, u32 id)
+{
+       struct i915_gem_context *ctx;
+       void *old;
 
-       if (copy_from_user(&local, ext, sizeof(local)))
-               return -EFAULT;
+       lockdep_assert_held(&file_priv->proto_context_lock);
 
-       BUILD_BUG_ON(GENMASK(BITS_PER_TYPE(local.flags) - 1, ARRAY_SIZE(fn)) !=
-                    I915_CONTEXT_CLONE_UNKNOWN);
+       ctx = i915_gem_create_context(file_priv->dev_priv, pc);
+       if (IS_ERR(ctx))
+               return ctx;
 
-       if (local.flags & I915_CONTEXT_CLONE_UNKNOWN)
-               return -EINVAL;
+       gem_context_register(ctx, file_priv, id);
 
-       if (local.rsvd)
-               return -EINVAL;
+       old = xa_erase(&file_priv->proto_context_xa, id);
+       GEM_BUG_ON(old != pc);
+       proto_context_close(pc);
 
-       rcu_read_lock();
-       src = __i915_gem_context_lookup_rcu(arg->fpriv, local.clone_id);
-       rcu_read_unlock();
-       if (!src)
-               return -ENOENT;
+       /* One for the xarray and one for the caller */
+       return i915_gem_context_get(ctx);
+}
 
-       GEM_BUG_ON(src == dst);
+struct i915_gem_context *
+i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
+{
+       struct i915_gem_proto_context *pc;
+       struct i915_gem_context *ctx;
 
-       for (bit = 0; bit < ARRAY_SIZE(fn); bit++) {
-               if (!(local.flags & BIT(bit)))
-                       continue;
+       ctx = __context_lookup(file_priv, id);
+       if (ctx)
+               return ctx;
 
-               err = fn[bit](dst, src);
-               if (err)
-                       return err;
+       mutex_lock(&file_priv->proto_context_lock);
+       /* Try one more time under the lock */
+       ctx = __context_lookup(file_priv, id);
+       if (!ctx) {
+               pc = xa_load(&file_priv->proto_context_xa, id);
+               if (!pc)
+                       ctx = ERR_PTR(-ENOENT);
+               else
+                       ctx = finalize_create_context_locked(file_priv, pc, id);
        }
+       mutex_unlock(&file_priv->proto_context_lock);
 
-       return 0;
-}
-
-static const i915_user_extension_fn create_extensions[] = {
-       [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
-       [I915_CONTEXT_CREATE_EXT_CLONE] = create_clone,
-};
-
-static bool client_is_banned(struct drm_i915_file_private *file_priv)
-{
-       return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
+       return ctx;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -2321,9 +1982,9 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                return -EIO;
        }
 
-       ext_data.ctx = i915_gem_create_context(i915, args->flags);
-       if (IS_ERR(ext_data.ctx))
-               return PTR_ERR(ext_data.ctx);
+       ext_data.pc = proto_context_create(i915, args->flags);
+       if (IS_ERR(ext_data.pc))
+               return PTR_ERR(ext_data.pc);
 
        if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
                ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
@@ -2331,20 +1992,39 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                           ARRAY_SIZE(create_extensions),
                                           &ext_data);
                if (ret)
-                       goto err_ctx;
+                       goto err_pc;
        }
 
-       ret = gem_context_register(ext_data.ctx, ext_data.fpriv, &id);
-       if (ret < 0)
-               goto err_ctx;
+       if (GRAPHICS_VER(i915) > 12) {
+               struct i915_gem_context *ctx;
+
+               /* Get ourselves a context ID */
+               ret = xa_alloc(&ext_data.fpriv->context_xa, &id, NULL,
+                              xa_limit_32b, GFP_KERNEL);
+               if (ret)
+                       goto err_pc;
+
+               ctx = i915_gem_create_context(i915, ext_data.pc);
+               if (IS_ERR(ctx)) {
+                       ret = PTR_ERR(ctx);
+                       goto err_pc;
+               }
+
+               proto_context_close(ext_data.pc);
+               gem_context_register(ctx, ext_data.fpriv, id);
+       } else {
+               ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
+               if (ret < 0)
+                       goto err_pc;
+       }
 
        args->ctx_id = id;
        drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
 
        return 0;
 
-err_ctx:
-       context_close(ext_data.ctx);
+err_pc:
+       proto_context_close(ext_data.pc);
        return ret;
 }
 
@@ -2353,6 +2033,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_context_destroy *args = data;
        struct drm_i915_file_private *file_priv = file->driver_priv;
+       struct i915_gem_proto_context *pc;
        struct i915_gem_context *ctx;
 
        if (args->pad != 0)
@@ -2361,11 +2042,24 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
        if (!args->ctx_id)
                return -ENOENT;
 
+       /* We need to hold the proto-context lock here to prevent races
+        * with finalize_create_context_locked().
+        */
+       mutex_lock(&file_priv->proto_context_lock);
        ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
-       if (!ctx)
+       pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
+       mutex_unlock(&file_priv->proto_context_lock);
+
+       if (!ctx && !pc)
                return -ENOENT;
+       GEM_WARN_ON(ctx && pc);
+
+       if (pc)
+               proto_context_close(pc);
+
+       if (ctx)
+               context_close(ctx);
 
-       context_close(ctx);
        return 0;
 }
 
@@ -2433,15 +2127,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
        int ret = 0;
 
        ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
-       if (!ctx)
-               return -ENOENT;
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
 
        switch (args->param) {
-       case I915_CONTEXT_PARAM_NO_ZEROMAP:
-               args->size = 0;
-               args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
-               break;
-
        case I915_CONTEXT_PARAM_GTT_SIZE:
                args->size = 0;
                rcu_read_lock();
@@ -2480,20 +2169,15 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
                ret = get_ppgtt(file_priv, ctx, args);
                break;
 
-       case I915_CONTEXT_PARAM_ENGINES:
-               ret = get_engines(ctx, args);
-               break;
-
        case I915_CONTEXT_PARAM_PERSISTENCE:
                args->size = 0;
                args->value = i915_gem_context_is_persistent(ctx);
                break;
 
-       case I915_CONTEXT_PARAM_RINGSIZE:
-               ret = get_ringsize(ctx, args);
-               break;
-
+       case I915_CONTEXT_PARAM_NO_ZEROMAP:
        case I915_CONTEXT_PARAM_BAN_PERIOD:
+       case I915_CONTEXT_PARAM_ENGINES:
+       case I915_CONTEXT_PARAM_RINGSIZE:
        default:
                ret = -EINVAL;
                break;
@@ -2508,16 +2192,32 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
        struct drm_i915_gem_context_param *args = data;
+       struct i915_gem_proto_context *pc;
        struct i915_gem_context *ctx;
-       int ret;
+       int ret = 0;
 
-       ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
-       if (!ctx)
-               return -ENOENT;
+       mutex_lock(&file_priv->proto_context_lock);
+       ctx = __context_lookup(file_priv, args->ctx_id);
+       if (!ctx) {
+               pc = xa_load(&file_priv->proto_context_xa, args->ctx_id);
+               if (pc) {
+                       /* Contexts should be finalized inside
+                        * GEM_CONTEXT_CREATE starting with graphics
+                        * version 13.
+                        */
+                       WARN_ON(GRAPHICS_VER(file_priv->dev_priv) > 12);
+                       ret = set_proto_ctx_param(file_priv, pc, args);
+               } else {
+                       ret = -ENOENT;
+               }
+       }
+       mutex_unlock(&file_priv->proto_context_lock);
 
-       ret = ctx_setparam(file_priv, ctx, args);
+       if (ctx) {
+               ret = ctx_setparam(file_priv, ctx, args);
+               i915_gem_context_put(ctx);
+       }
 
-       i915_gem_context_put(ctx);
        return ret;
 }
 
@@ -2527,16 +2227,13 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_reset_stats *args = data;
        struct i915_gem_context *ctx;
-       int ret;
 
        if (args->flags || args->pad)
                return -EINVAL;
 
-       ret = -ENOENT;
-       rcu_read_lock();
-       ctx = __i915_gem_context_lookup_rcu(file->driver_priv, args->ctx_id);
-       if (!ctx)
-               goto out;
+       ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
+       if (IS_ERR(ctx))
+               return PTR_ERR(ctx);
 
        /*
         * We opt for unserialised reads here. This may result in tearing
@@ -2553,10 +2250,8 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
        args->batch_active = atomic_read(&ctx->guilty_count);
        args->batch_pending = atomic_read(&ctx->active_count);
 
-       ret = 0;
-out:
-       rcu_read_unlock();
-       return ret;
+       i915_gem_context_put(ctx);
+       return 0;
 }
 
 /* GEM context-engines iterator: for_each_gem_engine() */
@@ -2584,27 +2279,16 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
 #include "selftests/i915_gem_context.c"
 #endif
 
-static void i915_global_gem_context_shrink(void)
+void i915_gem_context_module_exit(void)
 {
-       kmem_cache_shrink(global.slab_luts);
+       kmem_cache_destroy(slab_luts);
 }
 
-static void i915_global_gem_context_exit(void)
-{
-       kmem_cache_destroy(global.slab_luts);
-}
-
-static struct i915_global_gem_context global = { {
-       .shrink = i915_global_gem_context_shrink,
-       .exit = i915_global_gem_context_exit,
-} };
-
-int __init i915_global_gem_context_init(void)
+int __init i915_gem_context_module_init(void)
 {
-       global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
-       if (!global.slab_luts)
+       slab_luts = KMEM_CACHE(i915_lut_handle, 0);
+       if (!slab_luts)
                return -ENOMEM;
 
-       i915_global_register(&global.base);
        return 0;
 }
index b5c908f..1806053 100644 (file)
@@ -133,6 +133,9 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file);
 
+struct i915_gem_context *
+i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id);
+
 static inline struct i915_gem_context *
 i915_gem_context_get(struct i915_gem_context *ctx)
 {
@@ -221,6 +224,9 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it);
        for (i915_gem_engines_iter_init(&(it), (engines)); \
             ((ce) = i915_gem_engines_iter_next(&(it)));)
 
+void i915_gem_context_module_exit(void);
+int i915_gem_context_module_init(void);
+
 struct i915_lut_handle *i915_lut_handle_alloc(void);
 void i915_lut_handle_free(struct i915_lut_handle *lut);
 
index 340473a..94c03a9 100644 (file)
@@ -30,22 +30,176 @@ struct i915_address_space;
 struct intel_timeline;
 struct intel_ring;
 
+/**
+ * struct i915_gem_engines - A set of engines
+ */
 struct i915_gem_engines {
        union {
+               /** @link: Link in i915_gem_context::stale::engines */
                struct list_head link;
+
+               /** @rcu: RCU to use when freeing */
                struct rcu_head rcu;
        };
+
+       /** @fence: Fence used for delayed destruction of engines */
        struct i915_sw_fence fence;
+
+       /** @ctx: i915_gem_context backpointer */
        struct i915_gem_context *ctx;
+
+       /** @num_engines: Number of engines in this set */
        unsigned int num_engines;
+
+       /** @engines: Array of engines */
        struct intel_context *engines[];
 };
 
+/**
+ * struct i915_gem_engines_iter - Iterator for an i915_gem_engines set
+ */
 struct i915_gem_engines_iter {
+       /** @idx: Index into i915_gem_engines::engines */
        unsigned int idx;
+
+       /** @engines: Engine set being iterated */
        const struct i915_gem_engines *engines;
 };
 
+/**
+ * enum i915_gem_engine_type - Describes the type of an i915_gem_proto_engine
+ */
+enum i915_gem_engine_type {
+       /** @I915_GEM_ENGINE_TYPE_INVALID: An invalid engine */
+       I915_GEM_ENGINE_TYPE_INVALID = 0,
+
+       /** @I915_GEM_ENGINE_TYPE_PHYSICAL: A single physical engine */
+       I915_GEM_ENGINE_TYPE_PHYSICAL,
+
+       /** @I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set */
+       I915_GEM_ENGINE_TYPE_BALANCED,
+};
+
+/**
+ * struct i915_gem_proto_engine - prototype engine
+ *
+ * This struct describes an engine that a context may contain.  Engines
+ * have three types:
+ *
+ *  - I915_GEM_ENGINE_TYPE_INVALID: Invalid engines can be created but they
+ *    show up as a NULL in i915_gem_engines::engines[i] and any attempt by
+ *    the user to use them results in -EINVAL.  They are also useful during
+ *    proto-context construction because the client may create invalid
+ *    engines and then set them up later as virtual engines.
+ *
+ *  - I915_GEM_ENGINE_TYPE_PHYSICAL: A single physical engine, described by
+ *    i915_gem_proto_engine::engine.
+ *
+ *  - I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set, described by
+ *    i915_gem_proto_engine::num_siblings and i915_gem_proto_engine::siblings.
+ */
+struct i915_gem_proto_engine {
+       /** @type: Type of this engine */
+       enum i915_gem_engine_type type;
+
+       /** @engine: Engine, for physical */
+       struct intel_engine_cs *engine;
+
+       /** @num_siblings: Number of balanced siblings */
+       unsigned int num_siblings;
+
+       /** @siblings: Balanced siblings */
+       struct intel_engine_cs **siblings;
+
+       /** @sseu: Client-set SSEU parameters */
+       struct intel_sseu sseu;
+};
+
+/**
+ * struct i915_gem_proto_context - prototype context
+ *
+ * The struct i915_gem_proto_context represents the creation parameters for
+ * a struct i915_gem_context.  This is used to gather parameters provided
+ * either through creation flags or via SET_CONTEXT_PARAM so that, when we
+ * create the final i915_gem_context, those parameters can be immutable.
+ *
+ * The context uAPI allows for two methods of setting context parameters:
+ * SET_CONTEXT_PARAM and CONTEXT_CREATE_EXT_SETPARAM.  The former is
+ * allowed to be called at any time while the latter happens as part of
+ * GEM_CONTEXT_CREATE.  When these were initially added, everything
+ * settable via one was settable via the other.  While some params are
+ * fairly simple and setting them on a live context is harmless, such as
+ * the context priority, others are far trickier, such as the VM or the
+ * set of engines.  To avoid some truly nasty race conditions, we don't
+ * allow setting the VM or the set of engines on live contexts.
+ *
+ * The way we dealt with this without breaking older userspace that sets
+ * the VM or engine set via SET_CONTEXT_PARAM is to delay the creation of
+ * the actual context until after the client is done configuring it with
+ * SET_CONTEXT_PARAM.  From the perspective of the client, it has the same
+ * u32 context ID the whole time.  From the perspective of i915, however,
+ * it's an i915_gem_proto_context right up until the point where we attempt
+ * to do something which the proto-context can't handle at which point the
+ * real context gets created.
+ *
+ * This is accomplished via a little xarray dance.  When GEM_CONTEXT_CREATE
+ * is called, we create a proto-context, reserve a slot in context_xa but
+ * leave it NULL, and store the proto-context in the corresponding slot in
+ * proto_context_xa.  Then, whenever we go to look up a context, we first
+ * check context_xa.  If it's there, we return the i915_gem_context and
+ * we're done.  If it's not, we look in proto_context_xa and, if we find it
+ * there, we create the actual context and kill the proto-context.
+ *
+ * At the time we made this change (April, 2021), we did a fairly complete
+ * audit of existing userspace to ensure this wouldn't break anything:
+ *
+ *  - Mesa/i965 didn't use the engines or VM APIs at all
+ *
+ *  - Mesa/ANV used the engines API but via CONTEXT_CREATE_EXT_SETPARAM and
+ *    didn't use the VM API.
+ *
+ *  - Mesa/iris didn't use the engines or VM APIs at all
+ *
+ *  - The open-source compute-runtime didn't yet use the engines API but
+ *    did use the VM API via SET_CONTEXT_PARAM.  However, CONTEXT_SETPARAM
+ *    was always the second ioctl on that context, immediately following
+ *    GEM_CONTEXT_CREATE.
+ *
+ *  - The media driver sets engines and bonding/balancing via
+ *    SET_CONTEXT_PARAM.  However, CONTEXT_SETPARAM to set the VM was
+ *    always the second ioctl on that context, immediately following
+ *    GEM_CONTEXT_CREATE and setting engines immediately followed that.
+ *
+ * In order for this dance to work properly, any modification to an
+ * i915_gem_proto_context that is exposed to the client via
+ * drm_i915_file_private::proto_context_xa must be guarded by
+ * drm_i915_file_private::proto_context_lock.  The exception is when a
+ * proto-context has not yet been exposed such as when handling
+ * CONTEXT_CREATE_SET_PARAM during GEM_CONTEXT_CREATE.
+ */
+struct i915_gem_proto_context {
+       /** @vm: See &i915_gem_context.vm */
+       struct i915_address_space *vm;
+
+       /** @user_flags: See &i915_gem_context.user_flags */
+       unsigned long user_flags;
+
+       /** @sched: See &i915_gem_context.sched */
+       struct i915_sched_attr sched;
+
+       /** @num_user_engines: Number of user-specified engines or -1 */
+       int num_user_engines;
+
+       /** @user_engines: User-specified engines */
+       struct i915_gem_proto_engine *user_engines;
+
+       /** @legacy_rcs_sseu: Client-set SSEU parameters for the legacy RCS */
+       struct intel_sseu legacy_rcs_sseu;
+
+       /** @single_timeline: See &i915_gem_context.syncobj */
+       bool single_timeline;
+};
+
 /**
  * struct i915_gem_context - client state
  *
@@ -53,10 +207,10 @@ struct i915_gem_engines_iter {
  * logical hardware state for a particular client.
  */
 struct i915_gem_context {
-       /** i915: i915 device backpointer */
+       /** @i915: i915 device backpointer */
        struct drm_i915_private *i915;
 
-       /** file_priv: owning file descriptor */
+       /** @file_priv: owning file descriptor */
        struct drm_i915_file_private *file_priv;
 
        /**
@@ -81,9 +235,23 @@ struct i915_gem_context {
         * CONTEXT_USER_ENGINES flag is set).
         */
        struct i915_gem_engines __rcu *engines;
-       struct mutex engines_mutex; /* guards writes to engines */
 
-       struct intel_timeline *timeline;
+       /** @engines_mutex: guards writes to engines */
+       struct mutex engines_mutex;
+
+       /**
+        * @syncobj: Shared timeline syncobj
+        *
+        * When the SHARED_TIMELINE flag is set on context creation, we
+        * emulate a single timeline across all engines using this syncobj.
+        * For every execbuffer2 call, this syncobj is used as both an in-
+        * and out-fence.  Unlike the real intel_timeline, this doesn't
+        * provide perfect atomic in-order guarantees if the client races
+        * with itself by calling execbuffer2 twice concurrently.  However,
+        * if userspace races with itself, that's not likely to yield well-
+        * defined results anyway so we choose to not care.
+        */
+       struct drm_syncobj *syncobj;
 
        /**
         * @vm: unique address space (GTT)
@@ -106,7 +274,7 @@ struct i915_gem_context {
         */
        struct pid *pid;
 
-       /** link: place with &drm_i915_private.context_list */
+       /** @link: place within &drm_i915_private.context_list */
        struct list_head link;
 
        /**
@@ -129,7 +297,6 @@ struct i915_gem_context {
         * @user_flags: small set of booleans controlled by the user
         */
        unsigned long user_flags;
-#define UCONTEXT_NO_ZEROMAP            0
 #define UCONTEXT_NO_ERROR_CAPTURE      1
 #define UCONTEXT_BANNABLE              2
 #define UCONTEXT_RECOVERABLE           3
@@ -142,11 +309,13 @@ struct i915_gem_context {
 #define CONTEXT_CLOSED                 0
 #define CONTEXT_USER_ENGINES           1
 
+       /** @mutex: guards everything that isn't engines or handles_vma */
        struct mutex mutex;
 
+       /** @sched: scheduler parameters */
        struct i915_sched_attr sched;
 
-       /** guilty_count: How many times this context has caused a GPU hang. */
+       /** @guilty_count: How many times this context has caused a GPU hang. */
        atomic_t guilty_count;
        /**
         * @active_count: How many times this context was active during a GPU
@@ -154,25 +323,23 @@ struct i915_gem_context {
         */
        atomic_t active_count;
 
-       struct {
-               u64 timeout_us;
-       } watchdog;
-
        /**
         * @hang_timestamp: The last time(s) this context caused a GPU hang
         */
        unsigned long hang_timestamp[2];
 #define CONTEXT_FAST_HANG_JIFFIES (120 * HZ) /* 3 hangs within 120s? Banned! */
 
-       /** remap_slice: Bitmask of cache lines that need remapping */
+       /** @remap_slice: Bitmask of cache lines that need remapping */
        u8 remap_slice;
 
        /**
-        * handles_vma: rbtree to look up our context specific obj/vma for
+        * @handles_vma: rbtree to look up our context specific obj/vma for
         * the user handle. (user handles are per fd, but the binding is
         * per vm, which may be one per context or shared with the global GTT)
         */
        struct radix_tree_root handles_vma;
+
+       /** @lut_mutex: Locks handles_vma */
        struct mutex lut_mutex;
 
        /**
@@ -184,8 +351,11 @@ struct i915_gem_context {
         */
        char name[TASK_COMM_LEN + 8];
 
+       /** @stale: tracks stale engines to be destroyed */
        struct {
+               /** @lock: guards engines */
                spinlock_t lock;
+               /** @engines: list of stale engines */
                struct list_head engines;
        } stale;
 };
index 93bf63b..23fee13 100644 (file)
 #include "i915_trace.h"
 #include "i915_user_extensions.h"
 
-static u32 object_max_page_size(struct drm_i915_gem_object *obj)
+static u32 object_max_page_size(struct intel_memory_region **placements,
+                               unsigned int n_placements)
 {
        u32 max_page_size = 0;
        int i;
 
-       for (i = 0; i < obj->mm.n_placements; i++) {
-               struct intel_memory_region *mr = obj->mm.placements[i];
+       for (i = 0; i < n_placements; i++) {
+               struct intel_memory_region *mr = placements[i];
 
                GEM_BUG_ON(!is_power_of_2(mr->min_page_size));
                max_page_size = max_t(u32, max_page_size, mr->min_page_size);
@@ -27,10 +28,13 @@ static u32 object_max_page_size(struct drm_i915_gem_object *obj)
        return max_page_size;
 }
 
-static void object_set_placements(struct drm_i915_gem_object *obj,
-                                 struct intel_memory_region **placements,
-                                 unsigned int n_placements)
+static int object_set_placements(struct drm_i915_gem_object *obj,
+                                struct intel_memory_region **placements,
+                                unsigned int n_placements)
 {
+       struct intel_memory_region **arr;
+       unsigned int i;
+
        GEM_BUG_ON(!n_placements);
 
        /*
@@ -44,9 +48,20 @@ static void object_set_placements(struct drm_i915_gem_object *obj,
                obj->mm.placements = &i915->mm.regions[mr->id];
                obj->mm.n_placements = 1;
        } else {
-               obj->mm.placements = placements;
+               arr = kmalloc_array(n_placements,
+                                   sizeof(struct intel_memory_region *),
+                                   GFP_KERNEL);
+               if (!arr)
+                       return -ENOMEM;
+
+               for (i = 0; i < n_placements; i++)
+                       arr[i] = placements[i];
+
+               obj->mm.placements = arr;
                obj->mm.n_placements = n_placements;
        }
+
+       return 0;
 }
 
 static int i915_gem_publish(struct drm_i915_gem_object *obj,
@@ -67,22 +82,46 @@ static int i915_gem_publish(struct drm_i915_gem_object *obj,
        return 0;
 }
 
-static int
-i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
+/**
+ * __i915_gem_object_create_user - Creates a new object using the same path as DRM_I915_GEM_CREATE_EXT
+ * @i915: i915 private
+ * @size: size of the buffer, in bytes
+ * @placements: possible placement regions, in priority order
+ * @n_placements: number of possible placement regions
+ *
+ * This function is exposed primarily for selftests and does very little
+ * error checking.  It is assumed that the set of placement regions has
+ * already been verified to be valid.
+ */
+struct drm_i915_gem_object *
+__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
+                             struct intel_memory_region **placements,
+                             unsigned int n_placements)
 {
-       struct intel_memory_region *mr = obj->mm.placements[0];
+       struct intel_memory_region *mr = placements[0];
+       struct drm_i915_gem_object *obj;
        unsigned int flags;
        int ret;
 
-       size = round_up(size, object_max_page_size(obj));
+       i915_gem_flush_free_objects(i915);
+
+       size = round_up(size, object_max_page_size(placements, n_placements));
        if (size == 0)
-               return -EINVAL;
+               return ERR_PTR(-EINVAL);
 
        /* For most of the ABI (e.g. mmap) we think in system pages */
        GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
 
        if (i915_gem_object_size_2big(size))
-               return -E2BIG;
+               return ERR_PTR(-E2BIG);
+
+       obj = i915_gem_object_alloc();
+       if (!obj)
+               return ERR_PTR(-ENOMEM);
+
+       ret = object_set_placements(obj, placements, n_placements);
+       if (ret)
+               goto object_free;
 
        /*
         * I915_BO_ALLOC_USER will make sure the object is cleared before
@@ -90,14 +129,20 @@ i915_gem_setup(struct drm_i915_gem_object *obj, u64 size)
         */
        flags = I915_BO_ALLOC_USER;
 
-       ret = mr->ops->init_object(mr, obj, size, flags);
+       ret = mr->ops->init_object(mr, obj, size, 0, flags);
        if (ret)
-               return ret;
+               goto object_free;
 
        GEM_BUG_ON(size != obj->base.size);
 
        trace_i915_gem_object_create(obj);
-       return 0;
+       return obj;
+
+object_free:
+       if (obj->mm.n_placements > 1)
+               kfree(obj->mm.placements);
+       i915_gem_object_free(obj);
+       return ERR_PTR(ret);
 }
 
 int
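For orientation (not part of the patch): a minimal sketch of how a caller such as a selftest might use the new __i915_gem_object_create_user() helper, assuming the i915->mm.regions[] table used elsewhere in this series; the helper name below is illustrative. The placement array may live on the caller's stack, since object_set_placements() now copies it, and errors come back ERR_PTR-encoded.

/* Sketch: prefer device-local memory, fall back to system memory. */
static struct drm_i915_gem_object *
create_lmem_then_smem(struct drm_i915_private *i915, u64 size)
{
	struct intel_memory_region *placements[] = {
		i915->mm.regions[INTEL_REGION_LMEM],
		i915->mm.regions[INTEL_REGION_SMEM],
	};

	/* ERR_PTR(-EINVAL) for size 0, -E2BIG if too large, -ENOMEM if
	 * allocation fails; otherwise a referenced object. */
	return __i915_gem_object_create_user(i915, size, placements,
					     ARRAY_SIZE(placements));
}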
@@ -110,7 +155,6 @@ i915_gem_dumb_create(struct drm_file *file,
        enum intel_memory_type mem_type;
        int cpp = DIV_ROUND_UP(args->bpp, 8);
        u32 format;
-       int ret;
 
        switch (cpp) {
        case 1:
@@ -143,22 +187,13 @@ i915_gem_dumb_create(struct drm_file *file,
        if (HAS_LMEM(to_i915(dev)))
                mem_type = INTEL_MEMORY_LOCAL;
 
-       obj = i915_gem_object_alloc();
-       if (!obj)
-               return -ENOMEM;
-
        mr = intel_memory_region_by_type(to_i915(dev), mem_type);
-       object_set_placements(obj, &mr, 1);
 
-       ret = i915_gem_setup(obj, args->size);
-       if (ret)
-               goto object_free;
+       obj = __i915_gem_object_create_user(to_i915(dev), args->size, &mr, 1);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
 
        return i915_gem_publish(obj, file, &args->size, &args->handle);
-
-object_free:
-       i915_gem_object_free(obj);
-       return ret;
 }
 
 /**
@@ -175,31 +210,20 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_create *args = data;
        struct drm_i915_gem_object *obj;
        struct intel_memory_region *mr;
-       int ret;
-
-       i915_gem_flush_free_objects(i915);
-
-       obj = i915_gem_object_alloc();
-       if (!obj)
-               return -ENOMEM;
 
        mr = intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
-       object_set_placements(obj, &mr, 1);
 
-       ret = i915_gem_setup(obj, args->size);
-       if (ret)
-               goto object_free;
+       obj = __i915_gem_object_create_user(i915, args->size, &mr, 1);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
 
        return i915_gem_publish(obj, file, &args->size, &args->handle);
-
-object_free:
-       i915_gem_object_free(obj);
-       return ret;
 }
 
 struct create_ext {
        struct drm_i915_private *i915;
-       struct drm_i915_gem_object *vanilla_object;
+       struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
+       unsigned int n_placements;
 };
 
 static void repr_placements(char *buf, size_t size,
@@ -230,8 +254,7 @@ static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
        struct drm_i915_private *i915 = ext_data->i915;
        struct drm_i915_gem_memory_class_instance __user *uregions =
                u64_to_user_ptr(args->regions);
-       struct drm_i915_gem_object *obj = ext_data->vanilla_object;
-       struct intel_memory_region **placements;
+       struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
        u32 mask;
        int i, ret = 0;
 
@@ -245,6 +268,8 @@ static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
                ret = -EINVAL;
        }
 
+       BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements));
+       BUILD_BUG_ON(ARRAY_SIZE(ext_data->placements) != ARRAY_SIZE(placements));
        if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
                drm_dbg(&i915->drm, "num_regions is too large\n");
                ret = -EINVAL;
@@ -253,21 +278,13 @@ static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
        if (ret)
                return ret;
 
-       placements = kmalloc_array(args->num_regions,
-                                  sizeof(struct intel_memory_region *),
-                                  GFP_KERNEL);
-       if (!placements)
-               return -ENOMEM;
-
        mask = 0;
        for (i = 0; i < args->num_regions; i++) {
                struct drm_i915_gem_memory_class_instance region;
                struct intel_memory_region *mr;
 
-               if (copy_from_user(&region, uregions, sizeof(region))) {
-                       ret = -EFAULT;
-                       goto out_free;
-               }
+               if (copy_from_user(&region, uregions, sizeof(region)))
+                       return -EFAULT;
 
                mr = intel_memory_region_lookup(i915,
                                                region.memory_class,
@@ -293,14 +310,14 @@ static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
                ++uregions;
        }
 
-       if (obj->mm.placements) {
+       if (ext_data->n_placements) {
                ret = -EINVAL;
                goto out_dump;
        }
 
-       object_set_placements(obj, placements, args->num_regions);
-       if (args->num_regions == 1)
-               kfree(placements);
+       ext_data->n_placements = args->num_regions;
+       for (i = 0; i < args->num_regions; i++)
+               ext_data->placements[i] = placements[i];
 
        return 0;
 
@@ -308,11 +325,11 @@ out_dump:
        if (1) {
                char buf[256];
 
-               if (obj->mm.placements) {
+               if (ext_data->n_placements) {
                        repr_placements(buf,
                                        sizeof(buf),
-                                       obj->mm.placements,
-                                       obj->mm.n_placements);
+                                       ext_data->placements,
+                                       ext_data->n_placements);
                        drm_dbg(&i915->drm,
                                "Placements were already set in previous EXT. Existing placements: %s\n",
                                buf);
@@ -322,8 +339,6 @@ out_dump:
                drm_dbg(&i915->drm, "New placements (so far validated): %s\n", buf);
        }
 
-out_free:
-       kfree(placements);
        return ret;
 }
 
@@ -358,44 +373,30 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_private *i915 = to_i915(dev);
        struct drm_i915_gem_create_ext *args = data;
        struct create_ext ext_data = { .i915 = i915 };
-       struct intel_memory_region **placements_ext;
        struct drm_i915_gem_object *obj;
        int ret;
 
        if (args->flags)
                return -EINVAL;
 
-       i915_gem_flush_free_objects(i915);
-
-       obj = i915_gem_object_alloc();
-       if (!obj)
-               return -ENOMEM;
-
-       ext_data.vanilla_object = obj;
        ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
                                   create_extensions,
                                   ARRAY_SIZE(create_extensions),
                                   &ext_data);
-       placements_ext = obj->mm.placements;
        if (ret)
-               goto object_free;
+               return ret;
 
-       if (!placements_ext) {
-               struct intel_memory_region *mr =
+       if (!ext_data.n_placements) {
+               ext_data.placements[0] =
                        intel_memory_region_by_type(i915, INTEL_MEMORY_SYSTEM);
-
-               object_set_placements(obj, &mr, 1);
+               ext_data.n_placements = 1;
        }
 
-       ret = i915_gem_setup(obj, args->size);
-       if (ret)
-               goto object_free;
+       obj = __i915_gem_object_create_user(i915, args->size,
+                                           ext_data.placements,
+                                           ext_data.n_placements);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
 
        return i915_gem_publish(obj, file, &args->size, &args->handle);
-
-object_free:
-       if (obj->mm.n_placements > 1)
-               kfree(placements_ext);
-       i915_gem_object_free(obj);
-       return ret;
 }
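The userspace contract is unchanged by this rework; for orientation, a hedged sketch of the client side that feeds set_placements() above, assuming the DRM_I915_GEM_CREATE_EXT uAPI from include/uapi/drm/i915_drm.h (function and variable names here are illustrative only):

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Request device-local memory first, system memory as the fallback. */
static int create_bo_lmem_then_smem(int drm_fd, uint64_t size, uint32_t *handle)
{
	struct drm_i915_gem_memory_class_instance regions[2] = {
		{ .memory_class = I915_MEMORY_CLASS_DEVICE },
		{ .memory_class = I915_MEMORY_CLASS_SYSTEM },
	};
	struct drm_i915_gem_create_ext_memory_regions ext = {
		.base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
		.num_regions = 2,
		.regions = (uintptr_t)regions,
	};
	struct drm_i915_gem_create_ext create = {
		.size = size,
		.extensions = (uintptr_t)&ext,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
		return -errno;

	*handle = create.handle;
	return 0;
}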
index 616c3a2..afa3411 100644 (file)
@@ -12,6 +12,8 @@
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 
+I915_SELFTEST_DECLARE(static bool force_different_devices;)
+
 static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 {
        return to_intel_bo(buf->priv);
@@ -25,15 +27,11 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
        struct scatterlist *src, *dst;
        int ret, i;
 
-       ret = i915_gem_object_pin_pages_unlocked(obj);
-       if (ret)
-               goto err;
-
        /* Copy sg so that we make an independent mapping */
        st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (st == NULL) {
                ret = -ENOMEM;
-               goto err_unpin_pages;
+               goto err;
        }
 
        ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL);
@@ -58,8 +56,6 @@ err_free_sg:
        sg_free_table(st);
 err_free:
        kfree(st);
-err_unpin_pages:
-       i915_gem_object_unpin_pages(obj);
 err:
        return ERR_PTR(ret);
 }
@@ -68,13 +64,9 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                   struct sg_table *sg,
                                   enum dma_data_direction dir)
 {
-       struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
-
        dma_unmap_sgtable(attachment->dev, sg, dir, DMA_ATTR_SKIP_CPU_SYNC);
        sg_free_table(sg);
        kfree(sg);
-
-       i915_gem_object_unpin_pages(obj);
 }
 
 static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct dma_buf_map *map)
@@ -168,7 +160,46 @@ retry:
        return err;
 }
 
+static int i915_gem_dmabuf_attach(struct dma_buf *dmabuf,
+                                 struct dma_buf_attachment *attach)
+{
+       struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
+       struct i915_gem_ww_ctx ww;
+       int err;
+
+       if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
+               return -EOPNOTSUPP;
+
+       for_i915_gem_ww(&ww, err, true) {
+               err = i915_gem_object_lock(obj, &ww);
+               if (err)
+                       continue;
+
+               err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
+               if (err)
+                       continue;
+
+               err = i915_gem_object_wait_migration(obj, 0);
+               if (err)
+                       continue;
+
+               err = i915_gem_object_pin_pages(obj);
+       }
+
+       return err;
+}
+
+static void i915_gem_dmabuf_detach(struct dma_buf *dmabuf,
+                                  struct dma_buf_attachment *attach)
+{
+       struct drm_i915_gem_object *obj = dma_buf_to_obj(dmabuf);
+
+       i915_gem_object_unpin_pages(obj);
+}
+
 static const struct dma_buf_ops i915_dmabuf_ops =  {
+       .attach = i915_gem_dmabuf_attach,
+       .detach = i915_gem_dmabuf_detach,
        .map_dma_buf = i915_gem_map_dma_buf,
        .unmap_dma_buf = i915_gem_unmap_dma_buf,
        .release = drm_gem_dmabuf_release,
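The new attach hook relies on for_i915_gem_ww(), the driver's ww-mutex transaction helper. A rough open-coded equivalent (sketch only, assuming the i915_gem_ww_ctx_init()/backoff()/fini() helpers declared in i915_gem_ww.h) shows the intent: the body is retried after backing off whenever a lock attempt returns -EDEADLK.

static int dmabuf_attach_sketch(struct drm_i915_gem_object *obj)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);	/* interruptible waits */
retry:
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
	if (!err)
		err = i915_gem_object_wait_migration(obj, 0);
	if (!err)
		err = i915_gem_object_pin_pages(obj);
	if (err == -EDEADLK) {
		/* Drop all held locks, wait for the contended one, retry. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}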
@@ -204,6 +235,8 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
        struct sg_table *pages;
        unsigned int sg_page_sizes;
 
+       assert_object_held(obj);
+
        pages = dma_buf_map_attachment(obj->base.import_attach,
                                       DMA_BIDIRECTIONAL);
        if (IS_ERR(pages))
@@ -241,7 +274,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
        if (dma_buf->ops == &i915_dmabuf_ops) {
                obj = dma_buf_to_obj(dma_buf);
                /* is it from our device? */
-               if (obj->base.dev == dev) {
+               if (obj->base.dev == dev &&
+                   !I915_SELFTEST_ONLY(force_different_devices)) {
                        /*
                         * Importing dmabuf exported from our own gem increases
                         * refcount on gem itself instead of f_count of dmabuf.
index 0738221..b684a62 100644 (file)
@@ -268,6 +268,9 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_object *obj;
        int err = 0;
 
+       if (IS_DGFX(to_i915(dev)))
+               return -ENODEV;
+
        rcu_read_lock();
        obj = i915_gem_object_lookup_rcu(file, args->handle);
        if (!obj) {
@@ -303,6 +306,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
        enum i915_cache_level level;
        int ret = 0;
 
+       if (IS_DGFX(i915))
+               return -ENODEV;
+
        switch (args->caching) {
        case I915_CACHING_NONE:
                level = I915_CACHE_NONE;
@@ -375,7 +381,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
        struct i915_vma *vma;
        int ret;
 
-       /* Frame buffer must be in LMEM (no migration yet) */
+       /* Frame buffer must be in LMEM */
        if (HAS_LMEM(i915) && !i915_gem_object_is_lmem(obj))
                return ERR_PTR(-EINVAL);
 
@@ -484,6 +490,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        u32 write_domain = args->write_domain;
        int err;
 
+       if (IS_DGFX(to_i915(dev)))
+               return -ENODEV;
+
        /* Only handle setting domains to types used by the CPU. */
        if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
                return -EINVAL;
index 989ff06..1aa2499 100644 (file)
@@ -277,18 +277,9 @@ struct i915_execbuffer {
                bool has_llc : 1;
                bool has_fence : 1;
                bool needs_unfenced : 1;
-
-               struct i915_request *rq;
-               u32 *rq_cmd;
-               unsigned int rq_size;
-               struct intel_gt_buffer_pool_node *pool;
        } reloc_cache;
 
-       struct intel_gt_buffer_pool_node *reloc_pool; /** relocation pool for -EDEADLK handling */
-       struct intel_context *reloc_context;
-
        u64 invalid_flags; /** Set of execobj.flags that are invalid */
-       u32 context_flags; /** Set of execobj.flags to insert from the ctx */
 
        u64 batch_len; /** Length of batch within object */
        u32 batch_start_offset; /** Location within object of batch */
@@ -539,9 +530,6 @@ eb_validate_vma(struct i915_execbuffer *eb,
                        entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
        }
 
-       if (!(entry->flags & EXEC_OBJECT_PINNED))
-               entry->flags |= eb->context_flags;
-
        return 0;
 }
 
@@ -741,17 +729,13 @@ static int eb_select_context(struct i915_execbuffer *eb)
        struct i915_gem_context *ctx;
 
        ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
-       if (unlikely(!ctx))
-               return -ENOENT;
+       if (unlikely(IS_ERR(ctx)))
+               return PTR_ERR(ctx);
 
        eb->gem_context = ctx;
        if (rcu_access_pointer(ctx->vm))
                eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
 
-       eb->context_flags = 0;
-       if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
-               eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
-
        return 0;
 }
 
@@ -920,21 +904,38 @@ err:
        return err;
 }
 
-static int eb_validate_vmas(struct i915_execbuffer *eb)
+static int eb_lock_vmas(struct i915_execbuffer *eb)
 {
        unsigned int i;
        int err;
 
-       INIT_LIST_HEAD(&eb->unbound);
-
        for (i = 0; i < eb->buffer_count; i++) {
-               struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
                struct eb_vma *ev = &eb->vma[i];
                struct i915_vma *vma = ev->vma;
 
                err = i915_gem_object_lock(vma->obj, &eb->ww);
                if (err)
                        return err;
+       }
+
+       return 0;
+}
+
+static int eb_validate_vmas(struct i915_execbuffer *eb)
+{
+       unsigned int i;
+       int err;
+
+       INIT_LIST_HEAD(&eb->unbound);
+
+       err = eb_lock_vmas(eb);
+       if (err)
+               return err;
+
+       for (i = 0; i < eb->buffer_count; i++) {
+               struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
+               struct eb_vma *ev = &eb->vma[i];
+               struct i915_vma *vma = ev->vma;
 
                err = eb_pin_vma(eb, entry, ev);
                if (err == -EDEADLK)
@@ -992,7 +993,7 @@ eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
        }
 }
 
-static void eb_release_vmas(struct i915_execbuffer *eb, bool final, bool release_userptr)
+static void eb_release_vmas(struct i915_execbuffer *eb, bool final)
 {
        const unsigned int count = eb->buffer_count;
        unsigned int i;
@@ -1006,11 +1007,6 @@ static void eb_release_vmas(struct i915_execbuffer *eb, bool final, bool release
 
                eb_unreserve_vma(ev);
 
-               if (release_userptr && ev->flags & __EXEC_OBJECT_USERPTR_INIT) {
-                       ev->flags &= ~__EXEC_OBJECT_USERPTR_INIT;
-                       i915_gem_object_userptr_submit_fini(vma->obj);
-               }
-
                if (final)
                        i915_vma_put(vma);
        }
@@ -1020,8 +1016,6 @@ static void eb_release_vmas(struct i915_execbuffer *eb, bool final, bool release
 
 static void eb_destroy(const struct i915_execbuffer *eb)
 {
-       GEM_BUG_ON(eb->reloc_cache.rq);
-
        if (eb->lut_size > 0)
                kfree(eb->buckets);
 }
@@ -1033,14 +1027,6 @@ relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
        return gen8_canonical_addr((int)reloc->delta + target->node.start);
 }
 
-static void reloc_cache_clear(struct reloc_cache *cache)
-{
-       cache->rq = NULL;
-       cache->rq_cmd = NULL;
-       cache->pool = NULL;
-       cache->rq_size = 0;
-}
-
 static void reloc_cache_init(struct reloc_cache *cache,
                             struct drm_i915_private *i915)
 {
@@ -1053,7 +1039,6 @@ static void reloc_cache_init(struct reloc_cache *cache,
        cache->has_fence = cache->graphics_ver < 4;
        cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
        cache->node.flags = 0;
-       reloc_cache_clear(cache);
 }
 
 static inline void *unmask_page(unsigned long p)
@@ -1075,48 +1060,10 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
        return &i915->ggtt;
 }
 
-static void reloc_cache_put_pool(struct i915_execbuffer *eb, struct reloc_cache *cache)
-{
-       if (!cache->pool)
-               return;
-
-       /*
-        * This is a bit nasty, normally we keep objects locked until the end
-        * of execbuffer, but we already submit this, and have to unlock before
-        * dropping the reference. Fortunately we can only hold 1 pool node at
-        * a time, so this should be harmless.
-        */
-       i915_gem_ww_unlock_single(cache->pool->obj);
-       intel_gt_buffer_pool_put(cache->pool);
-       cache->pool = NULL;
-}
-
-static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cache)
-{
-       struct drm_i915_gem_object *obj = cache->rq->batch->obj;
-
-       GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
-       cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
-
-       i915_gem_object_flush_map(obj);
-       i915_gem_object_unpin_map(obj);
-
-       intel_gt_chipset_flush(cache->rq->engine->gt);
-
-       i915_request_add(cache->rq);
-       reloc_cache_put_pool(eb, cache);
-       reloc_cache_clear(cache);
-
-       eb->reloc_pool = NULL;
-}
-
 static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer *eb)
 {
        void *vaddr;
 
-       if (cache->rq)
-               reloc_gpu_flush(eb, cache);
-
        if (!cache->vaddr)
                return;
 
@@ -1298,295 +1245,6 @@ static void clflush_write32(u32 *addr, u32 value, unsigned int flushes)
                *addr = value;
 }
 
-static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
-{
-       struct drm_i915_gem_object *obj = vma->obj;
-       int err;
-
-       assert_vma_held(vma);
-
-       if (obj->cache_dirty & ~obj->cache_coherent)
-               i915_gem_clflush_object(obj, 0);
-       obj->write_domain = 0;
-
-       err = i915_request_await_object(rq, vma->obj, true);
-       if (err == 0)
-               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-
-       return err;
-}
-
-static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
-                            struct intel_engine_cs *engine,
-                            struct i915_vma *vma,
-                            unsigned int len)
-{
-       struct reloc_cache *cache = &eb->reloc_cache;
-       struct intel_gt_buffer_pool_node *pool = eb->reloc_pool;
-       struct i915_request *rq;
-       struct i915_vma *batch;
-       u32 *cmd;
-       int err;
-
-       if (!pool) {
-               pool = intel_gt_get_buffer_pool(engine->gt, PAGE_SIZE,
-                                               cache->has_llc ?
-                                               I915_MAP_WB :
-                                               I915_MAP_WC);
-               if (IS_ERR(pool))
-                       return PTR_ERR(pool);
-       }
-       eb->reloc_pool = NULL;
-
-       err = i915_gem_object_lock(pool->obj, &eb->ww);
-       if (err)
-               goto err_pool;
-
-       cmd = i915_gem_object_pin_map(pool->obj, pool->type);
-       if (IS_ERR(cmd)) {
-               err = PTR_ERR(cmd);
-               goto err_pool;
-       }
-       intel_gt_buffer_pool_mark_used(pool);
-
-       memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
-
-       batch = i915_vma_instance(pool->obj, vma->vm, NULL);
-       if (IS_ERR(batch)) {
-               err = PTR_ERR(batch);
-               goto err_unmap;
-       }
-
-       err = i915_vma_pin_ww(batch, &eb->ww, 0, 0, PIN_USER | PIN_NONBLOCK);
-       if (err)
-               goto err_unmap;
-
-       if (engine == eb->context->engine) {
-               rq = i915_request_create(eb->context);
-       } else {
-               struct intel_context *ce = eb->reloc_context;
-
-               if (!ce) {
-                       ce = intel_context_create(engine);
-                       if (IS_ERR(ce)) {
-                               err = PTR_ERR(ce);
-                               goto err_unpin;
-                       }
-
-                       i915_vm_put(ce->vm);
-                       ce->vm = i915_vm_get(eb->context->vm);
-                       eb->reloc_context = ce;
-               }
-
-               err = intel_context_pin_ww(ce, &eb->ww);
-               if (err)
-                       goto err_unpin;
-
-               rq = i915_request_create(ce);
-               intel_context_unpin(ce);
-       }
-       if (IS_ERR(rq)) {
-               err = PTR_ERR(rq);
-               goto err_unpin;
-       }
-
-       err = intel_gt_buffer_pool_mark_active(pool, rq);
-       if (err)
-               goto err_request;
-
-       err = reloc_move_to_gpu(rq, vma);
-       if (err)
-               goto err_request;
-
-       err = eb->engine->emit_bb_start(rq,
-                                       batch->node.start, PAGE_SIZE,
-                                       cache->graphics_ver > 5 ? 0 : I915_DISPATCH_SECURE);
-       if (err)
-               goto skip_request;
-
-       assert_vma_held(batch);
-       err = i915_request_await_object(rq, batch->obj, false);
-       if (err == 0)
-               err = i915_vma_move_to_active(batch, rq, 0);
-       if (err)
-               goto skip_request;
-
-       rq->batch = batch;
-       i915_vma_unpin(batch);
-
-       cache->rq = rq;
-       cache->rq_cmd = cmd;
-       cache->rq_size = 0;
-       cache->pool = pool;
-
-       /* Return with batch mapping (cmd) still pinned */
-       return 0;
-
-skip_request:
-       i915_request_set_error_once(rq, err);
-err_request:
-       i915_request_add(rq);
-err_unpin:
-       i915_vma_unpin(batch);
-err_unmap:
-       i915_gem_object_unpin_map(pool->obj);
-err_pool:
-       eb->reloc_pool = pool;
-       return err;
-}
-
-static bool reloc_can_use_engine(const struct intel_engine_cs *engine)
-{
-       return engine->class != VIDEO_DECODE_CLASS || GRAPHICS_VER(engine->i915) != 6;
-}
-
-static u32 *reloc_gpu(struct i915_execbuffer *eb,
-                     struct i915_vma *vma,
-                     unsigned int len)
-{
-       struct reloc_cache *cache = &eb->reloc_cache;
-       u32 *cmd;
-
-       if (cache->rq_size > PAGE_SIZE/sizeof(u32) - (len + 1))
-               reloc_gpu_flush(eb, cache);
-
-       if (unlikely(!cache->rq)) {
-               int err;
-               struct intel_engine_cs *engine = eb->engine;
-
-               /* If we need to copy for the cmdparser, we will stall anyway */
-               if (eb_use_cmdparser(eb))
-                       return ERR_PTR(-EWOULDBLOCK);
-
-               if (!reloc_can_use_engine(engine)) {
-                       engine = engine->gt->engine_class[COPY_ENGINE_CLASS][0];
-                       if (!engine)
-                               return ERR_PTR(-ENODEV);
-               }
-
-               err = __reloc_gpu_alloc(eb, engine, vma, len);
-               if (unlikely(err))
-                       return ERR_PTR(err);
-       }
-
-       cmd = cache->rq_cmd + cache->rq_size;
-       cache->rq_size += len;
-
-       return cmd;
-}
-
-static inline bool use_reloc_gpu(struct i915_vma *vma)
-{
-       if (DBG_FORCE_RELOC == FORCE_GPU_RELOC)
-               return true;
-
-       if (DBG_FORCE_RELOC)
-               return false;
-
-       return !dma_resv_test_signaled(vma->resv, true);
-}
-
-static unsigned long vma_phys_addr(struct i915_vma *vma, u32 offset)
-{
-       struct page *page;
-       unsigned long addr;
-
-       GEM_BUG_ON(vma->pages != vma->obj->mm.pages);
-
-       page = i915_gem_object_get_page(vma->obj, offset >> PAGE_SHIFT);
-       addr = PFN_PHYS(page_to_pfn(page));
-       GEM_BUG_ON(overflows_type(addr, u32)); /* expected dma32 */
-
-       return addr + offset_in_page(offset);
-}
-
-static int __reloc_entry_gpu(struct i915_execbuffer *eb,
-                             struct i915_vma *vma,
-                             u64 offset,
-                             u64 target_addr)
-{
-       const unsigned int ver = eb->reloc_cache.graphics_ver;
-       unsigned int len;
-       u32 *batch;
-       u64 addr;
-
-       if (ver >= 8)
-               len = offset & 7 ? 8 : 5;
-       else if (ver >= 4)
-               len = 4;
-       else
-               len = 3;
-
-       batch = reloc_gpu(eb, vma, len);
-       if (batch == ERR_PTR(-EDEADLK))
-               return -EDEADLK;
-       else if (IS_ERR(batch))
-               return false;
-
-       addr = gen8_canonical_addr(vma->node.start + offset);
-       if (ver >= 8) {
-               if (offset & 7) {
-                       *batch++ = MI_STORE_DWORD_IMM_GEN4;
-                       *batch++ = lower_32_bits(addr);
-                       *batch++ = upper_32_bits(addr);
-                       *batch++ = lower_32_bits(target_addr);
-
-                       addr = gen8_canonical_addr(addr + 4);
-
-                       *batch++ = MI_STORE_DWORD_IMM_GEN4;
-                       *batch++ = lower_32_bits(addr);
-                       *batch++ = upper_32_bits(addr);
-                       *batch++ = upper_32_bits(target_addr);
-               } else {
-                       *batch++ = (MI_STORE_DWORD_IMM_GEN4 | (1 << 21)) + 1;
-                       *batch++ = lower_32_bits(addr);
-                       *batch++ = upper_32_bits(addr);
-                       *batch++ = lower_32_bits(target_addr);
-                       *batch++ = upper_32_bits(target_addr);
-               }
-       } else if (ver >= 6) {
-               *batch++ = MI_STORE_DWORD_IMM_GEN4;
-               *batch++ = 0;
-               *batch++ = addr;
-               *batch++ = target_addr;
-       } else if (IS_I965G(eb->i915)) {
-               *batch++ = MI_STORE_DWORD_IMM_GEN4;
-               *batch++ = 0;
-               *batch++ = vma_phys_addr(vma, offset);
-               *batch++ = target_addr;
-       } else if (ver >= 4) {
-               *batch++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-               *batch++ = 0;
-               *batch++ = addr;
-               *batch++ = target_addr;
-       } else if (ver >= 3 &&
-                  !(IS_I915G(eb->i915) || IS_I915GM(eb->i915))) {
-               *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-               *batch++ = addr;
-               *batch++ = target_addr;
-       } else {
-               *batch++ = MI_STORE_DWORD_IMM;
-               *batch++ = vma_phys_addr(vma, offset);
-               *batch++ = target_addr;
-       }
-
-       return true;
-}
-
-static int reloc_entry_gpu(struct i915_execbuffer *eb,
-                           struct i915_vma *vma,
-                           u64 offset,
-                           u64 target_addr)
-{
-       if (eb->reloc_cache.vaddr)
-               return false;
-
-       if (!use_reloc_gpu(vma))
-               return false;
-
-       return __reloc_entry_gpu(eb, vma, offset, target_addr);
-}
-
 static u64
 relocate_entry(struct i915_vma *vma,
               const struct drm_i915_gem_relocation_entry *reloc,
@@ -1595,32 +1253,25 @@ relocate_entry(struct i915_vma *vma,
 {
        u64 target_addr = relocation_target(reloc, target);
        u64 offset = reloc->offset;
-       int reloc_gpu = reloc_entry_gpu(eb, vma, offset, target_addr);
-
-       if (reloc_gpu < 0)
-               return reloc_gpu;
-
-       if (!reloc_gpu) {
-               bool wide = eb->reloc_cache.use_64bit_reloc;
-               void *vaddr;
+       bool wide = eb->reloc_cache.use_64bit_reloc;
+       void *vaddr;
 
 repeat:
-               vaddr = reloc_vaddr(vma->obj, eb,
-                                   offset >> PAGE_SHIFT);
-               if (IS_ERR(vaddr))
-                       return PTR_ERR(vaddr);
-
-               GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
-               clflush_write32(vaddr + offset_in_page(offset),
-                               lower_32_bits(target_addr),
-                               eb->reloc_cache.vaddr);
-
-               if (wide) {
-                       offset += sizeof(u32);
-                       target_addr >>= 32;
-                       wide = false;
-                       goto repeat;
-               }
+       vaddr = reloc_vaddr(vma->obj, eb,
+                           offset >> PAGE_SHIFT);
+       if (IS_ERR(vaddr))
+               return PTR_ERR(vaddr);
+
+       GEM_BUG_ON(!IS_ALIGNED(offset, sizeof(u32)));
+       clflush_write32(vaddr + offset_in_page(offset),
+                       lower_32_bits(target_addr),
+                       eb->reloc_cache.vaddr);
+
+       if (wide) {
+               offset += sizeof(u32);
+               target_addr >>= 32;
+               wide = false;
+               goto repeat;
        }
 
        return target->node.start | UPDATE;
@@ -1992,7 +1643,7 @@ repeat:
        }
 
        /* We may process another execbuffer during the unlock... */
-       eb_release_vmas(eb, false, true);
+       eb_release_vmas(eb, false);
        i915_gem_ww_ctx_fini(&eb->ww);
 
        if (rq) {
@@ -2061,9 +1712,7 @@ repeat_validate:
 
        list_for_each_entry(ev, &eb->relocs, reloc_link) {
                if (!have_copy) {
-                       pagefault_disable();
                        err = eb_relocate_vma(eb, ev);
-                       pagefault_enable();
                        if (err)
                                break;
                } else {
@@ -2096,7 +1745,7 @@ repeat_validate:
 
 err:
        if (err == -EDEADLK) {
-               eb_release_vmas(eb, false, false);
+               eb_release_vmas(eb, false);
                err = i915_gem_ww_ctx_backoff(&eb->ww);
                if (!err)
                        goto repeat_validate;
@@ -2193,7 +1842,7 @@ retry:
 
 err:
        if (err == -EDEADLK) {
-               eb_release_vmas(eb, false, false);
+               eb_release_vmas(eb, false);
                err = i915_gem_ww_ctx_backoff(&eb->ww);
                if (!err)
                        goto retry;
@@ -2270,7 +1919,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
 
 #ifdef CONFIG_MMU_NOTIFIER
        if (!err && (eb->args->flags & __EXEC_USERPTR_USED)) {
-               spin_lock(&eb->i915->mm.notifier_lock);
+               read_lock(&eb->i915->mm.notifier_lock);
 
                /*
                 * count is always at least 1, otherwise __EXEC_USERPTR_USED
@@ -2288,7 +1937,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
                                break;
                }
 
-               spin_unlock(&eb->i915->mm.notifier_lock);
+               read_unlock(&eb->i915->mm.notifier_lock);
        }
 #endif
 
@@ -3156,8 +2805,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
        eb.exec = exec;
        eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
        eb.vma[0].vma = NULL;
-       eb.reloc_pool = eb.batch_pool = NULL;
-       eb.reloc_context = NULL;
+       eb.batch_pool = NULL;
 
        eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
        reloc_cache_init(&eb.reloc_cache, eb.i915);
@@ -3232,7 +2880,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 
        err = eb_lookup_vmas(&eb);
        if (err) {
-               eb_release_vmas(&eb, true, true);
+               eb_release_vmas(&eb, true);
                goto err_engine;
        }
 
@@ -3255,9 +2903,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 
        batch = eb.batch->vma;
 
-       /* All GPU relocation batches must be submitted prior to the user rq */
-       GEM_BUG_ON(eb.reloc_cache.rq);
-
        /* Allocate a request for this batch buffer nice and early. */
        eb.request = i915_request_create(eb.context);
        if (IS_ERR(eb.request)) {
@@ -3265,11 +2910,20 @@ i915_gem_do_execbuffer(struct drm_device *dev,
                goto err_vma;
        }
 
+       if (unlikely(eb.gem_context->syncobj)) {
+               struct dma_fence *fence;
+
+               fence = drm_syncobj_fence_get(eb.gem_context->syncobj);
+               err = i915_request_await_dma_fence(eb.request, fence);
+               dma_fence_put(fence);
+               if (err)
+                       goto err_ext;
+       }
+
        if (in_fence) {
                if (args->flags & I915_EXEC_FENCE_SUBMIT)
                        err = i915_request_await_execution(eb.request,
-                                                          in_fence,
-                                                          eb.engine->bond_execute);
+                                                          in_fence);
                else
                        err = i915_request_await_dma_fence(eb.request,
                                                           in_fence);
@@ -3322,10 +2976,16 @@ err_request:
                        fput(out_fence->file);
                }
        }
+
+       if (unlikely(eb.gem_context->syncobj)) {
+               drm_syncobj_replace_fence(eb.gem_context->syncobj,
+                                         &eb.request->fence);
+       }
+
        i915_request_put(eb.request);
 
 err_vma:
-       eb_release_vmas(&eb, true, true);
+       eb_release_vmas(&eb, true);
        if (eb.trampoline)
                i915_vma_unpin(eb.trampoline);
        WARN_ON(err == -EDEADLK);
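Taken together, the two syncobj hunks above serialize submissions on a context created with a bound syncobj: each execbuf first awaits whatever fence is currently installed, then publishes its own request fence. A condensed sketch of that pattern (illustrative only, the function name is hypothetical and error unwinding is omitted):

static int chain_through_syncobj(struct drm_syncobj *syncobj,
				 struct i915_request *rq)
{
	struct dma_fence *prev = drm_syncobj_fence_get(syncobj);
	int err = 0;

	if (prev) {
		/* Order this request after the previous submission. */
		err = i915_request_await_dma_fence(rq, prev);
		dma_fence_put(prev);
	}
	if (!err)
		/* The next submission on this context waits on us. */
		drm_syncobj_replace_fence(syncobj, &rq->fence);

	return err;
}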
@@ -3333,10 +2993,6 @@ err_vma:
 
        if (eb.batch_pool)
                intel_gt_buffer_pool_put(eb.batch_pool);
-       if (eb.reloc_pool)
-               intel_gt_buffer_pool_put(eb.reloc_pool);
-       if (eb.reloc_context)
-               intel_context_put(eb.reloc_context);
 err_engine:
        eb_put_engine(&eb);
 err_context:
@@ -3450,7 +3106,3 @@ end:;
        kvfree(exec2_list);
        return err;
 }
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/i915_gem_execbuffer.c"
-#endif
index ce6b664..13b217f 100644 (file)
@@ -177,8 +177,8 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
                return ERR_PTR(-ENOMEM);
 
        drm_gem_private_object_init(&i915->drm, &obj->base, size);
-       i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class,
-                            I915_BO_ALLOC_STRUCT_PAGE);
+       i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class, 0);
+       obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
 
        /*
         * Mark the object as volatile, such that the pages are marked as
index d539dff..eb34530 100644 (file)
@@ -23,27 +23,6 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
        return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
 }
 
-/**
- * i915_gem_object_validates_to_lmem - Whether the object is resident in
- * lmem when pages are present.
- * @obj: The object to check.
- *
- * Migratable objects residency may change from under us if the object is
- * not pinned or locked. This function is intended to be used to check whether
- * the object can only reside in lmem when pages are present.
- *
- * Return: Whether the object is always resident in lmem when pages are
- * present.
- */
-bool i915_gem_object_validates_to_lmem(struct drm_i915_gem_object *obj)
-{
-       struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
-
-       return !i915_gem_object_migratable(obj) &&
-               mr && (mr->type == INTEL_MEMORY_LOCAL ||
-                      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
-}
-
 /**
  * i915_gem_object_is_lmem - Whether the object is resident in
  * lmem
@@ -71,11 +50,64 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
                      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
 }
 
+/**
+ * __i915_gem_object_is_lmem - Whether the object is resident in
+ * lmem while in the fence signaling critical path.
+ * @obj: The object to check.
+ *
+ * This function is intended to be called from within the fence signaling
+ * path where the fence keeps the object from being migrated. For example
+ * during gpu reset or similar.
+ *
+ * Return: Whether the object is resident in lmem.
+ */
+bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
+{
+       struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
+
+#ifdef CONFIG_LOCKDEP
+       GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true));
+#endif
+       return mr && (mr->type == INTEL_MEMORY_LOCAL ||
+                     mr->type == INTEL_MEMORY_STOLEN_LOCAL);
+}
+
+/**
+ * __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
+ * minimum page size for the backing pages.
+ * @i915: The i915 instance.
+ * @size: The size in bytes for the object. Note that we need to round the size
+ * up depending on the @page_size. The final object size can be fished out from
+ * the drm GEM object.
+ * @page_size: The requested minimum page size in bytes for this object. This is
+ * useful if we need something bigger than the region's min_page_size due to some
+ * hw restriction, or in some very specialised cases where it needs to be
+ * smaller, where the internal fragmentation cost is too great when rounding up
+ * the object size.
+ * @flags: The optional BO allocation flags.
+ *
+ * Note that this interface assumes you know what you are doing when forcing the
+ * @page_size. If this is smaller than the region's min_page_size then it can
+ * never be inserted into any GTT, otherwise it might lead to undefined
+ * behaviour.
+ *
+ * Return: The object pointer, which might be an ERR_PTR in the case of failure.
+ */
+struct drm_i915_gem_object *
+__i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
+                                     resource_size_t size,
+                                     resource_size_t page_size,
+                                     unsigned int flags)
+{
+       return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
+                                            size, page_size, flags);
+}
+
 struct drm_i915_gem_object *
 i915_gem_object_create_lmem(struct drm_i915_private *i915,
                            resource_size_t size,
                            unsigned int flags)
 {
        return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
-                                            size, flags);
+                                            size, 0, flags);
 }
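A hypothetical call site for the new helper, following the kernel-doc above (sketch only; the function name is illustrative, the 64K minimum page size is just an example of a hw-mandated constraint, and SZ_64K comes from linux/sizes.h):

static struct drm_i915_gem_object *
create_lmem_64k(struct drm_i915_private *i915, resource_size_t size)
{
	/* Force 64K backing pages; the size is rounded up internally, so
	 * read the final size back from obj->base.size. */
	return __i915_gem_object_create_lmem_with_ps(i915, size, SZ_64K, 0);
}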
index ea76fd1..4ee81fc 100644 (file)
@@ -21,6 +21,13 @@ i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
 
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
+bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
+
+struct drm_i915_gem_object *
+__i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
+                                     resource_size_t size,
+                                     resource_size_t page_size,
+                                     unsigned int flags);
 struct drm_i915_gem_object *
 i915_gem_object_create_lmem(struct drm_i915_private *i915,
                            resource_size_t size,
index 4f50a50..5130e8e 100644 (file)
@@ -645,7 +645,8 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
                goto insert;
 
        /* Attempt to reap some mmap space from dead objects */
-       err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
+       err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT,
+                                              NULL);
        if (err)
                goto err;
 
@@ -679,13 +680,19 @@ __assign_mmap_offset(struct drm_i915_gem_object *obj,
                return -ENODEV;
 
        if (obj->ops->mmap_offset)  {
+               if (mmap_type != I915_MMAP_TYPE_FIXED)
+                       return -ENODEV;
+
                *offset = obj->ops->mmap_offset(obj);
                return 0;
        }
 
+       if (mmap_type == I915_MMAP_TYPE_FIXED)
+               return -ENODEV;
+
        if (mmap_type != I915_MMAP_TYPE_GTT &&
            !i915_gem_object_has_struct_page(obj) &&
-           !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
+           !i915_gem_object_has_iomem(obj))
                return -ENODEV;
 
        mmo = mmap_offset_attach(obj, mmap_type, file);
@@ -709,7 +716,12 @@ __assign_mmap_offset_handle(struct drm_file *file,
        if (!obj)
                return -ENOENT;
 
+       err = i915_gem_object_lock_interruptible(obj, NULL);
+       if (err)
+               goto out_put;
        err = __assign_mmap_offset(obj, mmap_type, offset, file);
+       i915_gem_object_unlock(obj);
+out_put:
        i915_gem_object_put(obj);
        return err;
 }
@@ -722,7 +734,9 @@ i915_gem_dumb_mmap_offset(struct drm_file *file,
 {
        enum i915_mmap_type mmap_type;
 
-       if (boot_cpu_has(X86_FEATURE_PAT))
+       if (HAS_LMEM(to_i915(dev)))
+               mmap_type = I915_MMAP_TYPE_FIXED;
+       else if (boot_cpu_has(X86_FEATURE_PAT))
                mmap_type = I915_MMAP_TYPE_WC;
        else if (!i915_ggtt_has_aperture(&to_i915(dev)->ggtt))
                return -ENODEV;
@@ -793,6 +807,10 @@ i915_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
                type = I915_MMAP_TYPE_UC;
                break;
 
+       case I915_MMAP_OFFSET_FIXED:
+               type = I915_MMAP_TYPE_FIXED;
+               break;
+
        default:
                return -EINVAL;
        }
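For orientation, the userspace flow for the new I915_MMAP_OFFSET_FIXED mode mirrors the existing modes; a hedged sketch assuming the drm_i915_gem_mmap_offset uAPI (per the checks added above, objects whose backend provides its own mmap_offset accept only FIXED, and the kernel picks the caching mode; the helper name is illustrative):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

static void *mmap_bo_fixed(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_offset arg = {
		.handle = handle,
		.flags = I915_MMAP_OFFSET_FIXED,
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
		return MAP_FAILED;

	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, arg.offset);
}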
@@ -933,10 +951,7 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
                return PTR_ERR(anon);
        }
 
-       vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
-
-       if (i915_gem_object_has_iomem(obj))
-               vma->vm_flags |= VM_IO;
+       vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
 
        /*
         * We keep the ref on mmo->obj, not vm_file, but we require
@@ -966,6 +981,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
                vma->vm_ops = &vm_ops_cpu;
                break;
 
+       case I915_MMAP_TYPE_FIXED:
+               GEM_WARN_ON(1);
+               fallthrough;
        case I915_MMAP_TYPE_WB:
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
                vma->vm_ops = &vm_ops_cpu;
index cf18c43..6fb9afb 100644 (file)
 #include "i915_gem_context.h"
 #include "i915_gem_mman.h"
 #include "i915_gem_object.h"
-#include "i915_globals.h"
 #include "i915_memcpy.h"
 #include "i915_trace.h"
 
-static struct i915_global_object {
-       struct i915_global base;
-       struct kmem_cache *slab_objects;
-} global;
+static struct kmem_cache *slab_objects;
 
 static const struct drm_gem_object_funcs i915_gem_object_funcs;
 
@@ -45,7 +41,7 @@ struct drm_i915_gem_object *i915_gem_object_alloc(void)
 {
        struct drm_i915_gem_object *obj;
 
-       obj = kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+       obj = kmem_cache_zalloc(slab_objects, GFP_KERNEL);
        if (!obj)
                return NULL;
        obj->base.funcs = &i915_gem_object_funcs;
@@ -55,7 +51,7 @@ struct drm_i915_gem_object *i915_gem_object_alloc(void)
 
 void i915_gem_object_free(struct drm_i915_gem_object *obj)
 {
-       return kmem_cache_free(global.slab_objects, obj);
+       return kmem_cache_free(slab_objects, obj);
 }
 
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -475,34 +471,200 @@ bool i915_gem_object_migratable(struct drm_i915_gem_object *obj)
        return obj->mm.n_placements > 1;
 }
 
-void i915_gem_init__objects(struct drm_i915_private *i915)
+/**
+ * i915_gem_object_has_struct_page - Whether the object is page-backed
+ * @obj: The object to query.
+ *
+ * This function should only be called while the object is locked or pinned,
+ * otherwise the page backing may change under the caller.
+ *
+ * Return: True if page-backed, false otherwise.
+ */
+bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 {
-       INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
+#ifdef CONFIG_LOCKDEP
+       if (IS_DGFX(to_i915(obj->base.dev)) &&
+           i915_gem_object_evictable((void __force *)obj))
+               assert_object_held_shared(obj);
+#endif
+       return obj->mem_flags & I915_BO_FLAG_STRUCT_PAGE;
+}
+
+/**
+ * i915_gem_object_has_iomem - Whether the object is iomem-backed
+ * @obj: The object to query.
+ *
+ * This function should only be called while the object is locked or pinned,
+ * otherwise the iomem backing may change under the caller.
+ *
+ * Return: True if iomem-backed, false otherwise.
+ */
+bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
+{
+#ifdef CONFIG_LOCKDEP
+       if (IS_DGFX(to_i915(obj->base.dev)) &&
+           i915_gem_object_evictable((void __force *)obj))
+               assert_object_held_shared(obj);
+#endif
+       return obj->mem_flags & I915_BO_FLAG_IOMEM;
+}
+
+/**
+ * i915_gem_object_can_migrate - Whether an object likely can be migrated
+ *
+ * @obj: The object to migrate
+ * @id: The region intended to migrate to
+ *
+ * Check whether the object backend supports migration to the
+ * given region. Note that pinning may affect the ability to migrate as
+ * returned by this function.
+ *
+ * This function is primarily intended as a helper for checking the
+ * possibility to migrate objects and might be slightly less permissive
+ * than i915_gem_object_migrate() when it comes to objects with the
+ * I915_BO_ALLOC_USER flag set.
+ *
+ * Return: true if migration is possible, false otherwise.
+ */
+bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
+                                enum intel_region_id id)
+{
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       unsigned int num_allowed = obj->mm.n_placements;
+       struct intel_memory_region *mr;
+       unsigned int i;
+
+       GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
+       GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
+
+       mr = i915->mm.regions[id];
+       if (!mr)
+               return false;
+
+       if (obj->mm.region == mr)
+               return true;
+
+       if (!i915_gem_object_evictable(obj))
+               return false;
+
+       if (!obj->ops->migrate)
+               return false;
+
+       if (!(obj->flags & I915_BO_ALLOC_USER))
+               return true;
+
+       if (num_allowed == 0)
+               return false;
+
+       for (i = 0; i < num_allowed; ++i) {
+               if (mr == obj->mm.placements[i])
+                       return true;
+       }
+
+       return false;
 }
 
-static void i915_global_objects_shrink(void)
+/**
+ * i915_gem_object_migrate - Migrate an object to the desired region id
+ * @obj: The object to migrate.
+ * @ww: An optional struct i915_gem_ww_ctx. If NULL, the backend may
+ * not be successful in evicting other objects to make room for this object.
+ * @id: The region id to migrate to.
+ *
+ * Attempt to migrate the object to the desired memory region. The
+ * object backend must support migration and the object may not be
+ * pinned, (explicitly pinned pages or pinned vmas). The object must
+ * be locked.
+ * On successful completion, the object will have pages pointing to
+ * memory in the new region, but an async migration task may not have
+ * completed yet, and to accomplish that, i915_gem_object_wait_migration()
+ * must be called.
+ *
+ * Note: the @ww parameter is not used yet, but included to make sure
+ * callers put some effort into obtaining a valid ww ctx if one is
+ * available.
+ *
+ * Return: 0 on success. Negative error code on failure. In particular may
+ * return -ENXIO on lack of region space, -EDEADLK for deadlock avoidance
+ * if @ww is set, -EINTR or -ERESTARTSYS if signal pending, and
+ * -EBUSY if the object is pinned.
+ */
+int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+                           struct i915_gem_ww_ctx *ww,
+                           enum intel_region_id id)
 {
-       kmem_cache_shrink(global.slab_objects);
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       struct intel_memory_region *mr;
+
+       GEM_BUG_ON(id >= INTEL_REGION_UNKNOWN);
+       GEM_BUG_ON(obj->mm.madv != I915_MADV_WILLNEED);
+       assert_object_held(obj);
+
+       mr = i915->mm.regions[id];
+       GEM_BUG_ON(!mr);
+
+       if (!i915_gem_object_can_migrate(obj, id))
+               return -EINVAL;
+
+       if (!obj->ops->migrate) {
+               if (GEM_WARN_ON(obj->mm.region != mr))
+                       return -EINVAL;
+               return 0;
+       }
+
+       return obj->ops->migrate(obj, mr);
 }
 
-static void i915_global_objects_exit(void)
+/**
+ * i915_gem_object_placement_possible - Check whether the object can be
+ * placed at a certain memory type
+ * @obj: Pointer to the object
+ * @type: The memory type to check
+ *
+ * Return: True if the object can be placed in @type. False otherwise.
+ */
+bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
+                                       enum intel_memory_type type)
 {
-       kmem_cache_destroy(global.slab_objects);
+       unsigned int i;
+
+       if (!obj->mm.n_placements) {
+               switch (type) {
+               case INTEL_MEMORY_LOCAL:
+                       return i915_gem_object_has_iomem(obj);
+               case INTEL_MEMORY_SYSTEM:
+                       return i915_gem_object_has_pages(obj);
+               default:
+                       /* Ignore stolen for now */
+                       GEM_BUG_ON(1);
+                       return false;
+               }
+       }
+
+       for (i = 0; i < obj->mm.n_placements; i++) {
+               if (obj->mm.placements[i]->type == type)
+                       return true;
+       }
+
+       return false;
+}
+
+void i915_gem_init__objects(struct drm_i915_private *i915)
+{
+       INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
 }
 
-static struct i915_global_object global = { {
-       .shrink = i915_global_objects_shrink,
-       .exit = i915_global_objects_exit,
-} };
+void i915_objects_module_exit(void)
+{
+       kmem_cache_destroy(slab_objects);
+}
 
-int __init i915_global_objects_init(void)
+int __init i915_objects_module_init(void)
 {
-       global.slab_objects =
-               KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
-       if (!global.slab_objects)
+       slab_objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
+       if (!slab_objects)
                return -ENOMEM;
 
-       i915_global_register(&global.base);
        return 0;
 }
 
@@ -515,6 +677,7 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/huge_gem_object.c"
 #include "selftests/huge_pages.c"
+#include "selftests/i915_gem_migrate.c"
 #include "selftests/i915_gem_object.c"
 #include "selftests/i915_gem_coherency.c"
 #endif
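As a usage summary for the migration API documented above, a sketch of the expected calling sequence (check, migrate, wait), mirroring what the dma-buf attach path does; illustrative only, the function name is hypothetical, and the object lock is assumed to already be held:

static int move_to_smem(struct drm_i915_gem_object *obj,
			struct i915_gem_ww_ctx *ww)
{
	int err;

	assert_object_held(obj);

	if (!i915_gem_object_can_migrate(obj, INTEL_REGION_SMEM))
		return -EOPNOTSUPP;

	err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
	if (err)
		return err;

	/* Pages now point at the new region; wait for the async copy. */
	return i915_gem_object_wait_migration(obj, 0);
}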
index e9eeceb..48112b9 100644 (file)
 #include <drm/drm_device.h>
 
 #include "display/intel_frontbuffer.h"
+#include "intel_memory_region.h"
 #include "i915_gem_object_types.h"
 #include "i915_gem_gtt.h"
+#include "i915_gem_ww.h"
 #include "i915_vma_types.h"
 
+enum intel_region_id;
+
 /*
  * XXX: There is a prevalence of the assumption that we fit the
  * object's page count inside a 32bit _signed_ variable. Let's document
@@ -44,6 +48,9 @@ static inline bool i915_gem_object_size_2big(u64 size)
 
 void i915_gem_init__objects(struct drm_i915_private *i915);
 
+void i915_objects_module_exit(void);
+int i915_objects_module_init(void);
+
 struct drm_i915_gem_object *i915_gem_object_alloc(void);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
 
@@ -57,6 +64,10 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915,
 struct drm_i915_gem_object *
 i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
                                       const void *data, resource_size_t size);
+struct drm_i915_gem_object *
+__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
+                             struct intel_memory_region **placements,
+                             unsigned int n_placements);
 
 extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
 
@@ -147,7 +158,7 @@ i915_gem_object_put(struct drm_i915_gem_object *obj)
 /*
  * If more than one potential simultaneous locker, assert held.
  */
-static inline void assert_object_held_shared(struct drm_i915_gem_object *obj)
+static inline void assert_object_held_shared(const struct drm_i915_gem_object *obj)
 {
        /*
         * Note mm list lookup is protected by
@@ -169,13 +180,17 @@ static inline int __i915_gem_object_lock(struct drm_i915_gem_object *obj,
        else
                ret = dma_resv_lock(obj->base.resv, ww ? &ww->ctx : NULL);
 
-       if (!ret && ww)
+       if (!ret && ww) {
+               i915_gem_object_get(obj);
                list_add_tail(&obj->obj_link, &ww->obj_list);
+       }
        if (ret == -EALREADY)
                ret = 0;
 
-       if (ret == -EDEADLK)
+       if (ret == -EDEADLK) {
+               i915_gem_object_get(obj);
                ww->contended = obj;
+       }
 
        return ret;
 }
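
The extra i915_gem_object_get() calls above keep the object alive across the ww backoff path. A minimal sketch of the retry loop this pairs with, following the same pattern as the blt code removed further down in this patch (do_something() is a placeholder):

    struct i915_gem_ww_ctx ww;
    int err;

    i915_gem_ww_ctx_init(&ww, true);
retry:
    err = i915_gem_object_lock(obj, &ww);
    if (!err)
            err = do_something(obj);        /* placeholder for the locked work */
    if (err == -EDEADLK) {
            /* Drop all held locks and retry, acquiring in the right order. */
            err = i915_gem_ww_ctx_backoff(&ww);
            if (!err)
                    goto retry;
    }
    i915_gem_ww_ctx_fini(&ww);
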
@@ -261,17 +276,9 @@ i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
        return obj->ops->flags & flags;
 }
 
-static inline bool
-i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
-{
-       return obj->flags & I915_BO_ALLOC_STRUCT_PAGE;
-}
+bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj);
 
-static inline bool
-i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj)
-{
-       return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
-}
+bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
 
 static inline bool
 i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
@@ -342,22 +349,22 @@ struct scatterlist *
 __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                         struct i915_gem_object_page_iter *iter,
                         unsigned int n,
-                        unsigned int *offset, bool allow_alloc, bool dma);
+                        unsigned int *offset, bool dma);
 
 static inline struct scatterlist *
 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                       unsigned int n,
-                      unsigned int *offset, bool allow_alloc)
+                      unsigned int *offset)
 {
-       return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, allow_alloc, false);
+       return __i915_gem_object_get_sg(obj, &obj->mm.get_page, n, offset, false);
 }
 
 static inline struct scatterlist *
 i915_gem_object_get_sg_dma(struct drm_i915_gem_object *obj,
                           unsigned int n,
-                          unsigned int *offset, bool allow_alloc)
+                          unsigned int *offset)
 {
-       return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, allow_alloc, true);
+       return __i915_gem_object_get_sg(obj, &obj->mm.get_dma_page, n, offset, true);
 }
 
 struct page *
@@ -598,7 +605,18 @@ bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);
 
 bool i915_gem_object_migratable(struct drm_i915_gem_object *obj);
 
-bool i915_gem_object_validates_to_lmem(struct drm_i915_gem_object *obj);
+int i915_gem_object_migrate(struct drm_i915_gem_object *obj,
+                           struct i915_gem_ww_ctx *ww,
+                           enum intel_region_id id);
+
+bool i915_gem_object_can_migrate(struct drm_i915_gem_object *obj,
+                                enum intel_region_id id);
+
+int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
+                                  unsigned int flags);
+
+bool i915_gem_object_placement_possible(struct drm_i915_gem_object *obj,
+                                       enum intel_memory_type type);
 
 #ifdef CONFIG_MMU_NOTIFIER
 static inline bool
@@ -609,14 +627,12 @@ i915_gem_object_is_userptr(struct drm_i915_gem_object *obj)
 
 int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj);
 int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj);
-void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj);
 int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj);
 #else
 static inline bool i915_gem_object_is_userptr(struct drm_i915_gem_object *obj) { return false; }
 
 static inline int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
 static inline int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
-static inline void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); }
 static inline int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj) { GEM_BUG_ON(1); return -ENODEV; }
 
 #endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
deleted file mode 100644 (file)
index 3e28c68..0000000
+++ /dev/null
@@ -1,461 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#include "i915_drv.h"
-#include "gt/intel_context.h"
-#include "gt/intel_engine_pm.h"
-#include "gt/intel_gpu_commands.h"
-#include "gt/intel_gt.h"
-#include "gt/intel_gt_buffer_pool.h"
-#include "gt/intel_ring.h"
-#include "i915_gem_clflush.h"
-#include "i915_gem_object_blt.h"
-
-struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
-                                        struct i915_vma *vma,
-                                        struct i915_gem_ww_ctx *ww,
-                                        u32 value)
-{
-       struct drm_i915_private *i915 = ce->vm->i915;
-       const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
-       struct intel_gt_buffer_pool_node *pool;
-       struct i915_vma *batch;
-       u64 offset;
-       u64 count;
-       u64 rem;
-       u32 size;
-       u32 *cmd;
-       int err;
-
-       GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
-       intel_engine_pm_get(ce->engine);
-
-       count = div_u64(round_up(vma->size, block_size), block_size);
-       size = (1 + 8 * count) * sizeof(u32);
-       size = round_up(size, PAGE_SIZE);
-       pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
-       if (IS_ERR(pool)) {
-               err = PTR_ERR(pool);
-               goto out_pm;
-       }
-
-       err = i915_gem_object_lock(pool->obj, ww);
-       if (err)
-               goto out_put;
-
-       batch = i915_vma_instance(pool->obj, ce->vm, NULL);
-       if (IS_ERR(batch)) {
-               err = PTR_ERR(batch);
-               goto out_put;
-       }
-
-       err = i915_vma_pin_ww(batch, ww, 0, 0, PIN_USER);
-       if (unlikely(err))
-               goto out_put;
-
-       /* we pinned the pool, mark it as such */
-       intel_gt_buffer_pool_mark_used(pool);
-
-       cmd = i915_gem_object_pin_map(pool->obj, pool->type);
-       if (IS_ERR(cmd)) {
-               err = PTR_ERR(cmd);
-               goto out_unpin;
-       }
-
-       rem = vma->size;
-       offset = vma->node.start;
-
-       do {
-               u32 size = min_t(u64, rem, block_size);
-
-               GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
-
-               if (GRAPHICS_VER(i915) >= 8) {
-                       *cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
-                       *cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
-                       *cmd++ = 0;
-                       *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
-                       *cmd++ = lower_32_bits(offset);
-                       *cmd++ = upper_32_bits(offset);
-                       *cmd++ = value;
-               } else {
-                       *cmd++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
-                       *cmd++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
-                       *cmd++ = 0;
-                       *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
-                       *cmd++ = offset;
-                       *cmd++ = value;
-               }
-
-               /* Allow ourselves to be preempted in between blocks. */
-               *cmd++ = MI_ARB_CHECK;
-
-               offset += size;
-               rem -= size;
-       } while (rem);
-
-       *cmd = MI_BATCH_BUFFER_END;
-
-       i915_gem_object_flush_map(pool->obj);
-       i915_gem_object_unpin_map(pool->obj);
-
-       intel_gt_chipset_flush(ce->vm->gt);
-
-       batch->private = pool;
-       return batch;
-
-out_unpin:
-       i915_vma_unpin(batch);
-out_put:
-       intel_gt_buffer_pool_put(pool);
-out_pm:
-       intel_engine_pm_put(ce->engine);
-       return ERR_PTR(err);
-}
-
-int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
-{
-       int err;
-
-       err = i915_request_await_object(rq, vma->obj, false);
-       if (err == 0)
-               err = i915_vma_move_to_active(vma, rq, 0);
-       if (unlikely(err))
-               return err;
-
-       return intel_gt_buffer_pool_mark_active(vma->private, rq);
-}
-
-void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma)
-{
-       i915_vma_unpin(vma);
-       intel_gt_buffer_pool_put(vma->private);
-       intel_engine_pm_put(ce->engine);
-}
-
-static int
-move_obj_to_gpu(struct drm_i915_gem_object *obj,
-               struct i915_request *rq,
-               bool write)
-{
-       if (obj->cache_dirty & ~obj->cache_coherent)
-               i915_gem_clflush_object(obj, 0);
-
-       return i915_request_await_object(rq, obj, write);
-}
-
-int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
-                            struct intel_context *ce,
-                            u32 value)
-{
-       struct i915_gem_ww_ctx ww;
-       struct i915_request *rq;
-       struct i915_vma *batch;
-       struct i915_vma *vma;
-       int err;
-
-       vma = i915_vma_instance(obj, ce->vm, NULL);
-       if (IS_ERR(vma))
-               return PTR_ERR(vma);
-
-       i915_gem_ww_ctx_init(&ww, true);
-       intel_engine_pm_get(ce->engine);
-retry:
-       err = i915_gem_object_lock(obj, &ww);
-       if (err)
-               goto out;
-
-       err = intel_context_pin_ww(ce, &ww);
-       if (err)
-               goto out;
-
-       err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
-       if (err)
-               goto out_ctx;
-
-       batch = intel_emit_vma_fill_blt(ce, vma, &ww, value);
-       if (IS_ERR(batch)) {
-               err = PTR_ERR(batch);
-               goto out_vma;
-       }
-
-       rq = i915_request_create(ce);
-       if (IS_ERR(rq)) {
-               err = PTR_ERR(rq);
-               goto out_batch;
-       }
-
-       err = intel_emit_vma_mark_active(batch, rq);
-       if (unlikely(err))
-               goto out_request;
-
-       err = move_obj_to_gpu(vma->obj, rq, true);
-       if (err == 0)
-               err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-       if (unlikely(err))
-               goto out_request;
-
-       if (ce->engine->emit_init_breadcrumb)
-               err = ce->engine->emit_init_breadcrumb(rq);
-
-       if (likely(!err))
-               err = ce->engine->emit_bb_start(rq,
-                                               batch->node.start,
-                                               batch->node.size,
-                                               0);
-out_request:
-       if (unlikely(err))
-               i915_request_set_error_once(rq, err);
-
-       i915_request_add(rq);
-out_batch:
-       intel_emit_vma_release(ce, batch);
-out_vma:
-       i915_vma_unpin(vma);
-out_ctx:
-       intel_context_unpin(ce);
-out:
-       if (err == -EDEADLK) {
-               err = i915_gem_ww_ctx_backoff(&ww);
-               if (!err)
-                       goto retry;
-       }
-       i915_gem_ww_ctx_fini(&ww);
-       intel_engine_pm_put(ce->engine);
-       return err;
-}
-
-/* Wa_1209644611:icl,ehl */
-static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size)
-{
-       u32 height = size >> PAGE_SHIFT;
-
-       if (GRAPHICS_VER(i915) != 11)
-               return false;
-
-       return height % 4 == 3 && height <= 8;
-}
-
-struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
-                                        struct i915_gem_ww_ctx *ww,
-                                        struct i915_vma *src,
-                                        struct i915_vma *dst)
-{
-       struct drm_i915_private *i915 = ce->vm->i915;
-       const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */
-       struct intel_gt_buffer_pool_node *pool;
-       struct i915_vma *batch;
-       u64 src_offset, dst_offset;
-       u64 count, rem;
-       u32 size, *cmd;
-       int err;
-
-       GEM_BUG_ON(src->size != dst->size);
-
-       GEM_BUG_ON(intel_engine_is_virtual(ce->engine));
-       intel_engine_pm_get(ce->engine);
-
-       count = div_u64(round_up(dst->size, block_size), block_size);
-       size = (1 + 11 * count) * sizeof(u32);
-       size = round_up(size, PAGE_SIZE);
-       pool = intel_gt_get_buffer_pool(ce->engine->gt, size, I915_MAP_WC);
-       if (IS_ERR(pool)) {
-               err = PTR_ERR(pool);
-               goto out_pm;
-       }
-
-       err = i915_gem_object_lock(pool->obj, ww);
-       if (err)
-               goto out_put;
-
-       batch = i915_vma_instance(pool->obj, ce->vm, NULL);
-       if (IS_ERR(batch)) {
-               err = PTR_ERR(batch);
-               goto out_put;
-       }
-
-       err = i915_vma_pin_ww(batch, ww, 0, 0, PIN_USER);
-       if (unlikely(err))
-               goto out_put;
-
-       /* we pinned the pool, mark it as such */
-       intel_gt_buffer_pool_mark_used(pool);
-
-       cmd = i915_gem_object_pin_map(pool->obj, pool->type);
-       if (IS_ERR(cmd)) {
-               err = PTR_ERR(cmd);
-               goto out_unpin;
-       }
-
-       rem = src->size;
-       src_offset = src->node.start;
-       dst_offset = dst->node.start;
-
-       do {
-               size = min_t(u64, rem, block_size);
-               GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
-
-               if (GRAPHICS_VER(i915) >= 9 &&
-                   !wa_1209644611_applies(i915, size)) {
-                       *cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
-                       *cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
-                       *cmd++ = 0;
-                       *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
-                       *cmd++ = lower_32_bits(dst_offset);
-                       *cmd++ = upper_32_bits(dst_offset);
-                       *cmd++ = 0;
-                       *cmd++ = PAGE_SIZE;
-                       *cmd++ = lower_32_bits(src_offset);
-                       *cmd++ = upper_32_bits(src_offset);
-               } else if (GRAPHICS_VER(i915) >= 8) {
-                       *cmd++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
-                       *cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
-                       *cmd++ = 0;
-                       *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
-                       *cmd++ = lower_32_bits(dst_offset);
-                       *cmd++ = upper_32_bits(dst_offset);
-                       *cmd++ = 0;
-                       *cmd++ = PAGE_SIZE;
-                       *cmd++ = lower_32_bits(src_offset);
-                       *cmd++ = upper_32_bits(src_offset);
-               } else {
-                       *cmd++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
-                       *cmd++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
-                       *cmd++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
-                       *cmd++ = dst_offset;
-                       *cmd++ = PAGE_SIZE;
-                       *cmd++ = src_offset;
-               }
-
-               /* Allow ourselves to be preempted in between blocks. */
-               *cmd++ = MI_ARB_CHECK;
-
-               src_offset += size;
-               dst_offset += size;
-               rem -= size;
-       } while (rem);
-
-       *cmd = MI_BATCH_BUFFER_END;
-
-       i915_gem_object_flush_map(pool->obj);
-       i915_gem_object_unpin_map(pool->obj);
-
-       intel_gt_chipset_flush(ce->vm->gt);
-       batch->private = pool;
-       return batch;
-
-out_unpin:
-       i915_vma_unpin(batch);
-out_put:
-       intel_gt_buffer_pool_put(pool);
-out_pm:
-       intel_engine_pm_put(ce->engine);
-       return ERR_PTR(err);
-}
-
-int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
-                            struct drm_i915_gem_object *dst,
-                            struct intel_context *ce)
-{
-       struct i915_address_space *vm = ce->vm;
-       struct i915_vma *vma[2], *batch;
-       struct i915_gem_ww_ctx ww;
-       struct i915_request *rq;
-       int err, i;
-
-       vma[0] = i915_vma_instance(src, vm, NULL);
-       if (IS_ERR(vma[0]))
-               return PTR_ERR(vma[0]);
-
-       vma[1] = i915_vma_instance(dst, vm, NULL);
-       if (IS_ERR(vma[1]))
-               return PTR_ERR(vma[1]);
-
-       i915_gem_ww_ctx_init(&ww, true);
-       intel_engine_pm_get(ce->engine);
-retry:
-       err = i915_gem_object_lock(src, &ww);
-       if (!err)
-               err = i915_gem_object_lock(dst, &ww);
-       if (!err)
-               err = intel_context_pin_ww(ce, &ww);
-       if (err)
-               goto out;
-
-       err = i915_vma_pin_ww(vma[0], &ww, 0, 0, PIN_USER);
-       if (err)
-               goto out_ctx;
-
-       err = i915_vma_pin_ww(vma[1], &ww, 0, 0, PIN_USER);
-       if (unlikely(err))
-               goto out_unpin_src;
-
-       batch = intel_emit_vma_copy_blt(ce, &ww, vma[0], vma[1]);
-       if (IS_ERR(batch)) {
-               err = PTR_ERR(batch);
-               goto out_unpin_dst;
-       }
-
-       rq = i915_request_create(ce);
-       if (IS_ERR(rq)) {
-               err = PTR_ERR(rq);
-               goto out_batch;
-       }
-
-       err = intel_emit_vma_mark_active(batch, rq);
-       if (unlikely(err))
-               goto out_request;
-
-       for (i = 0; i < ARRAY_SIZE(vma); i++) {
-               err = move_obj_to_gpu(vma[i]->obj, rq, i);
-               if (unlikely(err))
-                       goto out_request;
-       }
-
-       for (i = 0; i < ARRAY_SIZE(vma); i++) {
-               unsigned int flags = i ? EXEC_OBJECT_WRITE : 0;
-
-               err = i915_vma_move_to_active(vma[i], rq, flags);
-               if (unlikely(err))
-                       goto out_request;
-       }
-
-       if (rq->engine->emit_init_breadcrumb) {
-               err = rq->engine->emit_init_breadcrumb(rq);
-               if (unlikely(err))
-                       goto out_request;
-       }
-
-       err = rq->engine->emit_bb_start(rq,
-                                       batch->node.start, batch->node.size,
-                                       0);
-
-out_request:
-       if (unlikely(err))
-               i915_request_set_error_once(rq, err);
-
-       i915_request_add(rq);
-out_batch:
-       intel_emit_vma_release(ce, batch);
-out_unpin_dst:
-       i915_vma_unpin(vma[1]);
-out_unpin_src:
-       i915_vma_unpin(vma[0]);
-out_ctx:
-       intel_context_unpin(ce);
-out:
-       if (err == -EDEADLK) {
-               err = i915_gem_ww_ctx_backoff(&ww);
-               if (!err)
-                       goto retry;
-       }
-       i915_gem_ww_ctx_fini(&ww);
-       intel_engine_pm_put(ce->engine);
-       return err;
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/i915_gem_object_blt.c"
-#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.h
deleted file mode 100644 (file)
index 2409fdc..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef __I915_GEM_OBJECT_BLT_H__
-#define __I915_GEM_OBJECT_BLT_H__
-
-#include <linux/types.h>
-
-#include "gt/intel_context.h"
-#include "gt/intel_engine_pm.h"
-#include "i915_vma.h"
-
-struct drm_i915_gem_object;
-struct i915_gem_ww_ctx;
-
-struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
-                                        struct i915_vma *vma,
-                                        struct i915_gem_ww_ctx *ww,
-                                        u32 value);
-
-struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
-                                        struct i915_gem_ww_ctx *ww,
-                                        struct i915_vma *src,
-                                        struct i915_vma *dst);
-
-int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq);
-void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma);
-
-int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
-                            struct intel_context *ce,
-                            u32 value);
-
-int i915_gem_object_copy_blt(struct drm_i915_gem_object *src,
-                            struct drm_i915_gem_object *dst,
-                            struct intel_context *ce);
-
-#endif
index 2a23b77..2471f36 100644 (file)
@@ -18,6 +18,7 @@
 
 struct drm_i915_gem_object;
 struct intel_fronbuffer;
+struct intel_memory_region;
 
 /*
  * struct i915_lut_handle tracks the fast lookups from handle to vma used
@@ -33,10 +34,9 @@ struct i915_lut_handle {
 
 struct drm_i915_gem_object_ops {
        unsigned int flags;
-#define I915_GEM_OBJECT_HAS_IOMEM      BIT(1)
-#define I915_GEM_OBJECT_IS_SHRINKABLE  BIT(2)
-#define I915_GEM_OBJECT_IS_PROXY       BIT(3)
-#define I915_GEM_OBJECT_NO_MMAP                BIT(4)
+#define I915_GEM_OBJECT_IS_SHRINKABLE  BIT(1)
+#define I915_GEM_OBJECT_IS_PROXY       BIT(2)
+#define I915_GEM_OBJECT_NO_MMAP                BIT(3)
 
        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
@@ -78,12 +78,100 @@ struct drm_i915_gem_object_ops {
         * delayed_free - Override the default delayed free implementation
         */
        void (*delayed_free)(struct drm_i915_gem_object *obj);
+
+       /**
+        * migrate - Migrate object to a different region either for
+        * pinning or for as long as the object lock is held.
+        */
+       int (*migrate)(struct drm_i915_gem_object *obj,
+                      struct intel_memory_region *mr);
+
        void (*release)(struct drm_i915_gem_object *obj);
 
        const struct vm_operations_struct *mmap_ops;
        const char *name; /* friendly name for debug, e.g. lockdep classes */
 };
 
+/**
+ * enum i915_cache_level - The supported GTT caching values for system memory
+ * pages.
+ *
+ * These translate to some special GTT PTE bits when binding pages into some
+ * address space. It also determines whether an object, or rather its pages are
+ * coherent with the GPU, when also reading or writing through the CPU cache
+ * with those pages.
+ *
+ * Userspace can also control this through struct drm_i915_gem_caching.
+ */
+enum i915_cache_level {
+       /**
+        * @I915_CACHE_NONE:
+        *
+        * GPU access is not coherent with the CPU cache. If the cache is dirty
+        * and we need the underlying pages to be coherent with some later GPU
+        * access then we need to manually flush the pages.
+        *
+        * On shared LLC platforms reads and writes through the CPU cache are
+        * still coherent even with this setting. See also
+        * &drm_i915_gem_object.cache_coherent for more details. Due to this we
+        * should only ever use uncached for scanout surfaces, otherwise we end
+        * up over-flushing in some places.
+        *
+        * This is the default on non-LLC platforms.
+        */
+       I915_CACHE_NONE = 0,
+       /**
+        * @I915_CACHE_LLC:
+        *
+        * GPU access is coherent with the CPU cache. If the cache is dirty,
+        * then the GPU will ensure that access remains coherent, when both
+        * reading and writing through the CPU cache. GPU writes can dirty the
+        * CPU cache.
+        *
+        * Not used for scanout surfaces.
+        *
+        * Applies to both platforms with shared LLC(HAS_LLC), and snooping
+        * based platforms(HAS_SNOOP).
+        *
+        * This is the default on shared LLC platforms.  The only exception is
+        * scanout objects, where the display engine is not coherent with the
+        * CPU cache. For such objects I915_CACHE_NONE or I915_CACHE_WT is
+        * automatically applied by the kernel in pin_for_display, if userspace
+        * has not done so already.
+        */
+       I915_CACHE_LLC,
+       /**
+        * @I915_CACHE_L3_LLC:
+        *
+        * Explicitly enable the Gfx L3 cache, with coherent LLC.
+        *
+        * The Gfx L3 sits between the domain specific caches, e.g
+        * sampler/render caches, and the larger LLC. LLC is coherent with the
+        * GPU, but L3 is only visible to the GPU, so likely needs to be flushed
+        * when the workload completes.
+        *
+        * Not used for scanout surfaces.
+        *
+        * Only exposed on some gen7 + GGTT. More recent hardware has dropped
+        * this explicit setting, where it should now be enabled by default.
+        */
+       I915_CACHE_L3_LLC,
+       /**
+        * @I915_CACHE_WT:
+        *
+        * Write-through. Used for scanout surfaces.
+        *
+        * The GPU can utilise the caches, while still having the display engine
+        * be coherent with GPU writes, as a result we don't need to flush the
+        * CPU caches when moving out of the render domain. This is the default
+        * setting chosen by the kernel, if supported by the HW, otherwise we
+        * fallback to I915_CACHE_NONE. On the CPU side writes through the CPU
+        * cache still need to be flushed, to remain coherent with the display
+        * engine.
+        */
+       I915_CACHE_WT,
+};
+
 enum i915_map_type {
        I915_MAP_WB = 0,
        I915_MAP_WC,
@@ -97,6 +185,7 @@ enum i915_mmap_type {
        I915_MMAP_TYPE_WC,
        I915_MMAP_TYPE_WB,
        I915_MMAP_TYPE_UC,
+       I915_MMAP_TYPE_FIXED,
 };
 
 struct i915_mmap_offset {
@@ -201,25 +290,138 @@ struct drm_i915_gem_object {
        unsigned long flags;
 #define I915_BO_ALLOC_CONTIGUOUS BIT(0)
 #define I915_BO_ALLOC_VOLATILE   BIT(1)
-#define I915_BO_ALLOC_STRUCT_PAGE BIT(2)
-#define I915_BO_ALLOC_CPU_CLEAR  BIT(3)
-#define I915_BO_ALLOC_USER       BIT(4)
+#define I915_BO_ALLOC_CPU_CLEAR  BIT(2)
+#define I915_BO_ALLOC_USER       BIT(3)
 #define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
                             I915_BO_ALLOC_VOLATILE | \
-                            I915_BO_ALLOC_STRUCT_PAGE | \
                             I915_BO_ALLOC_CPU_CLEAR | \
                             I915_BO_ALLOC_USER)
-#define I915_BO_READONLY         BIT(5)
-#define I915_TILING_QUIRK_BIT    6 /* unknown swizzling; do not release! */
+#define I915_BO_READONLY         BIT(4)
+#define I915_TILING_QUIRK_BIT    5 /* unknown swizzling; do not release! */
 
-       /*
-        * Is the object to be mapped as read-only to the GPU
-        * Only honoured if hardware has relevant pte bit
+       /**
+        * @mem_flags: Mutable placement-related flags
+        *
+        * These are flags that indicate specifics of the memory region
+        * the object is currently in. As such they are only stable
+        * either under the object lock or if the object is pinned.
+        */
+       unsigned int mem_flags;
+#define I915_BO_FLAG_STRUCT_PAGE BIT(0) /* Object backed by struct pages */
+#define I915_BO_FLAG_IOMEM       BIT(1) /* Object backed by IO memory */
+       /**
+        * @cache_level: The desired GTT caching level.
+        *
+        * See enum i915_cache_level for possible values, along with what
+        * each does.
         */
        unsigned int cache_level:3;
-       unsigned int cache_coherent:2;
+       /**
+        * @cache_coherent:
+        *
+        * Track whether the pages are coherent with the GPU if reading or
+        * writing through the CPU caches. This largely depends on the
+        * @cache_level setting.
+        *
+        * On platforms which don't have the shared LLC(HAS_SNOOP), like on Atom
+        * platforms, coherency must be explicitly requested with some special
+        * GTT caching bits(see enum i915_cache_level). When enabling coherency
+        * it does come at a performance and power cost on such platforms. On
+        * the flip side the kernel does not need to manually flush any buffers
+        * which need to be coherent with the GPU, if the object is not coherent
+        * i.e @cache_coherent is zero.
+        *
+        * On platforms that share the LLC with the CPU(HAS_LLC), all GT memory
+        * access will automatically snoop the CPU caches(even with CACHE_NONE).
+        * The one exception is when dealing with the display engine, like with
+        * scanout surfaces. To handle this the kernel will always flush the
+        * surface out of the CPU caches when preparing it for scanout.  Also
+        * note that since scanout surfaces are only ever read by the display
+        * engine we only need to care about flushing any writes through the CPU
+        * cache, reads on the other hand will always be coherent.
+        *
+        * Something strange here is why @cache_coherent is not a simple
+        * boolean, i.e coherent vs non-coherent. The reasoning for this goes back
+        * to the display engine not being fully coherent. As a result scanout
+        * surfaces will either be marked as I915_CACHE_NONE or I915_CACHE_WT.
+        * In the case of seeing I915_CACHE_NONE the kernel makes the assumption
+        * that this is likely a scanout surface, and will set @cache_coherent
+        * as only I915_BO_CACHE_COHERENT_FOR_READ, on platforms with the shared
+        * LLC. The kernel uses this to always flush writes through the CPU
+        * cache as early as possible, where it can, in effect keeping
+        * @cache_dirty clean, so we can potentially avoid stalling when
+        * flushing the surface just before doing the scanout.  This does mean
+        * we might unnecessarily flush non-scanout objects in some places, but
+        * the default assumption is that all normal objects should be using
+        * I915_CACHE_LLC, at least on platforms with the shared LLC.
+        *
+        * Supported values:
+        *
+        * I915_BO_CACHE_COHERENT_FOR_READ:
+        *
+        * On shared LLC platforms, we use this for special scanout surfaces,
+        * where the display engine is not coherent with the CPU cache. As such
+        * we need to ensure we flush any writes before doing the scanout. As an
+        * optimisation we try to flush any writes as early as possible to avoid
+        * stalling later.
+        *
+        * Thus for scanout surfaces using I915_CACHE_NONE, on shared LLC
+        * platforms, we use:
+        *
+        *      cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ
+        *
+        * While for normal objects that are fully coherent, including special
+        * scanout surfaces marked as I915_CACHE_WT, we use:
+        *
+        *      cache_coherent = I915_BO_CACHE_COHERENT_FOR_READ |
+        *                       I915_BO_CACHE_COHERENT_FOR_WRITE
+        *
+        * And then for objects that are not coherent at all we use:
+        *
+        *      cache_coherent = 0
+        *
+        * I915_BO_CACHE_COHERENT_FOR_WRITE:
+        *
+        * When writing through the CPU cache, the GPU is still coherent. Note
+        * that this also implies I915_BO_CACHE_COHERENT_FOR_READ.
+        */
 #define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
 #define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
+       unsigned int cache_coherent:2;
+
+       /**
+        * @cache_dirty:
+        *
+        * Track if we are dirty with writes through the CPU cache for this
+        * object. As a result reading directly from main memory might yield
+        * stale data.
+        *
+        * This also ties into whether the kernel is tracking the object as
+        * coherent with the GPU, as per @cache_coherent, as it determines if
+        * flushing might be needed at various points.
+        *
+        * Another part of @cache_dirty is managing flushing when first
+        * acquiring the pages for system memory, at this point the pages are
+        * considered foreign, so the default assumption is that the cache is
+        * dirty, for example the page zeroing done by the kernel might leave
+        * writes in the CPU cache, or swapping-in, while the actual data in
+        * main memory is potentially stale.  Note that this is a potential
+        * security issue when dealing with userspace objects and zeroing. Now,
+        * whether we actually need to apply the big sledgehammer of flushing all
+        * the pages on acquire depends on if @cache_coherent is marked as
+        * I915_BO_CACHE_COHERENT_FOR_WRITE, i.e that the GPU will be coherent
+        * for both reads and writes through the CPU cache.
+        *
+        * Note that on shared LLC platforms we still apply the heavy flush for
+        * I915_CACHE_NONE objects, under the assumption that this is going to
+        * be used for scanout.
+        *
+        * Update: On some hardware there is now also the 'Bypass LLC' MOCS
+        * entry, which defeats our @cache_coherent tracking, since userspace
+        * can freely bypass the CPU cache when touching the pages with the GPU,
+        * where the kernel is completely unaware. On such platform we need
+        * where the kernel is completely unaware. On such platforms we need to
+        */
        unsigned int cache_dirty:1;
 
        /**
@@ -265,9 +467,10 @@ struct drm_i915_gem_object {
                struct intel_memory_region *region;
 
                /**
-                * Memory manager node allocated for this object.
+                * Memory manager resource allocated for this object. Only
+                * needed for the mock region.
                 */
-               void *st_mm_node;
+               struct ttm_resource *res;
 
                /**
                 * Element within memory_region->objects or region->purgeable
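
With I915_BO_ALLOC_STRUCT_PAGE removed from the allocation flags, the backing-store type is now a mutable property tracked in @mem_flags and queried through helpers, as the i915_gem_object_pin_map() change further down does. A hedged illustration (example_can_cpu_map() is a hypothetical name, not part of the patch):

    static bool example_can_cpu_map(const struct drm_i915_gem_object *obj)
    {
            /* mem_flags follows the object's current backing store, not its allocation. */
            return i915_gem_object_has_struct_page(obj) ||  /* I915_BO_FLAG_STRUCT_PAGE */
                   i915_gem_object_has_iomem(obj);          /* I915_BO_FLAG_IOMEM */
    }
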
index 086005c..8eb1c3a 100644 (file)
@@ -321,8 +321,7 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj,
        dma_addr_t addr;
        void *vaddr;
 
-       if (type != I915_MAP_WC)
-               return ERR_PTR(-ENODEV);
+       GEM_BUG_ON(type != I915_MAP_WC);
 
        if (n_pfn > ARRAY_SIZE(stack)) {
                /* Too big for stack -- allocate temporary array instead */
@@ -351,7 +350,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
        int err;
 
        if (!i915_gem_object_has_struct_page(obj) &&
-           !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
+           !i915_gem_object_has_iomem(obj))
                return ERR_PTR(-ENXIO);
 
        assert_object_held(obj);
@@ -374,6 +373,34 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
        }
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 
+       /*
+        * For discrete, our CPU mappings need to be consistent in order to
+        * function correctly on !x86. When mapping things through TTM, we use
+        * the same rules to determine the caching type.
+        *
+        * The caching rules, starting from DG1:
+        *
+        *      - If the object can be placed in device local-memory, then the
+        *        pages should be allocated and mapped as write-combined only.
+        *
+        *      - Everything else is always allocated and mapped as write-back,
+        *        with the guarantee that everything is also coherent with the
+        *        GPU.
+        *
+        * Internal users of lmem are already expected to get this right, so no
+        * fudging needed there.
+        */
+       if (i915_gem_object_placement_possible(obj, INTEL_MEMORY_LOCAL)) {
+               if (type != I915_MAP_WC && !obj->mm.n_placements) {
+                       ptr = ERR_PTR(-ENODEV);
+                       goto err_unpin;
+               }
+
+               type = I915_MAP_WC;
+       } else if (IS_DGFX(to_i915(obj->base.dev))) {
+               type = I915_MAP_WB;
+       }
+
        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (ptr && has_type != type) {
                if (pinned) {
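
A hedged usage sketch of the rule spelled out above (a fragment, with the object lock assumed held as pin_map asserts): an object that can be placed in local memory must be mapped write-combined, so callers that want CPU access simply ask for, or accept, I915_MAP_WC.

    void *vaddr;

    vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
    if (IS_ERR(vaddr))
            return PTR_ERR(vaddr);

    memset(vaddr, 0, obj->base.size);       /* CPU access through the WC mapping */
    i915_gem_object_unpin_map(obj);
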
@@ -467,7 +494,7 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
                         struct i915_gem_object_page_iter *iter,
                         unsigned int n,
                         unsigned int *offset,
-                        bool allow_alloc, bool dma)
+                        bool dma)
 {
        struct scatterlist *sg;
        unsigned int idx, count;
@@ -489,9 +516,6 @@ __i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
        if (n < READ_ONCE(iter->sg_idx))
                goto lookup;
 
-       if (!allow_alloc)
-               goto manual_lookup;
-
        mutex_lock(&iter->lock);
 
        /* We prefer to reuse the last sg so that repeated lookup of this
@@ -541,16 +565,7 @@ scan:
        if (unlikely(n < idx)) /* insertion completed by another thread */
                goto lookup;
 
-       goto manual_walk;
-
-manual_lookup:
-       idx = 0;
-       sg = obj->mm.pages->sgl;
-       count = __sg_page_count(sg);
-
-manual_walk:
-       /*
-        * In case we failed to insert the entry into the radixtree, we need
+       /* In case we failed to insert the entry into the radixtree, we need
         * to look beyond the current sg.
         */
        while (idx + count <= n) {
@@ -597,7 +612,7 @@ i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
 
        GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
 
-       sg = i915_gem_object_get_sg(obj, n, &offset, true);
+       sg = i915_gem_object_get_sg(obj, n, &offset);
        return nth_page(sg_page(sg), offset);
 }
 
@@ -623,7 +638,7 @@ i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
        struct scatterlist *sg;
        unsigned int offset;
 
-       sg = i915_gem_object_get_sg_dma(obj, n, &offset, true);
+       sg = i915_gem_object_get_sg_dma(obj, n, &offset);
 
        if (len)
                *len = sg_dma_len(sg) - (offset << PAGE_SHIFT);
index be72ad0..7986612 100644 (file)
@@ -76,7 +76,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
 
        /* We're no longer struct page backed */
-       obj->flags &= ~I915_BO_ALLOC_STRUCT_PAGE;
+       obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
        __i915_gem_object_set_pages(obj, st, sg->length);
 
        return 0;
index d1f1840..1f557b2 100644 (file)
@@ -13,11 +13,7 @@ void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
 {
        obj->mm.region = intel_memory_region_get(mem);
 
-       if (obj->base.size <= mem->min_page_size)
-               obj->flags |= I915_BO_ALLOC_CONTIGUOUS;
-
        mutex_lock(&mem->objects.lock);
-
        list_add(&obj->mm.region_link, &mem->objects.list);
        mutex_unlock(&mem->objects.lock);
 }
@@ -36,9 +32,11 @@ void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
 struct drm_i915_gem_object *
 i915_gem_object_create_region(struct intel_memory_region *mem,
                              resource_size_t size,
+                             resource_size_t page_size,
                              unsigned int flags)
 {
        struct drm_i915_gem_object *obj;
+       resource_size_t default_page_size;
        int err;
 
        /*
@@ -52,7 +50,14 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
        if (!mem)
                return ERR_PTR(-ENODEV);
 
-       size = round_up(size, mem->min_page_size);
+       default_page_size = mem->min_page_size;
+       if (page_size)
+               default_page_size = page_size;
+
+       GEM_BUG_ON(!is_power_of_2_u64(default_page_size));
+       GEM_BUG_ON(default_page_size < PAGE_SIZE);
+
+       size = round_up(size, default_page_size);
 
        GEM_BUG_ON(!size);
        GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT));
@@ -64,7 +69,7 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
        if (!obj)
                return ERR_PTR(-ENOMEM);
 
-       err = mem->ops->init_object(mem, obj, size, flags);
+       err = mem->ops->init_object(mem, obj, size, page_size, flags);
        if (err)
                goto err_object_free;
 
index 84fcb32..1008e58 100644 (file)
@@ -19,6 +19,7 @@ void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
 struct drm_i915_gem_object *
 i915_gem_object_create_region(struct intel_memory_region *mem,
                              resource_size_t size,
+                             resource_size_t page_size,
                              unsigned int flags);
 
 #endif
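
The new page_size argument lets a caller force a minimum backing-store page size; passing 0 keeps the previous behaviour of rounding up to the region's min_page_size, which is what the shmem and stolen call sites below do. A short sketch (obj, mem and size are assumed locals):

    /* Let the region pick the page size, no special allocation flags. */
    obj = i915_gem_object_create_region(mem, size, 0, 0);
    if (IS_ERR(obj))
            return PTR_ERR(obj);
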
index 5d16c44..11f0721 100644 (file)
@@ -182,6 +182,24 @@ rebuild_st:
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj, st);
 
+       /*
+        * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
+        * possible for userspace to bypass the GTT caching bits set by the
+        * kernel, as per the given object cache_level. This is troublesome
+        * since the heavy flush we apply when first gathering the pages is
+        * skipped if the kernel thinks the object is coherent with the GPU. As
+        * a result it might be possible to bypass the cache and read the
+        * contents of the page directly, which could be stale data. If it's
+        * just a case of userspace shooting themselves in the foot then so be
+        * it, but since i915 takes the stance of always zeroing memory before
+        * handing it to userspace, we need to prevent this.
+        *
+        * By setting cache_dirty here we make the clflush in set_pages
+        * unconditional on such platforms.
+        */
+       if (IS_JSL_EHL(i915) && obj->flags & I915_BO_ALLOC_USER)
+               obj->cache_dirty = true;
+
        __i915_gem_object_set_pages(obj, st, sg_page_sizes);
 
        return 0;
@@ -302,6 +320,7 @@ void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_
        struct pagevec pvec;
        struct page *page;
 
+       GEM_WARN_ON(IS_DGFX(to_i915(obj->base.dev)));
        __i915_gem_object_release_shmem(obj, pages, true);
 
        i915_gem_gtt_finish_pages(obj, pages);
@@ -444,7 +463,7 @@ shmem_pread(struct drm_i915_gem_object *obj,
 
 static void shmem_release(struct drm_i915_gem_object *obj)
 {
-       if (obj->flags & I915_BO_ALLOC_STRUCT_PAGE)
+       if (i915_gem_object_has_struct_page(obj))
                i915_gem_object_release_memory_region(obj);
 
        fput(obj->base.filp);
@@ -489,6 +508,7 @@ static int __create_shmem(struct drm_i915_private *i915,
 static int shmem_object_init(struct intel_memory_region *mem,
                             struct drm_i915_gem_object *obj,
                             resource_size_t size,
+                            resource_size_t page_size,
                             unsigned int flags)
 {
        static struct lock_class_key lock_class;
@@ -513,9 +533,8 @@ static int shmem_object_init(struct intel_memory_region *mem,
        mapping_set_gfp_mask(mapping, mask);
        GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
 
-       i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class,
-                            I915_BO_ALLOC_STRUCT_PAGE);
-
+       i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class, 0);
+       obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
 
@@ -548,7 +567,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915,
                             resource_size_t size)
 {
        return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM],
-                                            size, 0);
+                                            size, 0, 0);
 }
 
 /* Allocate a new GEM object and fill it with the supplied data */
@@ -561,6 +580,7 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv,
        resource_size_t offset;
        int err;
 
+       GEM_WARN_ON(IS_DGFX(dev_priv));
        obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE));
        if (IS_ERR(obj))
                return obj;
index b0c3a7d..ddd37cc 100644 (file)
@@ -447,7 +447,6 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
                break;
        case 8:
        case 9:
-       case 10:
                if (IS_LP(i915))
                        chv_get_stolen_reserved(i915, uncore,
                                                &reserved_base, &reserved_size);
@@ -670,6 +669,7 @@ static int __i915_gem_object_create_stolen(struct intel_memory_region *mem,
 static int _i915_gem_object_stolen_init(struct intel_memory_region *mem,
                                        struct drm_i915_gem_object *obj,
                                        resource_size_t size,
+                                       resource_size_t page_size,
                                        unsigned int flags)
 {
        struct drm_i915_private *i915 = mem->i915;
@@ -708,7 +708,7 @@ struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_i915_private *i915,
                              resource_size_t size)
 {
-       return i915_gem_object_create_region(i915->mm.stolen_region, size, 0);
+       return i915_gem_object_create_region(i915->mm.stolen_region, size, 0, 0);
 }
 
 static int init_stolen_smem(struct intel_memory_region *mem)
index bf33724..771eb29 100644 (file)
@@ -15,6 +15,9 @@
 #include "gem/i915_gem_ttm.h"
 #include "gem/i915_gem_mman.h"
 
+#include "gt/intel_migrate.h"
+#include "gt/intel_engine_pm.h"
+
 #define I915_PL_LMEM0 TTM_PL_PRIV
 #define I915_PL_SYSTEM TTM_PL_SYSTEM
 #define I915_PL_STOLEN TTM_PL_VRAM
 #define I915_TTM_PRIO_NO_PAGES  1
 #define I915_TTM_PRIO_HAS_PAGES 2
 
+/*
+ * Size of struct ttm_place vector in on-stack struct ttm_placement allocs
+ */
+#define I915_TTM_MAX_PLACEMENTS INTEL_REGION_UNKNOWN
+
 /**
  * struct i915_ttm_tt - TTM page vector with additional private information
  * @ttm: The base TTM page vector.
@@ -42,36 +50,123 @@ struct i915_ttm_tt {
        struct sg_table *cached_st;
 };
 
-static const struct ttm_place lmem0_sys_placement_flags[] = {
-       {
-               .fpfn = 0,
-               .lpfn = 0,
-               .mem_type = I915_PL_LMEM0,
-               .flags = 0,
-       }, {
-               .fpfn = 0,
-               .lpfn = 0,
-               .mem_type = I915_PL_SYSTEM,
-               .flags = 0,
-       }
-};
-
-static struct ttm_placement i915_lmem0_placement = {
-       .num_placement = 1,
-       .placement = &lmem0_sys_placement_flags[0],
-       .num_busy_placement = 1,
-       .busy_placement = &lmem0_sys_placement_flags[0],
+static const struct ttm_place sys_placement_flags = {
+       .fpfn = 0,
+       .lpfn = 0,
+       .mem_type = I915_PL_SYSTEM,
+       .flags = 0,
 };
 
 static struct ttm_placement i915_sys_placement = {
        .num_placement = 1,
-       .placement = &lmem0_sys_placement_flags[1],
+       .placement = &sys_placement_flags,
        .num_busy_placement = 1,
-       .busy_placement = &lmem0_sys_placement_flags[1],
+       .busy_placement = &sys_placement_flags,
 };
 
+static int i915_ttm_err_to_gem(int err)
+{
+       /* Fastpath */
+       if (likely(!err))
+               return 0;
+
+       switch (err) {
+       case -EBUSY:
+               /*
+                * TTM likes to convert -EDEADLK to -EBUSY, and wants us to
+                * restart the operation, since we don't record the contending
+                * lock. We use -EAGAIN to restart.
+                */
+               return -EAGAIN;
+       case -ENOSPC:
+               /*
+                * Memory type / region is full, and we can't evict.
+                * Except possibly system, that returns -ENOMEM;
+                */
+               return -ENXIO;
+       default:
+               break;
+       }
+
+       return err;
+}
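
A hedged sketch of how this translation is intended to be used (the actual call sites are outside the hunks shown here): raw TTM return values, e.g. from ttm_bo_validate(), are converted before being returned through the GEM API.

    ret = ttm_bo_validate(bo, &placement, &ctx);
    if (ret)
            return i915_ttm_err_to_gem(ret);
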
+
+static bool gpu_binds_iomem(struct ttm_resource *mem)
+{
+       return mem->mem_type != TTM_PL_SYSTEM;
+}
+
+static bool cpu_maps_iomem(struct ttm_resource *mem)
+{
+       /* Once / if we support GGTT, this is also false for cached ttm_tts */
+       return mem->mem_type != TTM_PL_SYSTEM;
+}
+
+static enum i915_cache_level
+i915_ttm_cache_level(struct drm_i915_private *i915, struct ttm_resource *res,
+                    struct ttm_tt *ttm)
+{
+       return ((HAS_LLC(i915) || HAS_SNOOP(i915)) && !gpu_binds_iomem(res) &&
+               ttm->caching == ttm_cached) ? I915_CACHE_LLC :
+               I915_CACHE_NONE;
+}
+
 static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj);
 
+static enum ttm_caching
+i915_ttm_select_tt_caching(const struct drm_i915_gem_object *obj)
+{
+       /*
+        * Objects only allowed in system get cached cpu-mappings.
+        * Other objects get WC mapping for now. Even if in system.
+        */
+       if (obj->mm.region->type == INTEL_MEMORY_SYSTEM &&
+           obj->mm.n_placements <= 1)
+               return ttm_cached;
+
+       return ttm_write_combined;
+}
+
+static void
+i915_ttm_place_from_region(const struct intel_memory_region *mr,
+                          struct ttm_place *place,
+                          unsigned int flags)
+{
+       memset(place, 0, sizeof(*place));
+       place->mem_type = intel_region_to_ttm_type(mr);
+
+       if (flags & I915_BO_ALLOC_CONTIGUOUS)
+               place->flags = TTM_PL_FLAG_CONTIGUOUS;
+}
+
+static void
+i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
+                           struct ttm_place *requested,
+                           struct ttm_place *busy,
+                           struct ttm_placement *placement)
+{
+       unsigned int num_allowed = obj->mm.n_placements;
+       unsigned int flags = obj->flags;
+       unsigned int i;
+
+       placement->num_placement = 1;
+       i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
+                                  obj->mm.region, requested, flags);
+
+       /* Cache this on object? */
+       placement->num_busy_placement = num_allowed;
+       for (i = 0; i < placement->num_busy_placement; ++i)
+               i915_ttm_place_from_region(obj->mm.placements[i], busy + i, flags);
+
+       if (num_allowed == 0) {
+               *busy = *requested;
+               placement->num_busy_placement = 1;
+       }
+
+       placement->placement = requested;
+       placement->busy_placement = busy;
+}
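
A hedged sketch of a caller (not shown in these hunks): as the comment above the I915_TTM_MAX_PLACEMENTS define suggests, the busy vector is sized so that the whole ttm_placement can be built on the stack.

    struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
    struct ttm_placement placement;

    GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
    i915_ttm_placement_from_obj(obj, &requested, busy, &placement);
    /* placement can now be handed to ttm_bo_validate(). */
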
+
 static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
                                         uint32_t page_flags)
 {
@@ -89,7 +184,8 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
            man->use_tt)
                page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
 
-       ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, ttm_write_combined);
+       ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags,
+                         i915_ttm_select_tt_caching(obj));
        if (ret) {
                kfree(i915_tt);
                return NULL;
@@ -119,6 +215,7 @@ static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
        struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
 
        ttm_tt_destroy_common(bdev, ttm);
+       ttm_tt_fini(ttm);
        kfree(i915_tt);
 }
 
@@ -128,11 +225,7 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 
        /* Will do for now. Our pinned objects are still on TTM's LRU lists */
-       if (!i915_gem_object_evictable(obj))
-               return false;
-
-       /* This isn't valid with a buddy allocator */
-       return ttm_bo_eviction_valuable(bo, place);
+       return i915_gem_object_evictable(obj);
 }
 
 static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
@@ -175,6 +268,55 @@ static void i915_ttm_free_cached_io_st(struct drm_i915_gem_object *obj)
        obj->ttm.cached_io_st = NULL;
 }
 
+static void
+i915_ttm_adjust_domains_after_move(struct drm_i915_gem_object *obj)
+{
+       struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+
+       if (cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
+               obj->write_domain = I915_GEM_DOMAIN_WC;
+               obj->read_domains = I915_GEM_DOMAIN_WC;
+       } else {
+               obj->write_domain = I915_GEM_DOMAIN_CPU;
+               obj->read_domains = I915_GEM_DOMAIN_CPU;
+       }
+}
+
+static void i915_ttm_adjust_gem_after_move(struct drm_i915_gem_object *obj)
+{
+       struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+       unsigned int cache_level;
+       unsigned int i;
+
+       /*
+        * If object was moved to an allowable region, update the object
+        * region to consider it migrated. Note that if it's currently not
+        * in an allowable region, it's evicted and we don't update the
+        * object region.
+        */
+       if (intel_region_to_ttm_type(obj->mm.region) != bo->resource->mem_type) {
+               for (i = 0; i < obj->mm.n_placements; ++i) {
+                       struct intel_memory_region *mr = obj->mm.placements[i];
+
+                       if (intel_region_to_ttm_type(mr) == bo->resource->mem_type &&
+                           mr != obj->mm.region) {
+                               i915_gem_object_release_memory_region(obj);
+                               i915_gem_object_init_memory_region(obj, mr);
+                               break;
+                       }
+               }
+       }
+
+       obj->mem_flags &= ~(I915_BO_FLAG_STRUCT_PAGE | I915_BO_FLAG_IOMEM);
+
+       obj->mem_flags |= cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
+               I915_BO_FLAG_STRUCT_PAGE;
+
+       cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
+                                          bo->ttm);
+       i915_gem_object_set_cache_coherency(obj, cache_level);
+}
+
 static void i915_ttm_purge(struct drm_i915_gem_object *obj)
 {
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
@@ -190,8 +332,10 @@ static void i915_ttm_purge(struct drm_i915_gem_object *obj)
 
        /* TTM's purge interface. Note that we might be reentering. */
        ret = ttm_bo_validate(bo, &place, &ctx);
-
        if (!ret) {
+               obj->write_domain = 0;
+               obj->read_domains = 0;
+               i915_ttm_adjust_gem_after_move(obj);
                i915_ttm_free_cached_io_st(obj);
                obj->mm.madv = __I915_MADV_PURGED;
        }
@@ -214,6 +358,7 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
 
        if (likely(obj)) {
                /* This releases all gem object bindings to the backend. */
+               i915_ttm_free_cached_io_st(obj);
                __i915_gem_free_object(obj);
        }
 }
@@ -273,13 +418,75 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
                         struct ttm_resource *res)
 {
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
-       struct ttm_resource_manager *man =
-               ttm_manager_type(bo->bdev, res->mem_type);
 
-       if (man->use_tt)
+       if (!gpu_binds_iomem(res))
                return i915_ttm_tt_get_st(bo->ttm);
 
-       return intel_region_ttm_node_to_st(obj->mm.region, res);
+       /*
+        * If CPU mapping differs, we need to add the ttm_tt pages to
+        * the resulting st. Might make sense for GGTT.
+        */
+       GEM_WARN_ON(!cpu_maps_iomem(res));
+       return intel_region_ttm_resource_to_st(obj->mm.region, res);
+}
+
+static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
+                              struct ttm_resource *dst_mem,
+                              struct sg_table *dst_st)
+{
+       struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
+                                                    bdev);
+       struct ttm_resource_manager *src_man =
+               ttm_manager_type(bo->bdev, bo->resource->mem_type);
+       struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+       struct sg_table *src_st;
+       struct i915_request *rq;
+       struct ttm_tt *ttm = bo->ttm;
+       enum i915_cache_level src_level, dst_level;
+       int ret;
+
+       if (!i915->gt.migrate.context)
+               return -EINVAL;
+
+       dst_level = i915_ttm_cache_level(i915, dst_mem, ttm);
+       if (!ttm || !ttm_tt_is_populated(ttm)) {
+               if (bo->type == ttm_bo_type_kernel)
+                       return -EINVAL;
+
+               if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
+                       return 0;
+
+               intel_engine_pm_get(i915->gt.migrate.context->engine);
+               ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
+                                                 dst_st->sgl, dst_level,
+                                                 gpu_binds_iomem(dst_mem),
+                                                 0, &rq);
+
+               if (!ret && rq) {
+                       i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+                       i915_request_put(rq);
+               }
+               intel_engine_pm_put(i915->gt.migrate.context->engine);
+       } else {
+               src_st = src_man->use_tt ? i915_ttm_tt_get_st(ttm) :
+                       obj->ttm.cached_io_st;
+
+               src_level = i915_ttm_cache_level(i915, bo->resource, ttm);
+               intel_engine_pm_get(i915->gt.migrate.context->engine);
+               ret = intel_context_migrate_copy(i915->gt.migrate.context,
+                                                NULL, src_st->sgl, src_level,
+                                                gpu_binds_iomem(bo->resource),
+                                                dst_st->sgl, dst_level,
+                                                gpu_binds_iomem(dst_mem),
+                                                &rq);
+               if (!ret && rq) {
+                       i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+                       i915_request_put(rq);
+               }
+               intel_engine_pm_put(i915->gt.migrate.context->engine);
+       }
+
+       return ret;
 }
 
 static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
@@ -290,8 +497,6 @@ static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
        struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
        struct ttm_resource_manager *dst_man =
                ttm_manager_type(bo->bdev, dst_mem->mem_type);
-       struct ttm_resource_manager *src_man =
-               ttm_manager_type(bo->bdev, bo->resource->mem_type);
        struct intel_memory_region *dst_reg, *src_reg;
        union {
                struct ttm_kmap_iter_tt tt;
@@ -332,34 +537,40 @@ static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
        if (IS_ERR(dst_st))
                return PTR_ERR(dst_st);
 
-       /* If we start mapping GGTT, we can no longer use man::use_tt here. */
-       dst_iter = dst_man->use_tt ?
-               ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) :
-               ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
-                                        dst_st, dst_reg->region.start);
-
-       src_iter = src_man->use_tt ?
-               ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
-               ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
-                                        obj->ttm.cached_io_st,
-                                        src_reg->region.start);
-
-       ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
+       ret = i915_ttm_accel_move(bo, dst_mem, dst_st);
+       if (ret) {
+               /* If we start mapping GGTT, we can no longer use man::use_tt here. */
+               dst_iter = !cpu_maps_iomem(dst_mem) ?
+                       ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) :
+                       ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
+                                                dst_st, dst_reg->region.start);
+
+               src_iter = !cpu_maps_iomem(bo->resource) ?
+                       ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
+                       ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
+                                                obj->ttm.cached_io_st,
+                                                src_reg->region.start);
+
+               ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
+       }
+       /* Below dst_mem becomes bo->resource. */
        ttm_bo_move_sync_cleanup(bo, dst_mem);
+       i915_ttm_adjust_domains_after_move(obj);
        i915_ttm_free_cached_io_st(obj);
 
-       if (!dst_man->use_tt) {
+       if (gpu_binds_iomem(dst_mem) || cpu_maps_iomem(dst_mem)) {
                obj->ttm.cached_io_st = dst_st;
                obj->ttm.get_io_page.sg_pos = dst_st->sgl;
                obj->ttm.get_io_page.sg_idx = 0;
        }
 
+       i915_ttm_adjust_gem_after_move(obj);
        return 0;
 }
 
 static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
 {
-       if (mem->mem_type < I915_PL_LMEM0)
+       if (!cpu_maps_iomem(mem))
                return 0;
 
        mem->bus.caching = ttm_write_combined;
@@ -378,7 +589,7 @@ static unsigned long i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
 
        GEM_WARN_ON(bo->ttm);
 
-       sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true, true);
+       sg = __i915_gem_object_get_sg(obj, &obj->ttm.get_io_page, page_offset, &ofs, true);
 
        return ((base + sg_dma_address(sg)) >> PAGE_SHIFT) + ofs;
 }
@@ -406,7 +617,8 @@ struct ttm_device_funcs *i915_ttm_driver(void)
        return &i915_ttm_bo_driver;
 }
 
-static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
+static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
+                               struct ttm_placement *placement)
 {
        struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
        struct ttm_operation_ctx ctx = {
@@ -414,25 +626,111 @@ static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
                .no_wait_gpu = false,
        };
        struct sg_table *st;
+       int real_num_busy;
        int ret;
 
-       /* Move to the requested placement. */
-       ret = ttm_bo_validate(bo, &i915_lmem0_placement, &ctx);
-       if (ret)
-               return ret == -ENOSPC ? -ENXIO : ret;
-
-       /* Object either has a page vector or is an iomem object */
-       st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
-       if (IS_ERR(st))
-               return PTR_ERR(st);
+       /* First try only the requested placement. No eviction. */
+       real_num_busy = fetch_and_zero(&placement->num_busy_placement);
+       ret = ttm_bo_validate(bo, placement, &ctx);
+       if (ret) {
+               ret = i915_ttm_err_to_gem(ret);
+               /*
+                * Anything that wants to restart the operation gets to
+                * do that.
+                */
+               if (ret == -EDEADLK || ret == -EINTR || ret == -ERESTARTSYS ||
+                   ret == -EAGAIN)
+                       return ret;
 
-       __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
+               /*
+                * If the initial attempt fails, allow all accepted placements,
+                * evicting if necessary.
+                */
+               placement->num_busy_placement = real_num_busy;
+               ret = ttm_bo_validate(bo, placement, &ctx);
+               if (ret)
+                       return i915_ttm_err_to_gem(ret);
+       }
 
        i915_ttm_adjust_lru(obj);
+       if (bo->ttm && !ttm_tt_is_populated(bo->ttm)) {
+               ret = ttm_tt_populate(bo->bdev, bo->ttm, &ctx);
+               if (ret)
+                       return ret;
+
+               i915_ttm_adjust_domains_after_move(obj);
+               i915_ttm_adjust_gem_after_move(obj);
+       }
+
+       if (!i915_gem_object_has_pages(obj)) {
+               /* Object either has a page vector or is an iomem object */
+               st = bo->ttm ? i915_ttm_tt_get_st(bo->ttm) : obj->ttm.cached_io_st;
+               if (IS_ERR(st))
+                       return PTR_ERR(st);
+
+               __i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
+       }
 
        return ret;
 }
 
+static int i915_ttm_get_pages(struct drm_i915_gem_object *obj)
+{
+       struct ttm_place requested, busy[I915_TTM_MAX_PLACEMENTS];
+       struct ttm_placement placement;
+
+       GEM_BUG_ON(obj->mm.n_placements > I915_TTM_MAX_PLACEMENTS);
+
+       /* Move to the requested placement. */
+       i915_ttm_placement_from_obj(obj, &requested, busy, &placement);
+
+       return __i915_ttm_get_pages(obj, &placement);
+}
+
+/**
+ * DOC: Migration vs eviction
+ *
+ * GEM migration may not be the same as TTM migration / eviction. If
+ * the TTM core decides to evict an object it may be evicted to a
+ * TTM memory type that is not in the object's allowable GEM regions, or
+ * in fact theoretically to a TTM memory type that doesn't correspond to
+ * a GEM memory region. In that case the object's GEM region is not
+ * updated, and the data is migrated back to the GEM region at
+ * get_pages time. TTM may, however, set up CPU PTEs to the object even
+ * while it is evicted.
+ *
+ * GEM forced migration using the i915_ttm_migrate() op is allowed even
+ * to regions that are not in the object's list of allowable placements.
+ */
+static int i915_ttm_migrate(struct drm_i915_gem_object *obj,
+                           struct intel_memory_region *mr)
+{
+       struct ttm_place requested;
+       struct ttm_placement placement;
+       int ret;
+
+       i915_ttm_place_from_region(mr, &requested, obj->flags);
+       placement.num_placement = 1;
+       placement.num_busy_placement = 1;
+       placement.placement = &requested;
+       placement.busy_placement = &requested;
+
+       ret = __i915_ttm_get_pages(obj, &placement);
+       if (ret)
+               return ret;
+
+       /*
+        * Reinitialize the region bindings. This is primarily
+        * required for objects where the new region is not in
+        * its allowable placements.
+        */
+       if (obj->mm.region != mr) {
+               i915_gem_object_release_memory_region(obj);
+               i915_gem_object_init_memory_region(obj, mr);
+       }
+
+       return 0;
+}
+
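To make the "Migration vs eviction" note above concrete, here is a minimal caller-side sketch of a forced migration. It relies only on helpers exercised by the selftests added later in this series (for_i915_gem_ww(), i915_gem_object_lock(), i915_gem_object_migrate()); the function name itself is illustrative and not part of the patch. The matching wait is sketched after i915_gem_object_wait_migration() further below.

/*
 * Sketch only: force an object into system memory, even if SMEM is not
 * among its allowable placements. i915_ttm_migrate() then reinitializes
 * the region bindings as shown above.
 */
static int example_force_to_smem(struct drm_i915_gem_object *obj)
{
        struct i915_gem_ww_ctx ww;
        int err;

        for_i915_gem_ww(&ww, err, true) {
                /* Backoff/retry on -EDEADLK is handled by the ww loop. */
                err = i915_gem_object_lock(obj, &ww);
                if (err)
                        continue;

                err = i915_gem_object_migrate(obj, &ww, INTEL_REGION_SMEM);
        }

        return err;
}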
 static void i915_ttm_put_pages(struct drm_i915_gem_object *obj,
                               struct sg_table *st)
 {
@@ -561,15 +859,15 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
        return drm_vma_node_offset_addr(&obj->base.vma_node);
 }
 
-const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
+static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
        .name = "i915_gem_object_ttm",
-       .flags = I915_GEM_OBJECT_HAS_IOMEM,
 
        .get_pages = i915_ttm_get_pages,
        .put_pages = i915_ttm_put_pages,
        .truncate = i915_ttm_purge,
        .adjust_lru = i915_ttm_adjust_lru,
        .delayed_free = i915_ttm_delayed_free,
+       .migrate = i915_ttm_migrate,
        .mmap_offset = i915_ttm_mmap_offset,
        .mmap_ops = &vm_ops_ttm,
 };
@@ -596,37 +894,32 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
 int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
                               struct drm_i915_gem_object *obj,
                               resource_size_t size,
+                              resource_size_t page_size,
                               unsigned int flags)
 {
        static struct lock_class_key lock_class;
        struct drm_i915_private *i915 = mem->i915;
+       struct ttm_operation_ctx ctx = {
+               .interruptible = true,
+               .no_wait_gpu = false,
+       };
        enum ttm_bo_type bo_type;
-       size_t alignment = 0;
        int ret;
 
-       /* Adjust alignment to GPU- and CPU huge page sizes. */
-
-       if (mem->is_range_manager) {
-               if (size >= SZ_1G)
-                       alignment = SZ_1G >> PAGE_SHIFT;
-               else if (size >= SZ_2M)
-                       alignment = SZ_2M >> PAGE_SHIFT;
-               else if (size >= SZ_64K)
-                       alignment = SZ_64K >> PAGE_SHIFT;
-       }
-
        drm_gem_private_object_init(&i915->drm, &obj->base, size);
        i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
        i915_gem_object_init_memory_region(obj, mem);
        i915_gem_object_make_unshrinkable(obj);
-       obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT;
-       i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
        INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
        mutex_init(&obj->ttm.get_io_page.lock);
-
        bo_type = (obj->flags & I915_BO_ALLOC_USER) ? ttm_bo_type_device :
                ttm_bo_type_kernel;
 
+       obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
+
+       /* Forcing the page size is kernel internal only */
+       GEM_BUG_ON(page_size && obj->mm.n_placements);
+
        /*
         * If this function fails, it will call the destructor, but
         * our caller still owns the object. So no freeing in the
@@ -634,14 +927,39 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
         * Similarly, in delayed_destroy, we can't call ttm_bo_put()
         * until successful initialization.
         */
-       obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
-       ret = ttm_bo_init(&i915->bdev, i915_gem_to_ttm(obj), size,
-                         bo_type, &i915_sys_placement, alignment,
-                         true, NULL, NULL, i915_ttm_bo_destroy);
+       ret = ttm_bo_init_reserved(&i915->bdev, i915_gem_to_ttm(obj), size,
+                                  bo_type, &i915_sys_placement,
+                                  page_size >> PAGE_SHIFT,
+                                  &ctx, NULL, NULL, i915_ttm_bo_destroy);
+       if (ret)
+               return i915_ttm_err_to_gem(ret);
 
-       if (!ret)
-               obj->ttm.created = true;
+       obj->ttm.created = true;
+       i915_ttm_adjust_domains_after_move(obj);
+       i915_ttm_adjust_gem_after_move(obj);
+       i915_gem_object_unlock(obj);
 
-       /* i915 wants -ENXIO when out of memory region space. */
-       return (ret == -ENOSPC) ? -ENXIO : ret;
+       return 0;
+}
+
+static const struct intel_memory_region_ops ttm_system_region_ops = {
+       .init_object = __i915_gem_ttm_object_init,
+};
+
+struct intel_memory_region *
+i915_gem_ttm_system_setup(struct drm_i915_private *i915,
+                         u16 type, u16 instance)
+{
+       struct intel_memory_region *mr;
+
+       mr = intel_memory_region_create(i915, 0,
+                                       totalram_pages() << PAGE_SHIFT,
+                                       PAGE_SIZE, 0,
+                                       type, instance,
+                                       &ttm_system_region_ops);
+       if (IS_ERR(mr))
+               return mr;
+
+       intel_memory_region_set_name(mr, "system-ttm");
+       return mr;
 }
index b8d3dcb..40927f6 100644 (file)
@@ -44,5 +44,6 @@ i915_ttm_to_gem(struct ttm_buffer_object *bo)
 int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
                               struct drm_i915_gem_object *obj,
                               resource_size_t size,
+                              resource_size_t page_size,
                               unsigned int flags);
 #endif
index 7487bab..468a7a6 100644 (file)
@@ -67,11 +67,11 @@ static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
        if (!mmu_notifier_range_blockable(range))
                return false;
 
-       spin_lock(&i915->mm.notifier_lock);
+       write_lock(&i915->mm.notifier_lock);
 
        mmu_interval_set_seq(mni, cur_seq);
 
-       spin_unlock(&i915->mm.notifier_lock);
+       write_unlock(&i915->mm.notifier_lock);
 
        /*
         * We don't wait when the process is exiting. This is valid
@@ -107,16 +107,15 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
 
 static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct page **pvec = NULL;
 
-       spin_lock(&i915->mm.notifier_lock);
+       assert_object_held_shared(obj);
+
        if (!--obj->userptr.page_ref) {
                pvec = obj->userptr.pvec;
                obj->userptr.pvec = NULL;
        }
        GEM_BUG_ON(obj->userptr.page_ref < 0);
-       spin_unlock(&i915->mm.notifier_lock);
 
        if (pvec) {
                const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
@@ -128,7 +127,6 @@ static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
 
 static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
        unsigned int max_segment = i915_sg_segment_size();
        struct sg_table *st;
@@ -141,16 +139,13 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
        if (!st)
                return -ENOMEM;
 
-       spin_lock(&i915->mm.notifier_lock);
-       if (GEM_WARN_ON(!obj->userptr.page_ref)) {
-               spin_unlock(&i915->mm.notifier_lock);
-               ret = -EFAULT;
+       if (!obj->userptr.page_ref) {
+               ret = -EAGAIN;
                goto err_free;
        }
 
        obj->userptr.page_ref++;
        pvec = obj->userptr.pvec;
-       spin_unlock(&i915->mm.notifier_lock);
 
 alloc_table:
        sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0,
@@ -241,7 +236,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
        i915_gem_object_userptr_drop_ref(obj);
 }
 
-static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool get_pages)
+static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
 {
        struct sg_table *pages;
        int err;
@@ -259,15 +254,11 @@ static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool
        if (!IS_ERR_OR_NULL(pages))
                i915_gem_userptr_put_pages(obj, pages);
 
-       if (get_pages)
-               err = ____i915_gem_object_get_pages(obj);
-
        return err;
 }
 
 int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        unsigned int gup_flags = 0;
@@ -277,39 +268,22 @@ int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
        if (obj->userptr.notifier.mm != current->mm)
                return -EFAULT;
 
+       notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);
+
        ret = i915_gem_object_lock_interruptible(obj, NULL);
        if (ret)
                return ret;
 
-       /* optimistically try to preserve current pages while unlocked */
-       if (i915_gem_object_has_pages(obj) &&
-           !mmu_interval_check_retry(&obj->userptr.notifier,
-                                     obj->userptr.notifier_seq)) {
-               spin_lock(&i915->mm.notifier_lock);
-               if (obj->userptr.pvec &&
-                   !mmu_interval_read_retry(&obj->userptr.notifier,
-                                            obj->userptr.notifier_seq)) {
-                       obj->userptr.page_ref++;
-
-                       /* We can keep using the current binding, this is the fastpath */
-                       ret = 1;
-               }
-               spin_unlock(&i915->mm.notifier_lock);
+       if (notifier_seq == obj->userptr.notifier_seq && obj->userptr.pvec) {
+               i915_gem_object_unlock(obj);
+               return 0;
        }
 
-       if (!ret) {
-               /* Make sure userptr is unbound for next attempt, so we don't use stale pages. */
-               ret = i915_gem_object_userptr_unbind(obj, false);
-       }
+       ret = i915_gem_object_userptr_unbind(obj);
        i915_gem_object_unlock(obj);
-       if (ret < 0)
+       if (ret)
                return ret;
 
-       if (ret > 0)
-               return 0;
-
-       notifier_seq = mmu_interval_read_begin(&obj->userptr.notifier);
-
        pvec = kvmalloc_array(num_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pvec)
                return -ENOMEM;
@@ -329,7 +303,9 @@ int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
        }
        ret = 0;
 
-       spin_lock(&i915->mm.notifier_lock);
+       ret = i915_gem_object_lock_interruptible(obj, NULL);
+       if (ret)
+               goto out;
 
        if (mmu_interval_read_retry(&obj->userptr.notifier,
                !obj->userptr.page_ref ? notifier_seq :
@@ -341,12 +317,14 @@ int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
        if (!obj->userptr.page_ref++) {
                obj->userptr.pvec = pvec;
                obj->userptr.notifier_seq = notifier_seq;
-
                pvec = NULL;
+               ret = ____i915_gem_object_get_pages(obj);
        }
 
+       obj->userptr.page_ref--;
+
 out_unlock:
-       spin_unlock(&i915->mm.notifier_lock);
+       i915_gem_object_unlock(obj);
 
 out:
        if (pvec) {
@@ -369,11 +347,6 @@ int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
        return 0;
 }
 
-void i915_gem_object_userptr_submit_fini(struct drm_i915_gem_object *obj)
-{
-       i915_gem_object_userptr_drop_ref(obj);
-}
-
 int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
 {
        int err;
@@ -396,7 +369,6 @@ int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
                i915_gem_object_unlock(obj);
        }
 
-       i915_gem_object_userptr_submit_fini(obj);
        return err;
 }
 
@@ -450,6 +422,34 @@ static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
 
 #endif
 
+static int
+probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
+{
+       const unsigned long end = addr + len;
+       struct vm_area_struct *vma;
+       int ret = -EFAULT;
+
+       mmap_read_lock(mm);
+       for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+               /* Check for holes; note that we also update addr below */
+               if (vma->vm_start > addr)
+                       break;
+
+               if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+                       break;
+
+               if (vma->vm_end >= end) {
+                       ret = 0;
+                       break;
+               }
+
+               addr = vma->vm_end;
+       }
+       mmap_read_unlock(mm);
+
+       return ret;
+}
+
 /*
  * Creates a new mm object that wraps some normal memory from the process
  * context - user memory.
@@ -505,7 +505,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
        }
 
        if (args->flags & ~(I915_USERPTR_READ_ONLY |
-                           I915_USERPTR_UNSYNCHRONIZED))
+                           I915_USERPTR_UNSYNCHRONIZED |
+                           I915_USERPTR_PROBE))
                return -EINVAL;
 
        if (i915_gem_object_size_2big(args->user_size))
@@ -532,14 +533,24 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
                        return -ENODEV;
        }
 
+       if (args->flags & I915_USERPTR_PROBE) {
+               /*
+                * Check that the range pointed to represents real struct
+                * pages and not iomappings (at this moment in time!)
+                */
+               ret = probe_range(current->mm, args->user_ptr, args->user_size);
+               if (ret)
+                       return ret;
+       }
+
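For context, a hedged userspace-side sketch of the new probe flag. The drm_i915_gem_userptr struct and DRM_IOCTL_I915_GEM_USERPTR are existing uapi; the helper name and header path are illustrative assumptions. As probe_range() above shows, a hole or a PFNMAP/MIXEDMAP mapping in the range makes creation fail with -EFAULT.

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>       /* assumes libdrm's installed headers */

/* Illustrative only: create a userptr object, asking the kernel to probe
 * the range for real struct pages at creation time. */
static int create_probed_userptr(int drm_fd, void *ptr, uint64_t size,
                                 uint32_t *handle)
{
        struct drm_i915_gem_userptr arg;

        memset(&arg, 0, sizeof(arg));
        arg.user_ptr = (uint64_t)(uintptr_t)ptr;
        arg.user_size = size;
        arg.flags = I915_USERPTR_PROBE;

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
                return -errno;  /* -EFAULT on holes or iomappings */

        *handle = arg.handle;
        return 0;
}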
 #ifdef CONFIG_MMU_NOTIFIER
        obj = i915_gem_object_alloc();
        if (obj == NULL)
                return -ENOMEM;
 
        drm_gem_private_object_init(dev, &obj->base, args->user_size);
-       i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
-                            I915_BO_ALLOC_STRUCT_PAGE);
+       i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, 0);
+       obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
        i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
@@ -572,7 +583,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
 int i915_gem_init_userptr(struct drm_i915_private *dev_priv)
 {
 #ifdef CONFIG_MMU_NOTIFIER
-       spin_lock_init(&dev_priv->mm.notifier_lock);
+       rwlock_init(&dev_priv->mm.notifier_lock);
 #endif
 
        return 0;
index 1e97520..f909aaa 100644 (file)
@@ -104,8 +104,8 @@ static void fence_set_priority(struct dma_fence *fence,
        engine = rq->engine;
 
        rcu_read_lock(); /* RCU serialisation for set-wedged protection */
-       if (engine->schedule)
-               engine->schedule(rq, attr);
+       if (engine->sched_engine->schedule)
+               engine->sched_engine->schedule(rq, attr);
        rcu_read_unlock();
 }
 
@@ -290,3 +290,22 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        i915_gem_object_put(obj);
        return ret;
 }
+
+/**
+ * i915_gem_object_wait_migration - Sync an accelerated migration operation
+ * @obj: The migrating object.
+ * @flags: waiting flags. Currently supports only I915_WAIT_INTERRUPTIBLE.
+ *
+ * Wait for any pending async migration operation on the object,
+ * whether it was initiated explicitly (i915_gem_object_migrate()) or
+ * implicitly (swap-in, initial clearing).
+ *
+ * Return: 0 if successful, -ERESTARTSYS if a signal was hit during waiting.
+ */
+int i915_gem_object_wait_migration(struct drm_i915_gem_object *obj,
+                                  unsigned int flags)
+{
+       might_sleep();
+       /* NOP for now. */
+       return 0;
+}
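A minimal sketch of the intended call pattern, mirroring how the selftests later in this series use the helper: after an explicit migration, wait before the CPU touches the backing store. The wrapper name is illustrative; holding the object lock here mirrors the selftests' usage rather than a documented requirement.

/* Sketch only: sync a just-migrated object before CPU access. */
static int example_sync_after_migrate(struct drm_i915_gem_object *obj)
{
        int err;

        assert_object_held(obj);

        err = i915_gem_object_wait_migration(obj, I915_WAIT_INTERRUPTIBLE);
        if (err)
                return err;     /* -ERESTARTSYS if a signal was hit */

        /* The backing store is now stable and safe to pin/map. */
        return 0;
}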
index 0c8ecfd..f963b8e 100644 (file)
@@ -114,8 +114,8 @@ huge_gem_object(struct drm_i915_private *i915,
                return ERR_PTR(-ENOMEM);
 
        drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
-       i915_gem_object_init(obj, &huge_ops, &lock_class,
-                            I915_BO_ALLOC_STRUCT_PAGE);
+       i915_gem_object_init(obj, &huge_ops, &lock_class, 0);
+       obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
 
        obj->read_domains = I915_GEM_DOMAIN_CPU;
        obj->write_domain = I915_GEM_DOMAIN_CPU;
index dadd485..a094f3c 100644 (file)
@@ -167,9 +167,8 @@ huge_pages_object(struct drm_i915_private *i915,
                return ERR_PTR(-ENOMEM);
 
        drm_gem_private_object_init(&i915->drm, &obj->base, size);
-       i915_gem_object_init(obj, &huge_page_ops, &lock_class,
-                            I915_BO_ALLOC_STRUCT_PAGE);
-
+       i915_gem_object_init(obj, &huge_page_ops, &lock_class, 0);
+       obj->mem_flags |= I915_BO_FLAG_STRUCT_PAGE;
        i915_gem_object_set_volatile(obj);
 
        obj->write_domain = I915_GEM_DOMAIN_CPU;
@@ -497,7 +496,8 @@ static int igt_mock_memory_region_huge_pages(void *arg)
                int i;
 
                for (i = 0; i < ARRAY_SIZE(flags); ++i) {
-                       obj = i915_gem_object_create_region(mem, page_size,
+                       obj = i915_gem_object_create_region(mem,
+                                                           page_size, page_size,
                                                            flags[i]);
                        if (IS_ERR(obj)) {
                                err = PTR_ERR(obj);
index 176e6b2..ecbcbb8 100644 (file)
@@ -5,6 +5,7 @@
 
 #include "i915_selftest.h"
 
+#include "gt/intel_context.h"
 #include "gt/intel_engine_user.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gpu_commands.h"
 #include "huge_gem_object.h"
 #include "mock_context.h"
 
-static int __igt_client_fill(struct intel_engine_cs *engine)
-{
-       struct intel_context *ce = engine->kernel_context;
-       struct drm_i915_gem_object *obj;
-       I915_RND_STATE(prng);
-       IGT_TIMEOUT(end);
-       u32 *vaddr;
-       int err = 0;
-
-       intel_engine_pm_get(engine);
-       do {
-               const u32 max_block_size = S16_MAX * PAGE_SIZE;
-               u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng));
-               u32 phys_sz = sz % (max_block_size + 1);
-               u32 val = prandom_u32_state(&prng);
-               u32 i;
-
-               sz = round_up(sz, PAGE_SIZE);
-               phys_sz = round_up(phys_sz, PAGE_SIZE);
-
-               pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
-                        phys_sz, sz, val);
-
-               obj = huge_gem_object(engine->i915, phys_sz, sz);
-               if (IS_ERR(obj)) {
-                       err = PTR_ERR(obj);
-                       goto err_flush;
-               }
-
-               vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
-               if (IS_ERR(vaddr)) {
-                       err = PTR_ERR(vaddr);
-                       goto err_put;
-               }
-
-               /*
-                * XXX: The goal is move this to get_pages, so try to dirty the
-                * CPU cache first to check that we do the required clflush
-                * before scheduling the blt for !llc platforms. This matches
-                * some version of reality where at get_pages the pages
-                * themselves may not yet be coherent with the GPU(swap-in). If
-                * we are missing the flush then we should see the stale cache
-                * values after we do the set_to_cpu_domain and pick it up as a
-                * test failure.
-                */
-               memset32(vaddr, val ^ 0xdeadbeaf,
-                        huge_gem_object_phys_size(obj) / sizeof(u32));
-
-               if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
-                       obj->cache_dirty = true;
-
-               err = i915_gem_schedule_fill_pages_blt(obj, ce, obj->mm.pages,
-                                                      &obj->mm.page_sizes,
-                                                      val);
-               if (err)
-                       goto err_unpin;
-
-               i915_gem_object_lock(obj, NULL);
-               err = i915_gem_object_set_to_cpu_domain(obj, false);
-               i915_gem_object_unlock(obj);
-               if (err)
-                       goto err_unpin;
-
-               for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
-                       if (vaddr[i] != val) {
-                               pr_err("vaddr[%u]=%x, expected=%x\n", i,
-                                      vaddr[i], val);
-                               err = -EINVAL;
-                               goto err_unpin;
-                       }
-               }
-
-               i915_gem_object_unpin_map(obj);
-               i915_gem_object_put(obj);
-       } while (!time_after(jiffies, end));
-
-       goto err_flush;
-
-err_unpin:
-       i915_gem_object_unpin_map(obj);
-err_put:
-       i915_gem_object_put(obj);
-err_flush:
-       if (err == -ENOMEM)
-               err = 0;
-       intel_engine_pm_put(engine);
-
-       return err;
-}
-
-static int igt_client_fill(void *arg)
-{
-       int inst = 0;
-
-       do {
-               struct intel_engine_cs *engine;
-               int err;
-
-               engine = intel_engine_lookup_user(arg,
-                                                 I915_ENGINE_CLASS_COPY,
-                                                 inst++);
-               if (!engine)
-                       return 0;
-
-               err = __igt_client_fill(engine);
-               if (err == -ENOMEM)
-                       err = 0;
-               if (err)
-                       return err;
-       } while (1);
-}
-
 #define WIDTH 512
 #define HEIGHT 32
 
@@ -693,7 +582,6 @@ static int igt_client_tiled_blits(void *arg)
 int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
-               SUBTEST(igt_client_fill),
                SUBTEST(igt_client_tiled_blits),
        };
 
index dbcfa28..8eb5050 100644 (file)
@@ -680,7 +680,7 @@ static int igt_ctx_exec(void *arg)
                        struct i915_gem_context *ctx;
                        struct intel_context *ce;
 
-                       ctx = kernel_context(i915);
+                       ctx = kernel_context(i915, NULL);
                        if (IS_ERR(ctx)) {
                                err = PTR_ERR(ctx);
                                goto out_file;
@@ -813,16 +813,12 @@ static int igt_shared_ctx_exec(void *arg)
                        struct i915_gem_context *ctx;
                        struct intel_context *ce;
 
-                       ctx = kernel_context(i915);
+                       ctx = kernel_context(i915, ctx_vm(parent));
                        if (IS_ERR(ctx)) {
                                err = PTR_ERR(ctx);
                                goto out_test;
                        }
 
-                       mutex_lock(&ctx->mutex);
-                       __assign_ppgtt(ctx, ctx_vm(parent));
-                       mutex_unlock(&ctx->mutex);
-
                        ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
                        GEM_BUG_ON(IS_ERR(ce));
 
@@ -1875,125 +1871,6 @@ out_file:
        return err;
 }
 
-static bool skip_unused_engines(struct intel_context *ce, void *data)
-{
-       return !ce->state;
-}
-
-static void mock_barrier_task(void *data)
-{
-       unsigned int *counter = data;
-
-       ++*counter;
-}
-
-static int mock_context_barrier(void *arg)
-{
-#undef pr_fmt
-#define pr_fmt(x) "context_barrier_task():" # x
-       struct drm_i915_private *i915 = arg;
-       struct i915_gem_context *ctx;
-       struct i915_request *rq;
-       unsigned int counter;
-       int err;
-
-       /*
-        * The context barrier provides us with a callback after it emits
-        * a request; useful for retiring old state after loading new.
-        */
-
-       ctx = mock_context(i915, "mock");
-       if (!ctx)
-               return -ENOMEM;
-
-       counter = 0;
-       err = context_barrier_task(ctx, 0, NULL, NULL, NULL,
-                                  mock_barrier_task, &counter);
-       if (err) {
-               pr_err("Failed at line %d, err=%d\n", __LINE__, err);
-               goto out;
-       }
-       if (counter == 0) {
-               pr_err("Did not retire immediately with 0 engines\n");
-               err = -EINVAL;
-               goto out;
-       }
-
-       counter = 0;
-       err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines,
-                                  NULL, NULL, mock_barrier_task, &counter);
-       if (err) {
-               pr_err("Failed at line %d, err=%d\n", __LINE__, err);
-               goto out;
-       }
-       if (counter == 0) {
-               pr_err("Did not retire immediately for all unused engines\n");
-               err = -EINVAL;
-               goto out;
-       }
-
-       rq = igt_request_alloc(ctx, i915->gt.engine[RCS0]);
-       if (IS_ERR(rq)) {
-               pr_err("Request allocation failed!\n");
-               goto out;
-       }
-       i915_request_add(rq);
-
-       counter = 0;
-       context_barrier_inject_fault = BIT(RCS0);
-       err = context_barrier_task(ctx, ALL_ENGINES, NULL, NULL, NULL,
-                                  mock_barrier_task, &counter);
-       context_barrier_inject_fault = 0;
-       if (err == -ENXIO)
-               err = 0;
-       else
-               pr_err("Did not hit fault injection!\n");
-       if (counter != 0) {
-               pr_err("Invoked callback on error!\n");
-               err = -EIO;
-       }
-       if (err)
-               goto out;
-
-       counter = 0;
-       err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines,
-                                  NULL, NULL, mock_barrier_task, &counter);
-       if (err) {
-               pr_err("Failed at line %d, err=%d\n", __LINE__, err);
-               goto out;
-       }
-       mock_device_flush(i915);
-       if (counter == 0) {
-               pr_err("Did not retire on each active engines\n");
-               err = -EINVAL;
-               goto out;
-       }
-
-out:
-       mock_context_close(ctx);
-       return err;
-#undef pr_fmt
-#define pr_fmt(x) x
-}
-
-int i915_gem_context_mock_selftests(void)
-{
-       static const struct i915_subtest tests[] = {
-               SUBTEST(mock_context_barrier),
-       };
-       struct drm_i915_private *i915;
-       int err;
-
-       i915 = mock_gem_device();
-       if (!i915)
-               return -ENOMEM;
-
-       err = i915_subtests(tests, i915);
-
-       mock_destroy_device(i915);
-       return err;
-}
-
 int i915_gem_context_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
index dd74bc0..ffae7df 100644 (file)
@@ -35,7 +35,7 @@ static int igt_dmabuf_export(void *arg)
 static int igt_dmabuf_import_self(void *arg)
 {
        struct drm_i915_private *i915 = arg;
-       struct drm_i915_gem_object *obj;
+       struct drm_i915_gem_object *obj, *import_obj;
        struct drm_gem_object *import;
        struct dma_buf *dmabuf;
        int err;
@@ -65,10 +65,19 @@ static int igt_dmabuf_import_self(void *arg)
                err = -EINVAL;
                goto out_import;
        }
+       import_obj = to_intel_bo(import);
+
+       i915_gem_object_lock(import_obj, NULL);
+       err = __i915_gem_object_get_pages(import_obj);
+       i915_gem_object_unlock(import_obj);
+       if (err) {
+               pr_err("Same object dma-buf get_pages failed!\n");
+               goto out_import;
+       }
 
        err = 0;
 out_import:
-       i915_gem_object_put(to_intel_bo(import));
+       i915_gem_object_put(import_obj);
 out_dmabuf:
        dma_buf_put(dmabuf);
 out:
@@ -76,6 +85,180 @@ out:
        return err;
 }
 
+static int igt_dmabuf_import_same_driver_lmem(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct intel_memory_region *lmem = i915->mm.regions[INTEL_REGION_LMEM];
+       struct drm_i915_gem_object *obj;
+       struct drm_gem_object *import;
+       struct dma_buf *dmabuf;
+       int err;
+
+       if (!lmem)
+               return 0;
+
+       force_different_devices = true;
+
+       obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &lmem, 1);
+       if (IS_ERR(obj)) {
+               pr_err("__i915_gem_object_create_user failed with err=%ld\n",
+                      PTR_ERR(obj));
+               err = PTR_ERR(obj);
+               goto out_ret;
+       }
+
+       dmabuf = i915_gem_prime_export(&obj->base, 0);
+       if (IS_ERR(dmabuf)) {
+               pr_err("i915_gem_prime_export failed with err=%ld\n",
+                      PTR_ERR(dmabuf));
+               err = PTR_ERR(dmabuf);
+               goto out;
+       }
+
+       /*
+        * We expect an import of an LMEM-only object to fail with
+        * -EOPNOTSUPP because it can't be migrated to SMEM.
+        */
+       import = i915_gem_prime_import(&i915->drm, dmabuf);
+       if (!IS_ERR(import)) {
+               drm_gem_object_put(import);
+               pr_err("i915_gem_prime_import succeeded when it shouldn't have\n");
+               err = -EINVAL;
+       } else if (PTR_ERR(import) != -EOPNOTSUPP) {
+               pr_err("i915_gem_prime_import failed with the wrong err=%ld\n",
+                      PTR_ERR(import));
+               err = PTR_ERR(import);
+       }
+
+       dma_buf_put(dmabuf);
+out:
+       i915_gem_object_put(obj);
+out_ret:
+       force_different_devices = false;
+       return err;
+}
+
+static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
+                                        struct intel_memory_region **regions,
+                                        unsigned int num_regions)
+{
+       struct drm_i915_gem_object *obj, *import_obj;
+       struct drm_gem_object *import;
+       struct dma_buf *dmabuf;
+       struct dma_buf_attachment *import_attach;
+       struct sg_table *st;
+       long timeout;
+       int err;
+
+       force_different_devices = true;
+
+       obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
+                                           regions, num_regions);
+       if (IS_ERR(obj)) {
+               pr_err("__i915_gem_object_create_user failed with err=%ld\n",
+                      PTR_ERR(obj));
+               err = PTR_ERR(obj);
+               goto out_ret;
+       }
+
+       dmabuf = i915_gem_prime_export(&obj->base, 0);
+       if (IS_ERR(dmabuf)) {
+               pr_err("i915_gem_prime_export failed with err=%ld\n",
+                      PTR_ERR(dmabuf));
+               err = PTR_ERR(dmabuf);
+               goto out;
+       }
+
+       import = i915_gem_prime_import(&i915->drm, dmabuf);
+       if (IS_ERR(import)) {
+               pr_err("i915_gem_prime_import failed with err=%ld\n",
+                      PTR_ERR(import));
+               err = PTR_ERR(import);
+               goto out_dmabuf;
+       }
+
+       if (import == &obj->base) {
+               pr_err("i915_gem_prime_import reused gem object!\n");
+               err = -EINVAL;
+               goto out_import;
+       }
+
+       import_obj = to_intel_bo(import);
+
+       i915_gem_object_lock(import_obj, NULL);
+       err = __i915_gem_object_get_pages(import_obj);
+       if (err) {
+               pr_err("Different objects dma-buf get_pages failed!\n");
+               i915_gem_object_unlock(import_obj);
+               goto out_import;
+       }
+
+       /*
+        * If the exported object is not in system memory, something
+        * weird is going on. TODO: when p2p is supported, this will no
+        * longer be considered weird.
+        */
+       if (obj->mm.region != i915->mm.regions[INTEL_REGION_SMEM]) {
+               pr_err("Exported dma-buf is not in system memory\n");
+               err = -EINVAL;
+       }
+
+       i915_gem_object_unlock(import_obj);
+
+       /* Now try to fake an importer */
+       import_attach = dma_buf_attach(dmabuf, obj->base.dev->dev);
+       if (IS_ERR(import_attach)) {
+               err = PTR_ERR(import_attach);
+               goto out_import;
+       }
+
+       st = dma_buf_map_attachment(import_attach, DMA_BIDIRECTIONAL);
+       if (IS_ERR(st)) {
+               err = PTR_ERR(st);
+               goto out_detach;
+       }
+
+       timeout = dma_resv_wait_timeout(dmabuf->resv, false, true, 5 * HZ);
+       if (!timeout) {
+               pr_err("dmabuf wait for exclusive fence timed out.\n");
+               timeout = -ETIME;
+       }
+       err = timeout > 0 ? 0 : timeout;
+       dma_buf_unmap_attachment(import_attach, st, DMA_BIDIRECTIONAL);
+out_detach:
+       dma_buf_detach(dmabuf, import_attach);
+out_import:
+       i915_gem_object_put(import_obj);
+out_dmabuf:
+       dma_buf_put(dmabuf);
+out:
+       i915_gem_object_put(obj);
+out_ret:
+       force_different_devices = false;
+       return err;
+}
+
+static int igt_dmabuf_import_same_driver_smem(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct intel_memory_region *smem = i915->mm.regions[INTEL_REGION_SMEM];
+
+       return igt_dmabuf_import_same_driver(i915, &smem, 1);
+}
+
+static int igt_dmabuf_import_same_driver_lmem_smem(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct intel_memory_region *regions[2];
+
+       if (!i915->mm.regions[INTEL_REGION_LMEM])
+               return 0;
+
+       regions[0] = i915->mm.regions[INTEL_REGION_LMEM];
+       regions[1] = i915->mm.regions[INTEL_REGION_SMEM];
+       return igt_dmabuf_import_same_driver(i915, regions, 2);
+}
+
 static int igt_dmabuf_import(void *arg)
 {
        struct drm_i915_private *i915 = arg;
@@ -286,6 +469,9 @@ int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_dmabuf_export),
+               SUBTEST(igt_dmabuf_import_same_driver_lmem),
+               SUBTEST(igt_dmabuf_import_same_driver_smem),
+               SUBTEST(igt_dmabuf_import_same_driver_lmem_smem),
        };
 
        return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_migrate.c
new file mode 100644 (file)
index 0000000..28a700f
--- /dev/null
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020-2021 Intel Corporation
+ */
+
+#include "gt/intel_migrate.h"
+
+static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
+                                bool fill)
+{
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       unsigned int i, count = obj->base.size / sizeof(u32);
+       enum i915_map_type map_type =
+               i915_coherent_map_type(i915, obj, false);
+       u32 *cur;
+       int err = 0;
+
+       assert_object_held(obj);
+       cur = i915_gem_object_pin_map(obj, map_type);
+       if (IS_ERR(cur))
+               return PTR_ERR(cur);
+
+       if (fill)
+               for (i = 0; i < count; ++i)
+                       *cur++ = i;
+       else
+               for (i = 0; i < count; ++i)
+                       if (*cur++ != i) {
+                               pr_err("Object content mismatch at location %d of %d\n", i, count);
+                               err = -EINVAL;
+                               break;
+                       }
+
+       i915_gem_object_unpin_map(obj);
+
+       return err;
+}
+
+static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
+                             enum intel_region_id dst)
+{
+       struct drm_i915_private *i915 = gt->i915;
+       struct intel_memory_region *src_mr = i915->mm.regions[src];
+       struct drm_i915_gem_object *obj;
+       struct i915_gem_ww_ctx ww;
+       int err = 0;
+
+       GEM_BUG_ON(!src_mr);
+
+       /* Switch object backing-store on create */
+       obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0, 0);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       for_i915_gem_ww(&ww, err, true) {
+               err = i915_gem_object_lock(obj, &ww);
+               if (err)
+                       continue;
+
+               err = igt_fill_check_buffer(obj, true);
+               if (err)
+                       continue;
+
+               err = i915_gem_object_migrate(obj, &ww, dst);
+               if (err)
+                       continue;
+
+               err = i915_gem_object_pin_pages(obj);
+               if (err)
+                       continue;
+
+               if (i915_gem_object_can_migrate(obj, src))
+                       err = -EINVAL;
+
+               i915_gem_object_unpin_pages(obj);
+               err = i915_gem_object_wait_migration(obj, true);
+               if (err)
+                       continue;
+
+               err = igt_fill_check_buffer(obj, false);
+       }
+       i915_gem_object_put(obj);
+
+       return err;
+}
+
+static int igt_smem_create_migrate(void *arg)
+{
+       return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_SMEM);
+}
+
+static int igt_lmem_create_migrate(void *arg)
+{
+       return igt_create_migrate(arg, INTEL_REGION_SMEM, INTEL_REGION_LMEM);
+}
+
+static int igt_same_create_migrate(void *arg)
+{
+       return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_LMEM);
+}
+
+static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
+                                 struct drm_i915_gem_object *obj)
+{
+       int err;
+
+       err = i915_gem_object_lock(obj, ww);
+       if (err)
+               return err;
+
+       if (i915_gem_object_is_lmem(obj)) {
+               err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
+               if (err) {
+                       pr_err("Object failed migration to smem\n");
+                       return err;
+               }
+
+               if (i915_gem_object_is_lmem(obj)) {
+                       pr_err("object still backed by lmem\n");
+                       err = -EINVAL;
+               }
+
+               if (!i915_gem_object_has_struct_page(obj)) {
+                       pr_err("object not backed by struct page\n");
+                       err = -EINVAL;
+               }
+
+       } else {
+               err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM);
+               if (err) {
+                       pr_err("Object failed migration to lmem\n");
+                       return err;
+               }
+
+               if (i915_gem_object_has_struct_page(obj)) {
+                       pr_err("object still backed by struct page\n");
+                       err = -EINVAL;
+               }
+
+               if (!i915_gem_object_is_lmem(obj)) {
+                       pr_err("object not backed by lmem\n");
+                       err = -EINVAL;
+               }
+       }
+
+       return err;
+}
+
+static int igt_lmem_pages_migrate(void *arg)
+{
+       struct intel_gt *gt = arg;
+       struct drm_i915_private *i915 = gt->i915;
+       struct drm_i915_gem_object *obj;
+       struct i915_gem_ww_ctx ww;
+       struct i915_request *rq;
+       int err;
+       int i;
+
+       /* From LMEM to shmem and back again */
+
+       obj = i915_gem_object_create_lmem(i915, SZ_2M, 0);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       /* Initial GPU fill, sync, CPU initialization. */
+       for_i915_gem_ww(&ww, err, true) {
+               err = i915_gem_object_lock(obj, &ww);
+               if (err)
+                       continue;
+
+               err = ____i915_gem_object_get_pages(obj);
+               if (err)
+                       continue;
+
+               err = intel_migrate_clear(&gt->migrate, &ww, NULL,
+                                         obj->mm.pages->sgl, obj->cache_level,
+                                         i915_gem_object_is_lmem(obj),
+                                         0xdeadbeaf, &rq);
+               if (rq) {
+                       dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+                       i915_request_put(rq);
+               }
+               if (err)
+                       continue;
+
+               err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE,
+                                          5 * HZ);
+               if (err)
+                       continue;
+
+               err = igt_fill_check_buffer(obj, true);
+               if (err)
+                       continue;
+       }
+       if (err)
+               goto out_put;
+
+       /*
+        * Migrate to and from smem without explicitly syncing.
+        * Finalize with data in smem for fast readout.
+        */
+       for (i = 1; i <= 5; ++i) {
+               for_i915_gem_ww(&ww, err, true)
+                       err = lmem_pages_migrate_one(&ww, obj);
+               if (err)
+                       goto out_put;
+       }
+
+       err = i915_gem_object_lock_interruptible(obj, NULL);
+       if (err)
+               goto out_put;
+
+       /* Finally sync migration and check content. */
+       err = i915_gem_object_wait_migration(obj, true);
+       if (err)
+               goto out_unlock;
+
+       err = igt_fill_check_buffer(obj, false);
+
+out_unlock:
+       i915_gem_object_unlock(obj);
+out_put:
+       i915_gem_object_put(obj);
+
+       return err;
+}
+
+int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_smem_create_migrate),
+               SUBTEST(igt_lmem_create_migrate),
+               SUBTEST(igt_same_create_migrate),
+               SUBTEST(igt_lmem_pages_migrate),
+       };
+
+       if (!HAS_LMEM(i915))
+               return 0;
+
+       return intel_gt_live_subtests(tests, &i915->gt);
+}
index bc32622..b20f562 100644 (file)
@@ -573,6 +573,14 @@ err:
        return 0;
 }
 
+static enum i915_mmap_type default_mapping(struct drm_i915_private *i915)
+{
+       if (HAS_LMEM(i915))
+               return I915_MMAP_TYPE_FIXED;
+
+       return I915_MMAP_TYPE_GTT;
+}
+
 static bool assert_mmap_offset(struct drm_i915_private *i915,
                               unsigned long size,
                               int expected)
@@ -585,7 +593,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
        if (IS_ERR(obj))
                return expected && expected == PTR_ERR(obj);
 
-       ret = __assign_mmap_offset(obj, I915_MMAP_TYPE_GTT, &offset, NULL);
+       ret = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
        i915_gem_object_put(obj);
 
        return ret == expected;
@@ -689,7 +697,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
                goto out;
        }
 
-       err = __assign_mmap_offset(obj, I915_MMAP_TYPE_GTT, &offset, NULL);
+       err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL);
        if (err) {
                pr_err("Unable to insert object into reclaimed hole\n");
                goto err_obj;
@@ -831,34 +839,25 @@ static int wc_check(struct drm_i915_gem_object *obj)
 
 static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
 {
-       if (type == I915_MMAP_TYPE_GTT &&
-           !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
-               return false;
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       bool no_map;
 
-       if (type != I915_MMAP_TYPE_GTT &&
-           !i915_gem_object_has_struct_page(obj) &&
-           !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
+       if (HAS_LMEM(i915))
+               return type == I915_MMAP_TYPE_FIXED;
+       else if (type == I915_MMAP_TYPE_FIXED)
                return false;
 
-       return true;
-}
-
-static void object_set_placements(struct drm_i915_gem_object *obj,
-                                 struct intel_memory_region **placements,
-                                 unsigned int n_placements)
-{
-       GEM_BUG_ON(!n_placements);
+       if (type == I915_MMAP_TYPE_GTT &&
+           !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
+               return false;
 
-       if (n_placements == 1) {
-               struct drm_i915_private *i915 = to_i915(obj->base.dev);
-               struct intel_memory_region *mr = placements[0];
+       i915_gem_object_lock(obj, NULL);
+       no_map = (type != I915_MMAP_TYPE_GTT &&
+                 !i915_gem_object_has_struct_page(obj) &&
+                 !i915_gem_object_has_iomem(obj));
+       i915_gem_object_unlock(obj);
 
-               obj->mm.placements = &i915->mm.regions[mr->id];
-               obj->mm.n_placements = 1;
-       } else {
-               obj->mm.placements = placements;
-               obj->mm.n_placements = n_placements;
-       }
+       return !no_map;
 }
 
 #define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
@@ -955,18 +954,18 @@ static int igt_mmap(void *arg)
                        struct drm_i915_gem_object *obj;
                        int err;
 
-                       obj = i915_gem_object_create_region(mr, sizes[i], I915_BO_ALLOC_USER);
+                       obj = __i915_gem_object_create_user(i915, sizes[i], &mr, 1);
                        if (obj == ERR_PTR(-ENODEV))
                                continue;
 
                        if (IS_ERR(obj))
                                return PTR_ERR(obj);
 
-                       object_set_placements(obj, &mr, 1);
-
                        err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
                        if (err == 0)
                                err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);
+                       if (err == 0)
+                               err = __igt_mmap(i915, obj, I915_MMAP_TYPE_FIXED);
 
                        i915_gem_object_put(obj);
                        if (err)
@@ -984,14 +983,21 @@ static const char *repr_mmap_type(enum i915_mmap_type type)
        case I915_MMAP_TYPE_WB: return "wb";
        case I915_MMAP_TYPE_WC: return "wc";
        case I915_MMAP_TYPE_UC: return "uc";
+       case I915_MMAP_TYPE_FIXED: return "fixed";
        default: return "unknown";
        }
 }
 
-static bool can_access(const struct drm_i915_gem_object *obj)
+static bool can_access(struct drm_i915_gem_object *obj)
 {
-       return i915_gem_object_has_struct_page(obj) ||
-              i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM);
+       bool access;
+
+       i915_gem_object_lock(obj, NULL);
+       access = i915_gem_object_has_struct_page(obj) ||
+               i915_gem_object_has_iomem(obj);
+       i915_gem_object_unlock(obj);
+
+       return access;
 }
 
 static int __igt_mmap_access(struct drm_i915_private *i915,
@@ -1075,15 +1081,13 @@ static int igt_mmap_access(void *arg)
                struct drm_i915_gem_object *obj;
                int err;
 
-               obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
+               obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
                if (obj == ERR_PTR(-ENODEV))
                        continue;
 
                if (IS_ERR(obj))
                        return PTR_ERR(obj);
 
-               object_set_placements(obj, &mr, 1);
-
                err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_GTT);
                if (err == 0)
                        err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WB);
@@ -1091,6 +1095,8 @@ static int igt_mmap_access(void *arg)
                        err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_WC);
                if (err == 0)
                        err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_UC);
+               if (err == 0)
+                       err = __igt_mmap_access(i915, obj, I915_MMAP_TYPE_FIXED);
 
                i915_gem_object_put(obj);
                if (err)
@@ -1220,18 +1226,18 @@ static int igt_mmap_gpu(void *arg)
                struct drm_i915_gem_object *obj;
                int err;
 
-               obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
+               obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
                if (obj == ERR_PTR(-ENODEV))
                        continue;
 
                if (IS_ERR(obj))
                        return PTR_ERR(obj);
 
-               object_set_placements(obj, &mr, 1);
-
                err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
                if (err == 0)
                        err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);
+               if (err == 0)
+                       err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_FIXED);
 
                i915_gem_object_put(obj);
                if (err)
@@ -1375,18 +1381,18 @@ static int igt_mmap_revoke(void *arg)
                struct drm_i915_gem_object *obj;
                int err;
 
-               obj = i915_gem_object_create_region(mr, PAGE_SIZE, I915_BO_ALLOC_USER);
+               obj = __i915_gem_object_create_user(i915, PAGE_SIZE, &mr, 1);
                if (obj == ERR_PTR(-ENODEV))
                        continue;
 
                if (IS_ERR(obj))
                        return PTR_ERR(obj);
 
-               object_set_placements(obj, &mr, 1);
-
                err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
                if (err == 0)
                        err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);
+               if (err == 0)
+                       err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_FIXED);
 
                i915_gem_object_put(obj);
                if (err)
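
The reworked can_mmap() at the top of this section gates which mmap modes a buffer object offers: on device-local-memory (LMEM) hardware only the FIXED mode is exposed, FIXED is rejected everywhere else, GTT mmaps require a mappable aperture, and every other mode needs struct-page or iomem backing, now checked under the object lock. Below is a minimal userspace sketch of that decision order; the boolean parameters stand in for the i915 helpers (HAS_LMEM(), i915_ggtt_has_aperture(), and so on) and are assumptions of this model, not real API calls.

/* Sketch only: userspace model of the can_mmap() ordering above.
 * The has_* inputs replace the i915 helpers (which take locks and
 * inspect device state); here they are plain booleans. */
#include <stdbool.h>
#include <stdio.h>

enum mmap_type { MMAP_GTT, MMAP_WC, MMAP_WB, MMAP_UC, MMAP_FIXED };

static bool can_mmap(enum mmap_type type, bool has_lmem, bool has_aperture,
                     bool has_struct_page, bool has_iomem)
{
        if (has_lmem)                   /* discrete: only FIXED is offered */
                return type == MMAP_FIXED;
        if (type == MMAP_FIXED)         /* integrated: FIXED is rejected */
                return false;
        if (type == MMAP_GTT && !has_aperture)
                return false;
        /* every other type needs CPU-accessible backing store */
        return has_struct_page || has_iomem;
}

int main(void)
{
        printf("FIXED on lmem:      %d\n", can_mmap(MMAP_FIXED, true, false, false, true));
        printf("GTT w/o aperture:   %d\n", can_mmap(MMAP_GTT, false, false, true, false));
        printf("WC w/ struct page:  %d\n", can_mmap(MMAP_WC, false, true, true, false));
        return 0;
}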
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
deleted file mode 100644 (file)
index 8c335d1..0000000
+++ /dev/null
@@ -1,597 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#include <linux/sort.h>
-
-#include "gt/intel_gt.h"
-#include "gt/intel_engine_user.h"
-
-#include "i915_selftest.h"
-
-#include "gem/i915_gem_context.h"
-#include "selftests/igt_flush_test.h"
-#include "selftests/i915_random.h"
-#include "selftests/mock_drm.h"
-#include "huge_gem_object.h"
-#include "mock_context.h"
-
-static int wrap_ktime_compare(const void *A, const void *B)
-{
-       const ktime_t *a = A, *b = B;
-
-       return ktime_compare(*a, *b);
-}
-
-static int __perf_fill_blt(struct drm_i915_gem_object *obj)
-{
-       struct drm_i915_private *i915 = to_i915(obj->base.dev);
-       int inst = 0;
-
-       do {
-               struct intel_engine_cs *engine;
-               ktime_t t[5];
-               int pass;
-               int err;
-
-               engine = intel_engine_lookup_user(i915,
-                                                 I915_ENGINE_CLASS_COPY,
-                                                 inst++);
-               if (!engine)
-                       return 0;
-
-               intel_engine_pm_get(engine);
-               for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
-                       struct intel_context *ce = engine->kernel_context;
-                       ktime_t t0, t1;
-
-                       t0 = ktime_get();
-
-                       err = i915_gem_object_fill_blt(obj, ce, 0);
-                       if (err)
-                               break;
-
-                       err = i915_gem_object_wait(obj,
-                                                  I915_WAIT_ALL,
-                                                  MAX_SCHEDULE_TIMEOUT);
-                       if (err)
-                               break;
-
-                       t1 = ktime_get();
-                       t[pass] = ktime_sub(t1, t0);
-               }
-               intel_engine_pm_put(engine);
-               if (err)
-                       return err;
-
-               sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
-               pr_info("%s: blt %zd KiB fill: %lld MiB/s\n",
-                       engine->name,
-                       obj->base.size >> 10,
-                       div64_u64(mul_u32_u32(4 * obj->base.size,
-                                             1000 * 1000 * 1000),
-                                 t[1] + 2 * t[2] + t[3]) >> 20);
-       } while (1);
-}
-
-static int perf_fill_blt(void *arg)
-{
-       struct drm_i915_private *i915 = arg;
-       static const unsigned long sizes[] = {
-               SZ_4K,
-               SZ_64K,
-               SZ_2M,
-               SZ_64M
-       };
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(sizes); i++) {
-               struct drm_i915_gem_object *obj;
-               int err;
-
-               obj = i915_gem_object_create_internal(i915, sizes[i]);
-               if (IS_ERR(obj))
-                       return PTR_ERR(obj);
-
-               err = __perf_fill_blt(obj);
-               i915_gem_object_put(obj);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
-static int __perf_copy_blt(struct drm_i915_gem_object *src,
-                          struct drm_i915_gem_object *dst)
-{
-       struct drm_i915_private *i915 = to_i915(src->base.dev);
-       int inst = 0;
-
-       do {
-               struct intel_engine_cs *engine;
-               ktime_t t[5];
-               int pass;
-               int err = 0;
-
-               engine = intel_engine_lookup_user(i915,
-                                                 I915_ENGINE_CLASS_COPY,
-                                                 inst++);
-               if (!engine)
-                       return 0;
-
-               intel_engine_pm_get(engine);
-               for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
-                       struct intel_context *ce = engine->kernel_context;
-                       ktime_t t0, t1;
-
-                       t0 = ktime_get();
-
-                       err = i915_gem_object_copy_blt(src, dst, ce);
-                       if (err)
-                               break;
-
-                       err = i915_gem_object_wait(dst,
-                                                  I915_WAIT_ALL,
-                                                  MAX_SCHEDULE_TIMEOUT);
-                       if (err)
-                               break;
-
-                       t1 = ktime_get();
-                       t[pass] = ktime_sub(t1, t0);
-               }
-               intel_engine_pm_put(engine);
-               if (err)
-                       return err;
-
-               sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
-               pr_info("%s: blt %zd KiB copy: %lld MiB/s\n",
-                       engine->name,
-                       src->base.size >> 10,
-                       div64_u64(mul_u32_u32(4 * src->base.size,
-                                             1000 * 1000 * 1000),
-                                 t[1] + 2 * t[2] + t[3]) >> 20);
-       } while (1);
-}
-
-static int perf_copy_blt(void *arg)
-{
-       struct drm_i915_private *i915 = arg;
-       static const unsigned long sizes[] = {
-               SZ_4K,
-               SZ_64K,
-               SZ_2M,
-               SZ_64M
-       };
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(sizes); i++) {
-               struct drm_i915_gem_object *src, *dst;
-               int err;
-
-               src = i915_gem_object_create_internal(i915, sizes[i]);
-               if (IS_ERR(src))
-                       return PTR_ERR(src);
-
-               dst = i915_gem_object_create_internal(i915, sizes[i]);
-               if (IS_ERR(dst)) {
-                       err = PTR_ERR(dst);
-                       goto err_src;
-               }
-
-               err = __perf_copy_blt(src, dst);
-
-               i915_gem_object_put(dst);
-err_src:
-               i915_gem_object_put(src);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
-struct igt_thread_arg {
-       struct intel_engine_cs *engine;
-       struct i915_gem_context *ctx;
-       struct file *file;
-       struct rnd_state prng;
-       unsigned int n_cpus;
-};
-
-static int igt_fill_blt_thread(void *arg)
-{
-       struct igt_thread_arg *thread = arg;
-       struct intel_engine_cs *engine = thread->engine;
-       struct rnd_state *prng = &thread->prng;
-       struct drm_i915_gem_object *obj;
-       struct i915_gem_context *ctx;
-       struct intel_context *ce;
-       unsigned int prio;
-       IGT_TIMEOUT(end);
-       u64 total, max;
-       int err;
-
-       ctx = thread->ctx;
-       if (!ctx) {
-               ctx = live_context_for_engine(engine, thread->file);
-               if (IS_ERR(ctx))
-                       return PTR_ERR(ctx);
-
-               prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
-               ctx->sched.priority = prio;
-       }
-
-       ce = i915_gem_context_get_engine(ctx, 0);
-       GEM_BUG_ON(IS_ERR(ce));
-
-       /*
-        * If we have a tiny shared address space, like for the GGTT
-        * then we can't be too greedy.
-        */
-       max = ce->vm->total;
-       if (i915_is_ggtt(ce->vm) || thread->ctx)
-               max = div_u64(max, thread->n_cpus);
-       max >>= 4;
-
-       total = PAGE_SIZE;
-       do {
-               /* Aim to keep the runtime under reasonable bounds! */
-               const u32 max_phys_size = SZ_64K;
-               u32 val = prandom_u32_state(prng);
-               u32 phys_sz;
-               u32 sz;
-               u32 *vaddr;
-               u32 i;
-
-               total = min(total, max);
-               sz = i915_prandom_u32_max_state(total, prng) + 1;
-               phys_sz = sz % max_phys_size + 1;
-
-               sz = round_up(sz, PAGE_SIZE);
-               phys_sz = round_up(phys_sz, PAGE_SIZE);
-               phys_sz = min(phys_sz, sz);
-
-               pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
-                        phys_sz, sz, val);
-
-               obj = huge_gem_object(engine->i915, phys_sz, sz);
-               if (IS_ERR(obj)) {
-                       err = PTR_ERR(obj);
-                       goto err_flush;
-               }
-
-               vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
-               if (IS_ERR(vaddr)) {
-                       err = PTR_ERR(vaddr);
-                       goto err_put;
-               }
-
-               /*
-                * Make sure the potentially async clflush does its job, if
-                * required.
-                */
-               memset32(vaddr, val ^ 0xdeadbeaf,
-                        huge_gem_object_phys_size(obj) / sizeof(u32));
-
-               if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
-                       obj->cache_dirty = true;
-
-               err = i915_gem_object_fill_blt(obj, ce, val);
-               if (err)
-                       goto err_unpin;
-
-               err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
-               if (err)
-                       goto err_unpin;
-
-               for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
-                       if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
-                               drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
-
-                       if (vaddr[i] != val) {
-                               pr_err("vaddr[%u]=%x, expected=%x\n", i,
-                                      vaddr[i], val);
-                               err = -EINVAL;
-                               goto err_unpin;
-                       }
-               }
-
-               i915_gem_object_unpin_map(obj);
-               i915_gem_object_put(obj);
-
-               total <<= 1;
-       } while (!time_after(jiffies, end));
-
-       goto err_flush;
-
-err_unpin:
-       i915_gem_object_unpin_map(obj);
-err_put:
-       i915_gem_object_put(obj);
-err_flush:
-       if (err == -ENOMEM)
-               err = 0;
-
-       intel_context_put(ce);
-       return err;
-}
-
-static int igt_copy_blt_thread(void *arg)
-{
-       struct igt_thread_arg *thread = arg;
-       struct intel_engine_cs *engine = thread->engine;
-       struct rnd_state *prng = &thread->prng;
-       struct drm_i915_gem_object *src, *dst;
-       struct i915_gem_context *ctx;
-       struct intel_context *ce;
-       unsigned int prio;
-       IGT_TIMEOUT(end);
-       u64 total, max;
-       int err;
-
-       ctx = thread->ctx;
-       if (!ctx) {
-               ctx = live_context_for_engine(engine, thread->file);
-               if (IS_ERR(ctx))
-                       return PTR_ERR(ctx);
-
-               prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng);
-               ctx->sched.priority = prio;
-       }
-
-       ce = i915_gem_context_get_engine(ctx, 0);
-       GEM_BUG_ON(IS_ERR(ce));
-
-       /*
-        * If we have a tiny shared address space, like for the GGTT
-        * then we can't be too greedy.
-        */
-       max = ce->vm->total;
-       if (i915_is_ggtt(ce->vm) || thread->ctx)
-               max = div_u64(max, thread->n_cpus);
-       max >>= 4;
-
-       total = PAGE_SIZE;
-       do {
-               /* Aim to keep the runtime under reasonable bounds! */
-               const u32 max_phys_size = SZ_64K;
-               u32 val = prandom_u32_state(prng);
-               u32 phys_sz;
-               u32 sz;
-               u32 *vaddr;
-               u32 i;
-
-               total = min(total, max);
-               sz = i915_prandom_u32_max_state(total, prng) + 1;
-               phys_sz = sz % max_phys_size + 1;
-
-               sz = round_up(sz, PAGE_SIZE);
-               phys_sz = round_up(phys_sz, PAGE_SIZE);
-               phys_sz = min(phys_sz, sz);
-
-               pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
-                        phys_sz, sz, val);
-
-               src = huge_gem_object(engine->i915, phys_sz, sz);
-               if (IS_ERR(src)) {
-                       err = PTR_ERR(src);
-                       goto err_flush;
-               }
-
-               vaddr = i915_gem_object_pin_map_unlocked(src, I915_MAP_WB);
-               if (IS_ERR(vaddr)) {
-                       err = PTR_ERR(vaddr);
-                       goto err_put_src;
-               }
-
-               memset32(vaddr, val,
-                        huge_gem_object_phys_size(src) / sizeof(u32));
-
-               i915_gem_object_unpin_map(src);
-
-               if (!(src->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
-                       src->cache_dirty = true;
-
-               dst = huge_gem_object(engine->i915, phys_sz, sz);
-               if (IS_ERR(dst)) {
-                       err = PTR_ERR(dst);
-                       goto err_put_src;
-               }
-
-               vaddr = i915_gem_object_pin_map_unlocked(dst, I915_MAP_WB);
-               if (IS_ERR(vaddr)) {
-                       err = PTR_ERR(vaddr);
-                       goto err_put_dst;
-               }
-
-               memset32(vaddr, val ^ 0xdeadbeaf,
-                        huge_gem_object_phys_size(dst) / sizeof(u32));
-
-               if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
-                       dst->cache_dirty = true;
-
-               err = i915_gem_object_copy_blt(src, dst, ce);
-               if (err)
-                       goto err_unpin;
-
-               err = i915_gem_object_wait(dst, 0, MAX_SCHEDULE_TIMEOUT);
-               if (err)
-                       goto err_unpin;
-
-               for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); i += 17) {
-                       if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
-                               drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
-
-                       if (vaddr[i] != val) {
-                               pr_err("vaddr[%u]=%x, expected=%x\n", i,
-                                      vaddr[i], val);
-                               err = -EINVAL;
-                               goto err_unpin;
-                       }
-               }
-
-               i915_gem_object_unpin_map(dst);
-
-               i915_gem_object_put(src);
-               i915_gem_object_put(dst);
-
-               total <<= 1;
-       } while (!time_after(jiffies, end));
-
-       goto err_flush;
-
-err_unpin:
-       i915_gem_object_unpin_map(dst);
-err_put_dst:
-       i915_gem_object_put(dst);
-err_put_src:
-       i915_gem_object_put(src);
-err_flush:
-       if (err == -ENOMEM)
-               err = 0;
-
-       intel_context_put(ce);
-       return err;
-}
-
-static int igt_threaded_blt(struct intel_engine_cs *engine,
-                           int (*blt_fn)(void *arg),
-                           unsigned int flags)
-#define SINGLE_CTX BIT(0)
-{
-       struct igt_thread_arg *thread;
-       struct task_struct **tsk;
-       unsigned int n_cpus, i;
-       I915_RND_STATE(prng);
-       int err = 0;
-
-       n_cpus = num_online_cpus() + 1;
-
-       tsk = kcalloc(n_cpus, sizeof(struct task_struct *), GFP_KERNEL);
-       if (!tsk)
-               return 0;
-
-       thread = kcalloc(n_cpus, sizeof(struct igt_thread_arg), GFP_KERNEL);
-       if (!thread)
-               goto out_tsk;
-
-       thread[0].file = mock_file(engine->i915);
-       if (IS_ERR(thread[0].file)) {
-               err = PTR_ERR(thread[0].file);
-               goto out_thread;
-       }
-
-       if (flags & SINGLE_CTX) {
-               thread[0].ctx = live_context_for_engine(engine, thread[0].file);
-               if (IS_ERR(thread[0].ctx)) {
-                       err = PTR_ERR(thread[0].ctx);
-                       goto out_file;
-               }
-       }
-
-       for (i = 0; i < n_cpus; ++i) {
-               thread[i].engine = engine;
-               thread[i].file = thread[0].file;
-               thread[i].ctx = thread[0].ctx;
-               thread[i].n_cpus = n_cpus;
-               thread[i].prng =
-                       I915_RND_STATE_INITIALIZER(prandom_u32_state(&prng));
-
-               tsk[i] = kthread_run(blt_fn, &thread[i], "igt/blt-%d", i);
-               if (IS_ERR(tsk[i])) {
-                       err = PTR_ERR(tsk[i]);
-                       break;
-               }
-
-               get_task_struct(tsk[i]);
-       }
-
-       yield(); /* start all threads before we kthread_stop() */
-
-       for (i = 0; i < n_cpus; ++i) {
-               int status;
-
-               if (IS_ERR_OR_NULL(tsk[i]))
-                       continue;
-
-               status = kthread_stop(tsk[i]);
-               if (status && !err)
-                       err = status;
-
-               put_task_struct(tsk[i]);
-       }
-
-out_file:
-       fput(thread[0].file);
-out_thread:
-       kfree(thread);
-out_tsk:
-       kfree(tsk);
-       return err;
-}
-
-static int test_copy_engines(struct drm_i915_private *i915,
-                            int (*fn)(void *arg),
-                            unsigned int flags)
-{
-       struct intel_engine_cs *engine;
-       int ret;
-
-       for_each_uabi_class_engine(engine, I915_ENGINE_CLASS_COPY, i915) {
-               ret = igt_threaded_blt(engine, fn, flags);
-               if (ret)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int igt_fill_blt(void *arg)
-{
-       return test_copy_engines(arg, igt_fill_blt_thread, 0);
-}
-
-static int igt_fill_blt_ctx0(void *arg)
-{
-       return test_copy_engines(arg, igt_fill_blt_thread, SINGLE_CTX);
-}
-
-static int igt_copy_blt(void *arg)
-{
-       return test_copy_engines(arg, igt_copy_blt_thread, 0);
-}
-
-static int igt_copy_blt_ctx0(void *arg)
-{
-       return test_copy_engines(arg, igt_copy_blt_thread, SINGLE_CTX);
-}
-
-int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915)
-{
-       static const struct i915_subtest tests[] = {
-               SUBTEST(igt_fill_blt),
-               SUBTEST(igt_fill_blt_ctx0),
-               SUBTEST(igt_copy_blt),
-               SUBTEST(igt_copy_blt_ctx0),
-       };
-
-       if (intel_gt_is_wedged(&i915->gt))
-               return 0;
-
-       return i915_live_subtests(tests, i915);
-}
-
-int i915_gem_object_blt_perf_selftests(struct drm_i915_private *i915)
-{
-       static const struct i915_subtest tests[] = {
-               SUBTEST(perf_fill_blt),
-               SUBTEST(perf_copy_blt),
-       };
-
-       if (intel_gt_is_wedged(&i915->gt))
-               return 0;
-
-       return i915_live_subtests(tests, i915);
-}
index 3a6ce87..d43d8da 100644 (file)
@@ -25,13 +25,14 @@ static int mock_phys_object(void *arg)
                goto out;
        }
 
+       i915_gem_object_lock(obj, NULL);
        if (!i915_gem_object_has_struct_page(obj)) {
+               i915_gem_object_unlock(obj);
                err = -EINVAL;
                pr_err("shmem has no struct page\n");
                goto out_obj;
        }
 
-       i915_gem_object_lock(obj, NULL);
        err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
        i915_gem_object_unlock(obj);
        if (err) {
index 51b5a34..fee070d 100644 (file)
@@ -14,6 +14,7 @@ mock_context(struct drm_i915_private *i915,
 {
        struct i915_gem_context *ctx;
        struct i915_gem_engines *e;
+       struct intel_sseu null_sseu = {};
 
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
@@ -30,15 +31,6 @@ mock_context(struct drm_i915_private *i915,
 
        i915_gem_context_set_persistence(ctx);
 
-       mutex_init(&ctx->engines_mutex);
-       e = default_engines(ctx);
-       if (IS_ERR(e))
-               goto err_free;
-       RCU_INIT_POINTER(ctx->engines, e);
-
-       INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
-       mutex_init(&ctx->lut_mutex);
-
        if (name) {
                struct i915_ppgtt *ppgtt;
 
@@ -46,25 +38,29 @@ mock_context(struct drm_i915_private *i915,
 
                ppgtt = mock_ppgtt(i915, name);
                if (!ppgtt)
-                       goto err_put;
-
-               mutex_lock(&ctx->mutex);
-               __set_ppgtt(ctx, &ppgtt->vm);
-               mutex_unlock(&ctx->mutex);
+                       goto err_free;
 
+               ctx->vm = i915_vm_open(&ppgtt->vm);
                i915_vm_put(&ppgtt->vm);
        }
 
+       mutex_init(&ctx->engines_mutex);
+       e = default_engines(ctx, null_sseu);
+       if (IS_ERR(e))
+               goto err_vm;
+       RCU_INIT_POINTER(ctx->engines, e);
+
+       INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
+       mutex_init(&ctx->lut_mutex);
+
        return ctx;
 
+err_vm:
+       if (ctx->vm)
+               i915_vm_close(ctx->vm);
 err_free:
        kfree(ctx);
        return NULL;
-
-err_put:
-       i915_gem_context_set_closed(ctx);
-       i915_gem_context_put(ctx);
-       return NULL;
 }
 
 void mock_context_close(struct i915_gem_context *ctx)
@@ -80,20 +76,29 @@ void mock_init_contexts(struct drm_i915_private *i915)
 struct i915_gem_context *
 live_context(struct drm_i915_private *i915, struct file *file)
 {
+       struct drm_i915_file_private *fpriv = to_drm_file(file)->driver_priv;
+       struct i915_gem_proto_context *pc;
        struct i915_gem_context *ctx;
        int err;
        u32 id;
 
-       ctx = i915_gem_create_context(i915, 0);
+       pc = proto_context_create(i915, 0);
+       if (IS_ERR(pc))
+               return ERR_CAST(pc);
+
+       ctx = i915_gem_create_context(i915, pc);
+       proto_context_close(pc);
        if (IS_ERR(ctx))
                return ctx;
 
        i915_gem_context_set_no_error_capture(ctx);
 
-       err = gem_context_register(ctx, to_drm_file(file)->driver_priv, &id);
+       err = xa_alloc(&fpriv->context_xa, &id, NULL, xa_limit_32b, GFP_KERNEL);
        if (err < 0)
                goto err_ctx;
 
+       gem_context_register(ctx, fpriv, id);
+
        return ctx;
 
 err_ctx:
@@ -106,6 +111,7 @@ live_context_for_engine(struct intel_engine_cs *engine, struct file *file)
 {
        struct i915_gem_engines *engines;
        struct i915_gem_context *ctx;
+       struct intel_sseu null_sseu = {};
        struct intel_context *ce;
 
        engines = alloc_engines(1);
@@ -124,7 +130,7 @@ live_context_for_engine(struct intel_engine_cs *engine, struct file *file)
                return ERR_CAST(ce);
        }
 
-       intel_context_set_gem(ce, ctx);
+       intel_context_set_gem(ce, ctx, null_sseu);
        engines->engines[0] = ce;
        engines->num_engines = 1;
 
@@ -139,11 +145,24 @@ live_context_for_engine(struct intel_engine_cs *engine, struct file *file)
 }
 
 struct i915_gem_context *
-kernel_context(struct drm_i915_private *i915)
+kernel_context(struct drm_i915_private *i915,
+              struct i915_address_space *vm)
 {
        struct i915_gem_context *ctx;
+       struct i915_gem_proto_context *pc;
+
+       pc = proto_context_create(i915, 0);
+       if (IS_ERR(pc))
+               return ERR_CAST(pc);
+
+       if (vm) {
+               if (pc->vm)
+                       i915_vm_put(pc->vm);
+               pc->vm = i915_vm_get(vm);
+       }
 
-       ctx = i915_gem_create_context(i915, 0);
+       ctx = i915_gem_create_context(i915, pc);
+       proto_context_close(pc);
        if (IS_ERR(ctx))
                return ctx;
 
index 2a6121d..7a02fd9 100644 (file)
@@ -10,6 +10,7 @@
 struct file;
 struct drm_i915_private;
 struct intel_engine_cs;
+struct i915_address_space;
 
 void mock_init_contexts(struct drm_i915_private *i915);
 
@@ -25,7 +26,8 @@ live_context(struct drm_i915_private *i915, struct file *file);
 struct i915_gem_context *
 live_context_for_engine(struct intel_engine_cs *engine, struct file *file);
 
-struct i915_gem_context *kernel_context(struct drm_i915_private *i915);
+struct i915_gem_context *kernel_context(struct drm_i915_private *i915,
+                                       struct i915_address_space *vm);
 void kernel_context_close(struct i915_gem_context *ctx);
 
 #endif /* !__MOCK_CONTEXT_H */
index 4270b5a..d6f5836 100644 (file)
@@ -437,20 +437,20 @@ static int frequency_show(struct seq_file *m, void *unused)
                max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
                max_freq *= (IS_GEN9_BC(i915) ||
-                            GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+                            GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(rps, max_freq));
 
                max_freq = (rp_state_cap & 0xff00) >> 8;
                max_freq *= (IS_GEN9_BC(i915) ||
-                            GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+                            GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(rps, max_freq));
 
                max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
                max_freq *= (IS_GEN9_BC(i915) ||
-                            GRAPHICS_VER(i915) >= 10 ? GEN9_FREQ_SCALER : 1);
+                            GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(rps, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -500,7 +500,7 @@ static int llc_show(struct seq_file *m, void *data)
 
        min_gpu_freq = rps->min_freq;
        max_gpu_freq = rps->max_freq;
-       if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
+       if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
                /* Convert GT frequency to 50 HZ units */
                min_gpu_freq /= GEN9_FREQ_SCALER;
                max_gpu_freq /= GEN9_FREQ_SCALER;
@@ -518,7 +518,7 @@ static int llc_show(struct seq_file *m, void *data)
                           intel_gpu_freq(rps,
                                          (gpu_freq *
                                           (IS_GEN9_BC(i915) ||
-                                           GRAPHICS_VER(i915) >= 10 ?
+                                           GRAPHICS_VER(i915) >= 11 ?
                                            GEN9_FREQ_SCALER : 1))),
                           ((ia_freq >> 0) & 0xff) * 100,
                           ((ia_freq >> 8) & 0xff) * 100);
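
The three debugfs hunks above only move the GEN9_FREQ_SCALER multiplier from a GRAPHICS_VER >= 10 check to >= 11; the field extraction itself is unchanged. For reference, here is a small userspace sketch of that decode: three 8-bit fields pulled out of rp_state_cap (with the RPN/RP0 fields swapped on GEN9_LP, as in the hunk) and scaled when the condition holds. The example register value is made up, and the final MHz conversion done by intel_gpu_freq() is deliberately left out.

/* Sketch: decoding the three 8-bit frequency fields from rp_state_cap as
 * the debugfs hunk above does, applying GEN9_FREQ_SCALER only where the
 * (now GRAPHICS_VER >= 11) condition holds. Values stay in hw units. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GEN9_FREQ_SCALER 3

struct caps { unsigned int rpn, rp1, rp0; };

static struct caps decode_rp_state_cap(uint32_t rp_state_cap, bool gen9_lp,
                                       bool use_scaler)
{
        unsigned int mult = use_scaler ? GEN9_FREQ_SCALER : 1;
        struct caps c;

        /* on GEN9_LP the RPN/RP0 fields swap places, as in the hunk above */
        c.rpn = ((gen9_lp ? rp_state_cap >> 0 : rp_state_cap >> 16) & 0xff) * mult;
        c.rp1 = ((rp_state_cap & 0xff00) >> 8) * mult;
        c.rp0 = ((gen9_lp ? rp_state_cap >> 16 : rp_state_cap >> 0) & 0xff) * mult;
        return c;
}

int main(void)
{
        struct caps c = decode_rp_state_cap(0x0b3038, false, true); /* made-up value */

        printf("RPN %u RP1 %u RP0 %u (hw units)\n", c.rpn, c.rp1, c.rp0);
        return 0;
}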
index 94e0a56..461844d 100644 (file)
@@ -42,7 +42,7 @@ int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
                        vf_flush_wa = true;
 
                /* WaForGAMHang:kbl */
-               if (IS_KBL_GT_STEP(rq->engine->i915, 0, STEP_B0))
+               if (IS_KBL_GT_STEP(rq->engine->i915, 0, STEP_C0))
                        dc_flush_wa = true;
        }
 
@@ -208,7 +208,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
                flags |= PIPE_CONTROL_FLUSH_L3;
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
-               /* Wa_1409600907:tgl */
+               /* Wa_1409600907:tgl,adl-p */
                flags |= PIPE_CONTROL_DEPTH_STALL;
                flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
                flags |= PIPE_CONTROL_FLUSH_ENABLE;
@@ -279,7 +279,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
        if (mode & EMIT_INVALIDATE)
                aux_inv = rq->engine->mask & ~BIT(BCS0);
        if (aux_inv)
-               cmd += 2 * hweight8(aux_inv) + 2;
+               cmd += 2 * hweight32(aux_inv) + 2;
 
        cs = intel_ring_begin(rq, cmd);
        if (IS_ERR(cs))
@@ -313,9 +313,8 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
                struct intel_engine_cs *engine;
                unsigned int tmp;
 
-               *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv));
-               for_each_engine_masked(engine, rq->engine->gt,
-                                      aux_inv, tmp) {
+               *cs++ = MI_LOAD_REGISTER_IMM(hweight32(aux_inv));
+               for_each_engine_masked(engine, rq->engine->gt, aux_inv, tmp) {
                        *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine));
                        *cs++ = AUX_INV;
                }
@@ -506,7 +505,8 @@ gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
        *cs++ = MI_USER_INTERRUPT;
 
        *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-       if (intel_engine_has_semaphores(rq->engine))
+       if (intel_engine_has_semaphores(rq->engine) &&
+           !intel_uc_uses_guc_submission(&rq->engine->gt->uc))
                cs = emit_preempt_busywait(rq, cs);
 
        rq->tail = intel_ring_offset(rq, cs);
@@ -598,7 +598,8 @@ gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
        *cs++ = MI_USER_INTERRUPT;
 
        *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-       if (intel_engine_has_semaphores(rq->engine))
+       if (intel_engine_has_semaphores(rq->engine) &&
+           !intel_uc_uses_guc_submission(&rq->engine->gt->uc))
                cs = gen12_emit_preempt_busywait(rq, cs);
 
        rq->tail = intel_ring_offset(rq, cs);
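
The hweight8() to hweight32() change above sizes the MI_LOAD_REGISTER_IMM payload from the population count of the full engine mask, one register/value pair per selected engine. The sketch below shows only that dword accounting, read directly off the hunk ("2 * hweight32(aux_inv) + 2"); interpreting the extra two dwords as LRI header plus padding is an assumption here, not a hardware statement.

/* Sketch: dword budgeting for an MI_LOAD_REGISTER_IMM that writes one
 * register per engine set in a 32-bit mask, mirroring the hweight32()
 * accounting in gen12_emit_flush_xcs() above. */
#include <stdint.h>
#include <stdio.h>

static unsigned int hweight32(uint32_t v)
{
        unsigned int n = 0;

        while (v) {             /* clear the lowest set bit per iteration */
                v &= v - 1;
                n++;
        }
        return n;
}

static unsigned int lri_dwords(uint32_t engine_mask)
{
        if (!engine_mask)
                return 0;
        /* n (reg, value) pairs + 2 extra dwords (header/padding, assumed) */
        return 2 * hweight32(engine_mask) + 2;
}

int main(void)
{
        uint32_t aux_inv = 0x00010006;  /* arbitrary example mask, wider than 8 bits */

        printf("engines: %u, dwords reserved: %u\n",
               hweight32(aux_inv), lri_dwords(aux_inv));
        return 0;
}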
index da4f5eb..6e0e52e 100644 (file)
@@ -358,6 +358,54 @@ static void gen8_ppgtt_alloc(struct i915_address_space *vm,
                           &start, start + length, vm->top);
 }
 
+static void __gen8_ppgtt_foreach(struct i915_address_space *vm,
+                                struct i915_page_directory *pd,
+                                u64 *start, u64 end, int lvl,
+                                void (*fn)(struct i915_address_space *vm,
+                                           struct i915_page_table *pt,
+                                           void *data),
+                                void *data)
+{
+       unsigned int idx, len;
+
+       len = gen8_pd_range(*start, end, lvl--, &idx);
+
+       spin_lock(&pd->lock);
+       do {
+               struct i915_page_table *pt = pd->entry[idx];
+
+               atomic_inc(&pt->used);
+               spin_unlock(&pd->lock);
+
+               if (lvl) {
+                       __gen8_ppgtt_foreach(vm, as_pd(pt), start, end, lvl,
+                                            fn, data);
+               } else {
+                       fn(vm, pt, data);
+                       *start += gen8_pt_count(*start, end);
+               }
+
+               spin_lock(&pd->lock);
+               atomic_dec(&pt->used);
+       } while (idx++, --len);
+       spin_unlock(&pd->lock);
+}
+
+static void gen8_ppgtt_foreach(struct i915_address_space *vm,
+                              u64 start, u64 length,
+                              void (*fn)(struct i915_address_space *vm,
+                                         struct i915_page_table *pt,
+                                         void *data),
+                              void *data)
+{
+       start >>= GEN8_PTE_SHIFT;
+       length >>= GEN8_PTE_SHIFT;
+
+       __gen8_ppgtt_foreach(vm, i915_vm_to_ppgtt(vm)->pd,
+                            &start, start + length, vm->top,
+                            fn, data);
+}
+
 static __always_inline u64
 gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt,
                      struct i915_page_directory *pdp,
@@ -552,6 +600,24 @@ static void gen8_ppgtt_insert(struct i915_address_space *vm,
        }
 }
 
+static void gen8_ppgtt_insert_entry(struct i915_address_space *vm,
+                                   dma_addr_t addr,
+                                   u64 offset,
+                                   enum i915_cache_level level,
+                                   u32 flags)
+{
+       u64 idx = offset >> GEN8_PTE_SHIFT;
+       struct i915_page_directory * const pdp =
+               gen8_pdp_for_page_index(vm, idx);
+       struct i915_page_directory *pd =
+               i915_pd_entry(pdp, gen8_pd_index(idx, 2));
+       gen8_pte_t *vaddr;
+
+       vaddr = px_vaddr(i915_pt_entry(pd, gen8_pd_index(idx, 1)));
+       vaddr[gen8_pd_index(idx, 0)] = gen8_pte_encode(addr, level, flags);
+       clflush_cache_range(&vaddr[gen8_pd_index(idx, 0)], sizeof(*vaddr));
+}
+
 static int gen8_init_scratch(struct i915_address_space *vm)
 {
        u32 pte_flags;
@@ -731,8 +797,10 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
 
        ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND;
        ppgtt->vm.insert_entries = gen8_ppgtt_insert;
+       ppgtt->vm.insert_page = gen8_ppgtt_insert_entry;
        ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
        ppgtt->vm.clear_range = gen8_ppgtt_clear;
+       ppgtt->vm.foreach = gen8_ppgtt_foreach;
 
        ppgtt->vm.pte_encode = gen8_pte_encode;
 
index 38cc427..209cf26 100644 (file)
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
 
-static bool irq_enable(struct intel_engine_cs *engine)
+static bool irq_enable(struct intel_breadcrumbs *b)
 {
-       if (!engine->irq_enable)
-               return false;
-
-       /* Caller disables interrupts */
-       spin_lock(&engine->gt->irq_lock);
-       engine->irq_enable(engine);
-       spin_unlock(&engine->gt->irq_lock);
-
-       return true;
+       return intel_engine_irq_enable(b->irq_engine);
 }
 
-static void irq_disable(struct intel_engine_cs *engine)
+static void irq_disable(struct intel_breadcrumbs *b)
 {
-       if (!engine->irq_disable)
-               return;
-
-       /* Caller disables interrupts */
-       spin_lock(&engine->gt->irq_lock);
-       engine->irq_disable(engine);
-       spin_unlock(&engine->gt->irq_lock);
+       intel_engine_irq_disable(b->irq_engine);
 }
 
 static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
@@ -57,7 +43,7 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
        WRITE_ONCE(b->irq_armed, true);
 
        /* Requests may have completed before we could enable the interrupt. */
-       if (!b->irq_enabled++ && irq_enable(b->irq_engine))
+       if (!b->irq_enabled++ && b->irq_enable(b))
                irq_work_queue(&b->irq_work);
 }
 
@@ -76,7 +62,7 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
 {
        GEM_BUG_ON(!b->irq_enabled);
        if (!--b->irq_enabled)
-               irq_disable(b->irq_engine);
+               b->irq_disable(b);
 
        WRITE_ONCE(b->irq_armed, false);
        intel_gt_pm_put_async(b->irq_engine->gt);
@@ -259,6 +245,9 @@ static void signal_irq_work(struct irq_work *work)
                        llist_entry(signal, typeof(*rq), signal_node);
                struct list_head cb_list;
 
+               if (rq->engine->sched_engine->retire_inflight_request_prio)
+                       rq->engine->sched_engine->retire_inflight_request_prio(rq);
+
                spin_lock(&rq->lock);
                list_replace(&rq->fence.cb_list, &cb_list);
                __dma_fence_signal__timestamp(&rq->fence, timestamp);
@@ -281,7 +270,7 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
        if (!b)
                return NULL;
 
-       b->irq_engine = irq_engine;
+       kref_init(&b->ref);
 
        spin_lock_init(&b->signalers_lock);
        INIT_LIST_HEAD(&b->signalers);
@@ -290,6 +279,10 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine)
        spin_lock_init(&b->irq_lock);
        init_irq_work(&b->irq_work, signal_irq_work);
 
+       b->irq_engine = irq_engine;
+       b->irq_enable = irq_enable;
+       b->irq_disable = irq_disable;
+
        return b;
 }
 
@@ -303,9 +296,9 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b)
        spin_lock_irqsave(&b->irq_lock, flags);
 
        if (b->irq_enabled)
-               irq_enable(b->irq_engine);
+               b->irq_enable(b);
        else
-               irq_disable(b->irq_engine);
+               b->irq_disable(b);
 
        spin_unlock_irqrestore(&b->irq_lock, flags);
 }
@@ -325,11 +318,14 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
        }
 }
 
-void intel_breadcrumbs_free(struct intel_breadcrumbs *b)
+void intel_breadcrumbs_free(struct kref *kref)
 {
+       struct intel_breadcrumbs *b = container_of(kref, typeof(*b), ref);
+
        irq_work_sync(&b->irq_work);
        GEM_BUG_ON(!list_empty(&b->signalers));
        GEM_BUG_ON(b->irq_armed);
+
        kfree(b);
 }
 
index 3ce5ce2..be0d4f3 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/atomic.h>
 #include <linux/irq_work.h>
 
-#include "intel_engine_types.h"
+#include "intel_breadcrumbs_types.h"
 
 struct drm_printer;
 struct i915_request;
@@ -17,7 +17,7 @@ struct intel_breadcrumbs;
 
 struct intel_breadcrumbs *
 intel_breadcrumbs_create(struct intel_engine_cs *irq_engine);
-void intel_breadcrumbs_free(struct intel_breadcrumbs *b);
+void intel_breadcrumbs_free(struct kref *kref);
 
 void intel_breadcrumbs_reset(struct intel_breadcrumbs *b);
 void __intel_breadcrumbs_park(struct intel_breadcrumbs *b);
@@ -48,4 +48,16 @@ void i915_request_cancel_breadcrumb(struct i915_request *request);
 void intel_context_remove_breadcrumbs(struct intel_context *ce,
                                      struct intel_breadcrumbs *b);
 
+static inline struct intel_breadcrumbs *
+intel_breadcrumbs_get(struct intel_breadcrumbs *b)
+{
+       kref_get(&b->ref);
+       return b;
+}
+
+static inline void intel_breadcrumbs_put(struct intel_breadcrumbs *b)
+{
+       kref_put(&b->ref, intel_breadcrumbs_free);
+}
+
 #endif /* __INTEL_BREADCRUMBS__ */
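
intel_breadcrumbs_free() now takes a kref and is invoked as the release callback of intel_breadcrumbs_put(), with intel_breadcrumbs_get() bumping the count. Below is a minimal userspace approximation of that get/put pattern using C11 atomics; it mimics kref semantics but is not the kernel implementation.

/* Sketch: a minimal kref-style get/put with a release callback,
 * approximating the intel_breadcrumbs_get()/_put() pairing above. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct breadcrumbs {
        atomic_int ref;
        /* ... signaler lists, irq bookkeeping ... */
};

static void breadcrumbs_free(struct breadcrumbs *b)
{
        printf("releasing breadcrumbs\n");
        free(b);
}

static struct breadcrumbs *breadcrumbs_get(struct breadcrumbs *b)
{
        atomic_fetch_add(&b->ref, 1);
        return b;
}

static void breadcrumbs_put(struct breadcrumbs *b)
{
        if (atomic_fetch_sub(&b->ref, 1) == 1)  /* dropped the last reference */
                breadcrumbs_free(b);
}

int main(void)
{
        struct breadcrumbs *b = calloc(1, sizeof(*b));

        atomic_init(&b->ref, 1);        /* creator owns the first reference */
        breadcrumbs_get(b);             /* e.g. shared with a second user */
        breadcrumbs_put(b);
        breadcrumbs_put(b);             /* last put triggers the release */
        return 0;
}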
index 3a084ce..72dfd37 100644 (file)
@@ -7,10 +7,13 @@
 #define __INTEL_BREADCRUMBS_TYPES__
 
 #include <linux/irq_work.h>
+#include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/types.h>
 
+#include "intel_engine_types.h"
+
 /*
  * Rather than have every client wait upon all user interrupts,
  * with the herd waking after every interrupt and each doing the
@@ -29,6 +32,7 @@
  * the overhead of waking that client is much preferred.
  */
 struct intel_breadcrumbs {
+       struct kref ref;
        atomic_t active;
 
        spinlock_t signalers_lock; /* protects the list of signalers */
@@ -42,7 +46,10 @@ struct intel_breadcrumbs {
        bool irq_armed;
 
        /* Not all breadcrumbs are attached to physical HW */
+       intel_engine_mask_t     engine_mask;
        struct intel_engine_cs *irq_engine;
+       bool    (*irq_enable)(struct intel_breadcrumbs *b);
+       void    (*irq_disable)(struct intel_breadcrumbs *b);
 };
 
 #endif /* __INTEL_BREADCRUMBS_TYPES__ */
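
The new irq_enable/irq_disable members turn interrupt arming into a per-instance callback, so breadcrumbs that are not backed by a physical engine can supply their own hooks while engine-backed ones keep the defaults installed in intel_breadcrumbs_create(). A short sketch of that function-pointer dispatch follows; the names and the "backing" string field are illustrative only.

/* Sketch: per-instance enable/disable hooks with engine-backed defaults,
 * mirroring the irq_enable/irq_disable members added above. */
#include <stdbool.h>
#include <stdio.h>

struct breadcrumbs {
        bool (*irq_enable)(struct breadcrumbs *b);
        void (*irq_disable)(struct breadcrumbs *b);
        const char *backing;    /* stands in for the irq_engine pointer */
};

static bool engine_irq_enable(struct breadcrumbs *b)
{
        printf("enable user interrupts on %s\n", b->backing);
        return true;
}

static void engine_irq_disable(struct breadcrumbs *b)
{
        printf("disable user interrupts on %s\n", b->backing);
}

static void arm(struct breadcrumbs *b)
{
        if (b->irq_enable(b))
                printf("irq armed\n");
}

static void disarm(struct breadcrumbs *b)
{
        b->irq_disable(b);
        printf("irq disarmed\n");
}

int main(void)
{
        struct breadcrumbs b = {
                .irq_enable = engine_irq_enable,
                .irq_disable = engine_irq_disable,
                .backing = "rcs0",
        };

        arm(&b);
        disarm(&b);
        return 0;
}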
index 4033184..745e84c 100644 (file)
@@ -7,28 +7,26 @@
 #include "gem/i915_gem_pm.h"
 
 #include "i915_drv.h"
-#include "i915_globals.h"
+#include "i915_trace.h"
 
 #include "intel_context.h"
 #include "intel_engine.h"
 #include "intel_engine_pm.h"
 #include "intel_ring.h"
 
-static struct i915_global_context {
-       struct i915_global base;
-       struct kmem_cache *slab_ce;
-} global;
+static struct kmem_cache *slab_ce;
 
 static struct intel_context *intel_context_alloc(void)
 {
-       return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
+       return kmem_cache_zalloc(slab_ce, GFP_KERNEL);
 }
 
 static void rcu_context_free(struct rcu_head *rcu)
 {
        struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);
 
-       kmem_cache_free(global.slab_ce, ce);
+       trace_intel_context_free(ce);
+       kmem_cache_free(slab_ce, ce);
 }
 
 void intel_context_free(struct intel_context *ce)
@@ -46,6 +44,7 @@ intel_context_create(struct intel_engine_cs *engine)
                return ERR_PTR(-ENOMEM);
 
        intel_context_init(ce, engine);
+       trace_intel_context_create(ce);
        return ce;
 }
 
@@ -80,7 +79,7 @@ static int intel_context_active_acquire(struct intel_context *ce)
 
        __i915_active_acquire(&ce->active);
 
-       if (intel_context_is_barrier(ce))
+       if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine))
                return 0;
 
        /* Preallocate tracking nodes */
@@ -268,6 +267,8 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
 
        GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
 
+       trace_intel_context_do_pin(ce);
+
 err_unlock:
        mutex_unlock(&ce->pin_mutex);
 err_post_unpin:
@@ -306,9 +307,9 @@ retry:
        return err;
 }
 
-void intel_context_unpin(struct intel_context *ce)
+void __intel_context_do_unpin(struct intel_context *ce, int sub)
 {
-       if (!atomic_dec_and_test(&ce->pin_count))
+       if (!atomic_sub_and_test(sub, &ce->pin_count))
                return;
 
        CE_TRACE(ce, "unpin\n");
@@ -323,6 +324,7 @@ void intel_context_unpin(struct intel_context *ce)
         */
        intel_context_get(ce);
        intel_context_active_release(ce);
+       trace_intel_context_do_unpin(ce);
        intel_context_put(ce);
 }
 
@@ -360,6 +362,12 @@ static int __intel_context_active(struct i915_active *active)
        return 0;
 }
 
+static int sw_fence_dummy_notify(struct i915_sw_fence *sf,
+                                enum i915_sw_fence_notify state)
+{
+       return NOTIFY_DONE;
+}
+
 void
 intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
 {
@@ -371,7 +379,8 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
        ce->engine = engine;
        ce->ops = engine->cops;
        ce->sseu = engine->sseu;
-       ce->ring = __intel_context_ring_size(SZ_4K);
+       ce->ring = NULL;
+       ce->ring_size = SZ_4K;
 
        ewma_runtime_init(&ce->runtime.avg);
 
@@ -383,6 +392,22 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
 
        mutex_init(&ce->pin_mutex);
 
+       spin_lock_init(&ce->guc_state.lock);
+       INIT_LIST_HEAD(&ce->guc_state.fences);
+
+       spin_lock_init(&ce->guc_active.lock);
+       INIT_LIST_HEAD(&ce->guc_active.requests);
+
+       ce->guc_id = GUC_INVALID_LRC_ID;
+       INIT_LIST_HEAD(&ce->guc_id_link);
+
+       /*
+        * Initialize fence to be complete as this is expected to be complete
+        * unless there is a pending schedule disable outstanding.
+        */
+       i915_sw_fence_init(&ce->guc_blocked, sw_fence_dummy_notify);
+       i915_sw_fence_commit(&ce->guc_blocked);
+
        i915_active_init(&ce->active,
                         __intel_context_active, __intel_context_retire, 0);
 }
@@ -397,28 +422,17 @@ void intel_context_fini(struct intel_context *ce)
        i915_active_fini(&ce->active);
 }
 
-static void i915_global_context_shrink(void)
-{
-       kmem_cache_shrink(global.slab_ce);
-}
-
-static void i915_global_context_exit(void)
+void i915_context_module_exit(void)
 {
-       kmem_cache_destroy(global.slab_ce);
+       kmem_cache_destroy(slab_ce);
 }
 
-static struct i915_global_context global = { {
-       .shrink = i915_global_context_shrink,
-       .exit = i915_global_context_exit,
-} };
-
-int __init i915_global_context_init(void)
+int __init i915_context_module_init(void)
 {
-       global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
-       if (!global.slab_ce)
+       slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
+       if (!slab_ce)
                return -ENOMEM;
 
-       i915_global_register(&global.base);
        return 0;
 }
 
@@ -499,6 +513,26 @@ retry:
        return rq;
 }
 
+struct i915_request *intel_context_find_active_request(struct intel_context *ce)
+{
+       struct i915_request *rq, *active = NULL;
+       unsigned long flags;
+
+       GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));
+
+       spin_lock_irqsave(&ce->guc_active.lock, flags);
+       list_for_each_entry_reverse(rq, &ce->guc_active.requests,
+                                   sched.link) {
+               if (i915_request_completed(rq))
+                       break;
+
+               active = rq;
+       }
+       spin_unlock_irqrestore(&ce->guc_active.lock, flags);
+
+       return active;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftest_context.c"
 #endif
index f83a73a..c410989 100644 (file)
@@ -16,6 +16,7 @@
 #include "intel_engine_types.h"
 #include "intel_ring_types.h"
 #include "intel_timeline_types.h"
+#include "i915_trace.h"
 
 #define CE_TRACE(ce, fmt, ...) do {                                    \
        const struct intel_context *ce__ = (ce);                        \
@@ -30,6 +31,9 @@ void intel_context_init(struct intel_context *ce,
                        struct intel_engine_cs *engine);
 void intel_context_fini(struct intel_context *ce);
 
+void i915_context_module_exit(void);
+int i915_context_module_init(void);
+
 struct intel_context *
 intel_context_create(struct intel_engine_cs *engine);
 
@@ -69,6 +73,13 @@ intel_context_is_pinned(struct intel_context *ce)
        return atomic_read(&ce->pin_count);
 }
 
+static inline void intel_context_cancel_request(struct intel_context *ce,
+                                               struct i915_request *rq)
+{
+       GEM_BUG_ON(!ce->ops->cancel_request);
+       return ce->ops->cancel_request(ce, rq);
+}
+
 /**
  * intel_context_unlock_pinned - Releases the earlier locking of 'pinned' status
  * @ce - the context
@@ -113,7 +124,32 @@ static inline void __intel_context_pin(struct intel_context *ce)
        atomic_inc(&ce->pin_count);
 }
 
-void intel_context_unpin(struct intel_context *ce);
+void __intel_context_do_unpin(struct intel_context *ce, int sub);
+
+static inline void intel_context_sched_disable_unpin(struct intel_context *ce)
+{
+       __intel_context_do_unpin(ce, 2);
+}
+
+static inline void intel_context_unpin(struct intel_context *ce)
+{
+       if (!ce->ops->sched_disable) {
+               __intel_context_do_unpin(ce, 1);
+       } else {
+               /*
+                * Move ownership of this pin to the scheduling disable which is
+                * an async operation. When that operation completes the above
+                * intel_context_sched_disable_unpin is called potentially
+                * unpinning the context.
+                */
+               while (!atomic_add_unless(&ce->pin_count, -1, 1)) {
+                       if (atomic_cmpxchg(&ce->pin_count, 1, 2) == 1) {
+                               ce->ops->sched_disable(ce);
+                               break;
+                       }
+               }
+       }
+}
 
 void intel_context_enter_engine(struct intel_context *ce);
 void intel_context_exit_engine(struct intel_context *ce);
@@ -175,10 +211,8 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 
 struct i915_request *intel_context_create_request(struct intel_context *ce);
 
-static inline struct intel_ring *__intel_context_ring_size(u64 sz)
-{
-       return u64_to_ptr(struct intel_ring, sz);
-}
+struct i915_request *
+intel_context_find_active_request(struct intel_context *ce);
 
 static inline bool intel_context_is_barrier(const struct intel_context *ce)
 {
@@ -220,6 +254,18 @@ static inline bool intel_context_set_banned(struct intel_context *ce)
        return test_and_set_bit(CONTEXT_BANNED, &ce->flags);
 }
 
+static inline bool intel_context_ban(struct intel_context *ce,
+                                    struct i915_request *rq)
+{
+       bool ret = intel_context_set_banned(ce);
+
+       trace_intel_context_ban(ce);
+       if (ce->ops->ban)
+               ce->ops->ban(ce, rq);
+
+       return ret;
+}
+
 static inline bool
 intel_context_force_single_submission(const struct intel_context *ce)
 {
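
The new intel_context_unpin() above refuses to drop the final pin directly when the backend provides sched_disable(): it decrements while more than one pin remains, or promotes the count from 1 to 2 and hands that extra reference to the asynchronous schedule disable, whose completion calls intel_context_sched_disable_unpin() to release both. Below is a self-contained userspace model of that handoff with C11 atomics; sched_disable() is a direct call here purely to keep the example runnable.

/* Sketch: the "hand the last pin to an async teardown" pattern used by
 * intel_context_unpin() above, modelled with C11 atomics. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int pin_count = 1;        /* one pin outstanding */

/* add @a to *@v unless it currently equals @u; true if the add happened */
static bool add_unless(atomic_int *v, int a, int u)
{
        int c = atomic_load(v);

        while (c != u) {
                if (atomic_compare_exchange_weak(v, &c, c + a))
                        return true;
        }
        return false;
}

static void do_unpin(int sub)
{
        if (atomic_fetch_sub(&pin_count, sub) == sub)
                printf("context fully unpinned\n");
}

/* Stands in for ce->ops->sched_disable(); asynchronous in the kernel, a
 * direct call here. Its completion path drops both references at once. */
static void sched_disable(void)
{
        printf("schedule disable issued\n");
        do_unpin(2);
}

static void unpin(void)
{
        /* drop a pin unless it is the last one ... */
        while (!add_unless(&pin_count, -1, 1)) {
                int expected = 1;

                /* ... in which case promote 1 -> 2 and hand the release
                 * over to the schedule disable */
                if (atomic_compare_exchange_strong(&pin_count, &expected, 2)) {
                        sched_disable();
                        break;
                }
        }
}

int main(void)
{
        unpin();
        return 0;
}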
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.c b/drivers/gpu/drm/i915/gt/intel_context_param.c
deleted file mode 100644 (file)
index 65dcd09..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#include "i915_active.h"
-#include "intel_context.h"
-#include "intel_context_param.h"
-#include "intel_ring.h"
-
-int intel_context_set_ring_size(struct intel_context *ce, long sz)
-{
-       int err;
-
-       if (intel_context_lock_pinned(ce))
-               return -EINTR;
-
-       err = i915_active_wait(&ce->active);
-       if (err < 0)
-               goto unlock;
-
-       if (intel_context_is_pinned(ce)) {
-               err = -EBUSY; /* In active use, come back later! */
-               goto unlock;
-       }
-
-       if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
-               struct intel_ring *ring;
-
-               /* Replace the existing ringbuffer */
-               ring = intel_engine_create_ring(ce->engine, sz);
-               if (IS_ERR(ring)) {
-                       err = PTR_ERR(ring);
-                       goto unlock;
-               }
-
-               intel_ring_put(ce->ring);
-               ce->ring = ring;
-
-               /* Context image will be updated on next pin */
-       } else {
-               ce->ring = __intel_context_ring_size(sz);
-       }
-
-unlock:
-       intel_context_unlock_pinned(ce);
-       return err;
-}
-
-long intel_context_get_ring_size(struct intel_context *ce)
-{
-       long sz = (unsigned long)READ_ONCE(ce->ring);
-
-       if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
-               if (intel_context_lock_pinned(ce))
-                       return -EINTR;
-
-               sz = ce->ring->size;
-               intel_context_unlock_pinned(ce);
-       }
-
-       return sz;
-}
index 3ecacc6..0c69cb4 100644 (file)
 
 #include "intel_context.h"
 
-int intel_context_set_ring_size(struct intel_context *ce, long sz);
-long intel_context_get_ring_size(struct intel_context *ce);
-
-static inline int
+static inline void
 intel_context_set_watchdog_us(struct intel_context *ce, u64 timeout_us)
 {
        ce->watchdog.timeout_us = timeout_us;
-       return 0;
 }
 
 #endif /* INTEL_CONTEXT_PARAM_H */
index ed8c447..e54351a 100644 (file)
 #include <linux/types.h>
 
 #include "i915_active_types.h"
+#include "i915_sw_fence.h"
 #include "i915_utils.h"
 #include "intel_engine_types.h"
 #include "intel_sseu.h"
 
-#define CONTEXT_REDZONE POISON_INUSE
+#include "uc/intel_guc_fwif.h"
 
+#define CONTEXT_REDZONE POISON_INUSE
 DECLARE_EWMA(runtime, 3, 8);
 
 struct i915_gem_context;
@@ -35,16 +37,29 @@ struct intel_context_ops {
 
        int (*alloc)(struct intel_context *ce);
 
+       void (*ban)(struct intel_context *ce, struct i915_request *rq);
+
        int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
        int (*pin)(struct intel_context *ce, void *vaddr);
        void (*unpin)(struct intel_context *ce);
        void (*post_unpin)(struct intel_context *ce);
 
+       void (*cancel_request)(struct intel_context *ce,
+                              struct i915_request *rq);
+
        void (*enter)(struct intel_context *ce);
        void (*exit)(struct intel_context *ce);
 
+       void (*sched_disable)(struct intel_context *ce);
+
        void (*reset)(struct intel_context *ce);
        void (*destroy)(struct kref *kref);
+
+       /* virtual engine/context interface */
+       struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
+                                               unsigned int count);
+       struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
+                                              unsigned int sibling);
 };
 
 struct intel_context {
@@ -82,6 +97,7 @@ struct intel_context {
        spinlock_t signal_lock; /* protects signals, the list of requests */
 
        struct i915_vma *state;
+       u32 ring_size;
        struct intel_ring *ring;
        struct intel_timeline *timeline;
 
@@ -95,6 +111,7 @@ struct intel_context {
 #define CONTEXT_BANNED                 6
 #define CONTEXT_FORCE_SINGLE_SUBMISSION        7
 #define CONTEXT_NOPREEMPT              8
+#define CONTEXT_LRCA_DIRTY             9
 
        struct {
                u64 timeout_us;
@@ -136,6 +153,51 @@ struct intel_context {
        struct intel_sseu sseu;
 
        u8 wa_bb_page; /* if set, page num reserved for context workarounds */
+
+       struct {
+               /** lock: protects everything in guc_state */
+               spinlock_t lock;
+               /**
+                * sched_state: scheduling state of this context using GuC
+                * submission
+                */
+               u16 sched_state;
+               /*
+                * fences: maintains a list of requests that have a submit
+                * fence related to GuC submission
+                */
+               struct list_head fences;
+       } guc_state;
+
+       struct {
+               /** lock: protects everything in guc_active */
+               spinlock_t lock;
+               /** requests: active requests on this context */
+               struct list_head requests;
+       } guc_active;
+
+       /* GuC scheduling state flags that do not require a lock. */
+       atomic_t guc_sched_state_no_lock;
+
+       /* GuC LRC descriptor ID */
+       u16 guc_id;
+
+       /* GuC LRC descriptor reference count */
+       atomic_t guc_id_ref;
+
+       /*
+        * GuC ID link - in list when unpinned but guc_id still valid in GuC
+        */
+       struct list_head guc_id_link;
+
+       /* GuC context blocked fence */
+       struct i915_sw_fence guc_blocked;
+
+       /*
+        * GuC priority management
+        */
+       u8 guc_prio;
+       u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
 };
 
 #endif /* __INTEL_CONTEXT_TYPES__ */
index 8d91849..87579af 100644 (file)
@@ -19,7 +19,9 @@
 #include "intel_workarounds.h"
 
 struct drm_printer;
+struct intel_context;
 struct intel_gt;
+struct lock_class_key;
 
 /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
  * but keeps the logic simple. Indeed, the whole purpose of this macro is just
@@ -123,20 +125,6 @@ execlists_active(const struct intel_engine_execlists *execlists)
        return active;
 }
 
-static inline void
-execlists_active_lock_bh(struct intel_engine_execlists *execlists)
-{
-       local_bh_disable(); /* prevent local softirq and lock recursion */
-       tasklet_lock(&execlists->tasklet);
-}
-
-static inline void
-execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
-{
-       tasklet_unlock(&execlists->tasklet);
-       local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
-}
-
 struct i915_request *
 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
 
@@ -186,11 +174,12 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
 #define I915_GEM_HWS_PREEMPT_ADDR      (I915_GEM_HWS_PREEMPT * sizeof(u32))
 #define I915_GEM_HWS_SEQNO             0x40
 #define I915_GEM_HWS_SEQNO_ADDR                (I915_GEM_HWS_SEQNO * sizeof(u32))
+#define I915_GEM_HWS_MIGRATE           (0x42 * sizeof(u32))
 #define I915_GEM_HWS_SCRATCH           0x80
 
 #define I915_HWS_CSB_BUF0_INDEX                0x10
 #define I915_HWS_CSB_WRITE_INDEX       0x1f
-#define CNL_HWS_CSB_WRITE_INDEX                0x2f
+#define ICL_HWS_CSB_WRITE_INDEX                0x2f
 
 void intel_engine_stop(struct intel_engine_cs *engine);
 void intel_engine_cleanup(struct intel_engine_cs *engine);
@@ -223,6 +212,9 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
 
 void intel_engine_init_execlists(struct intel_engine_cs *engine);
 
+bool intel_engine_irq_enable(struct intel_engine_cs *engine);
+void intel_engine_irq_disable(struct intel_engine_cs *engine);
+
 static inline void __intel_engine_reset(struct intel_engine_cs *engine,
                                        bool stalled)
 {
@@ -248,17 +240,27 @@ __printf(3, 4)
 void intel_engine_dump(struct intel_engine_cs *engine,
                       struct drm_printer *m,
                       const char *header, ...);
+void intel_engine_dump_active_requests(struct list_head *requests,
+                                      struct i915_request *hung_rq,
+                                      struct drm_printer *m);
 
 ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
                                   ktime_t *now);
 
 struct i915_request *
-intel_engine_find_active_request(struct intel_engine_cs *engine);
+intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine);
 
 u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
+struct intel_context *
+intel_engine_create_pinned_context(struct intel_engine_cs *engine,
+                                  struct i915_address_space *vm,
+                                  unsigned int ring_size,
+                                  unsigned int hwsp,
+                                  struct lock_class_key *key,
+                                  const char *name);
+
+void intel_engine_destroy_pinned_context(struct intel_context *ce);
 
-void intel_engine_init_active(struct intel_engine_cs *engine,
-                             unsigned int subclass);
 #define ENGINE_PHYSICAL        0
 #define ENGINE_MOCK    1
 #define ENGINE_VIRTUAL 2
@@ -277,13 +279,60 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
        return intel_engine_has_preemption(engine);
 }
 
+struct intel_context *
+intel_engine_create_virtual(struct intel_engine_cs **siblings,
+                           unsigned int count);
+
+static inline bool
+intel_virtual_engine_has_heartbeat(const struct intel_engine_cs *engine)
+{
+       /*
+        * For non-GuC submission we expect the back-end to look at the
+        * heartbeat status of the actual physical engine that the work
+        * has been (or is being) scheduled on, so we should only reach
+        * here with GuC submission enabled.
+        */
+       GEM_BUG_ON(!intel_engine_uses_guc(engine));
+
+       return intel_guc_virtual_engine_has_heartbeat(engine);
+}
+
 static inline bool
 intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
 {
        if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
                return false;
 
-       return READ_ONCE(engine->props.heartbeat_interval_ms);
+       if (intel_engine_is_virtual(engine))
+               return intel_virtual_engine_has_heartbeat(engine);
+       else
+               return READ_ONCE(engine->props.heartbeat_interval_ms);
+}
+
+static inline struct intel_engine_cs *
+intel_engine_get_sibling(struct intel_engine_cs *engine, unsigned int sibling)
+{
+       GEM_BUG_ON(!intel_engine_is_virtual(engine));
+       return engine->cops->get_sibling(engine, sibling);
+}
+
+static inline void
+intel_engine_set_hung_context(struct intel_engine_cs *engine,
+                             struct intel_context *ce)
+{
+       engine->hung_ce = ce;
+}
+
+static inline void
+intel_engine_clear_hung_context(struct intel_engine_cs *engine)
+{
+       intel_engine_set_hung_context(engine, NULL);
+}
+
+static inline struct intel_context *
+intel_engine_get_hung_context(struct intel_engine_cs *engine)
+{
+       return engine->hung_ce;
 }
 
 #endif /* _INTEL_RINGBUFFER_H_ */
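
The declarations above expose the new virtual-engine interface (intel_engine_create_virtual(), intel_engine_get_sibling()) alongside the hung-context accessors. A usage sketch follows, assuming the caller already holds a populated siblings[] array; the sketch_ function name is invented for illustration only.

static struct intel_context *
sketch_make_virtual(struct intel_engine_cs **siblings, unsigned int count)
{
	struct intel_context *ce;

	ce = intel_engine_create_virtual(siblings, count);
	if (IS_ERR(ce))
		return ce;

	/*
	 * A single sibling yields a plain context; with more than one the
	 * backing engine is virtual and its physical members stay reachable
	 * through intel_engine_get_sibling().
	 */
	if (count > 1)
		GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));

	return ce;
}
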
index 7f03df2..0d9105a 100644 (file)
 #define DEFAULT_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
 #define GEN8_LR_CONTEXT_RENDER_SIZE    (20 * PAGE_SIZE)
 #define GEN9_LR_CONTEXT_RENDER_SIZE    (22 * PAGE_SIZE)
-#define GEN10_LR_CONTEXT_RENDER_SIZE   (18 * PAGE_SIZE)
 #define GEN11_LR_CONTEXT_RENDER_SIZE   (14 * PAGE_SIZE)
 
 #define GEN8_LR_CONTEXT_OTHER_SIZE     ( 2 * PAGE_SIZE)
 
 #define MAX_MMIO_BASES 3
 struct engine_info {
-       unsigned int hw_id;
        u8 class;
        u8 instance;
        /* mmio bases table *must* be sorted in reverse graphics_ver order */
@@ -54,7 +52,6 @@ struct engine_info {
 
 static const struct engine_info intel_engines[] = {
        [RCS0] = {
-               .hw_id = RCS0_HW,
                .class = RENDER_CLASS,
                .instance = 0,
                .mmio_bases = {
@@ -62,7 +59,6 @@ static const struct engine_info intel_engines[] = {
                },
        },
        [BCS0] = {
-               .hw_id = BCS0_HW,
                .class = COPY_ENGINE_CLASS,
                .instance = 0,
                .mmio_bases = {
@@ -70,7 +66,6 @@ static const struct engine_info intel_engines[] = {
                },
        },
        [VCS0] = {
-               .hw_id = VCS0_HW,
                .class = VIDEO_DECODE_CLASS,
                .instance = 0,
                .mmio_bases = {
@@ -80,7 +75,6 @@ static const struct engine_info intel_engines[] = {
                },
        },
        [VCS1] = {
-               .hw_id = VCS1_HW,
                .class = VIDEO_DECODE_CLASS,
                .instance = 1,
                .mmio_bases = {
@@ -89,7 +83,6 @@ static const struct engine_info intel_engines[] = {
                },
        },
        [VCS2] = {
-               .hw_id = VCS2_HW,
                .class = VIDEO_DECODE_CLASS,
                .instance = 2,
                .mmio_bases = {
@@ -97,15 +90,41 @@ static const struct engine_info intel_engines[] = {
                },
        },
        [VCS3] = {
-               .hw_id = VCS3_HW,
                .class = VIDEO_DECODE_CLASS,
                .instance = 3,
                .mmio_bases = {
                        { .graphics_ver = 11, .base = GEN11_BSD4_RING_BASE }
                },
        },
+       [VCS4] = {
+               .class = VIDEO_DECODE_CLASS,
+               .instance = 4,
+               .mmio_bases = {
+                       { .graphics_ver = 12, .base = XEHP_BSD5_RING_BASE }
+               },
+       },
+       [VCS5] = {
+               .class = VIDEO_DECODE_CLASS,
+               .instance = 5,
+               .mmio_bases = {
+                       { .graphics_ver = 12, .base = XEHP_BSD6_RING_BASE }
+               },
+       },
+       [VCS6] = {
+               .class = VIDEO_DECODE_CLASS,
+               .instance = 6,
+               .mmio_bases = {
+                       { .graphics_ver = 12, .base = XEHP_BSD7_RING_BASE }
+               },
+       },
+       [VCS7] = {
+               .class = VIDEO_DECODE_CLASS,
+               .instance = 7,
+               .mmio_bases = {
+                       { .graphics_ver = 12, .base = XEHP_BSD8_RING_BASE }
+               },
+       },
        [VECS0] = {
-               .hw_id = VECS0_HW,
                .class = VIDEO_ENHANCEMENT_CLASS,
                .instance = 0,
                .mmio_bases = {
@@ -114,13 +133,26 @@ static const struct engine_info intel_engines[] = {
                },
        },
        [VECS1] = {
-               .hw_id = VECS1_HW,
                .class = VIDEO_ENHANCEMENT_CLASS,
                .instance = 1,
                .mmio_bases = {
                        { .graphics_ver = 11, .base = GEN11_VEBOX2_RING_BASE }
                },
        },
+       [VECS2] = {
+               .class = VIDEO_ENHANCEMENT_CLASS,
+               .instance = 2,
+               .mmio_bases = {
+                       { .graphics_ver = 12, .base = XEHP_VEBOX3_RING_BASE }
+               },
+       },
+       [VECS3] = {
+               .class = VIDEO_ENHANCEMENT_CLASS,
+               .instance = 3,
+               .mmio_bases = {
+                       { .graphics_ver = 12, .base = XEHP_VEBOX4_RING_BASE }
+               },
+       },
 };
 
 /**
@@ -153,8 +185,6 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
                case 12:
                case 11:
                        return GEN11_LR_CONTEXT_RENDER_SIZE;
-               case 10:
-                       return GEN10_LR_CONTEXT_RENDER_SIZE;
                case 9:
                        return GEN9_LR_CONTEXT_RENDER_SIZE;
                case 8:
@@ -269,6 +299,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 
        BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
        BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH));
+       BUILD_BUG_ON(I915_MAX_VCS > (MAX_ENGINE_INSTANCE + 1));
+       BUILD_BUG_ON(I915_MAX_VECS > (MAX_ENGINE_INSTANCE + 1));
 
        if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine)))
                return -EINVAL;
@@ -294,7 +326,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
        engine->i915 = i915;
        engine->gt = gt;
        engine->uncore = gt->uncore;
-       engine->hw_id = info->hw_id;
        guc_class = engine_class_to_guc_class(info->class);
        engine->guc_id = MAKE_GUC_ID(guc_class, info->instance);
        engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
@@ -328,9 +359,6 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
        if (engine->context_size)
                DRIVER_CAPS(i915)->has_logical_contexts = true;
 
-       /* Nothing to do here, execute in order of dependencies */
-       engine->schedule = NULL;
-
        ewma__engine_latency_init(&engine->latency);
        seqcount_init(&engine->stats.lock);
 
@@ -445,6 +473,28 @@ void intel_engines_free(struct intel_gt *gt)
        }
 }
 
+static
+bool gen11_vdbox_has_sfc(struct drm_i915_private *i915,
+                        unsigned int physical_vdbox,
+                        unsigned int logical_vdbox, u16 vdbox_mask)
+{
+       /*
+        * In Gen11, only even numbered logical VDBOXes are hooked
+        * up to an SFC (Scaler & Format Converter) unit.
+        * In Gen12, even numbered physical instances are always connected
+        * to an SFC. Odd numbered physical instances have an SFC only if
+        * the previous even instance is fused off.
+        */
+       if (GRAPHICS_VER(i915) == 12)
+               return (physical_vdbox % 2 == 0) ||
+                       !(BIT(physical_vdbox - 1) & vdbox_mask);
+       else if (GRAPHICS_VER(i915) == 11)
+               return logical_vdbox % 2 == 0;
+
+       MISSING_CASE(GRAPHICS_VER(i915));
+       return false;
+}
+
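/*
 * Worked example for the Gen12 rule above (editorial illustration, not part
 * of the patch): with vdbox_mask == 0b0110 (physical VCS1 and VCS2 present),
 * VCS2 gets an SFC because it is even, and VCS1 gets one because its even
 * partner VCS0 (BIT(0)) is fused off. With vdbox_mask == 0b0011, VCS0 has an
 * SFC but VCS1 does not, since VCS0 is still present.
 */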
 /*
  * Determine which engines are fused off in our particular hardware.
  * Note that we have a catch-22 situation where we need to be able to access
@@ -471,7 +521,14 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
        if (GRAPHICS_VER(i915) < 11)
                return info->engine_mask;
 
-       media_fuse = ~intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
+       /*
+        * On newer platforms the fusing register is called 'enable' and has
+        * enable semantics, while on older platforms it is called 'disable'
+        * and bits have disable semantics.
+        */
+       media_fuse = intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
+       if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
+               media_fuse = ~media_fuse;
 
        vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
        vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
@@ -489,13 +546,9 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
                        continue;
                }
 
-               /*
-                * In Gen11, only even numbered logical VDBOXes are
-                * hooked up to an SFC (Scaler & Format Converter) unit.
-                * In TGL each VDBOX has access to an SFC.
-                */
-               if (GRAPHICS_VER(i915) >= 12 || logical_vdbox++ % 2 == 0)
+               if (gen11_vdbox_has_sfc(i915, i, logical_vdbox, vdbox_mask))
                        gt->info.vdbox_sfc_access |= BIT(i);
+               logical_vdbox++;
        }
        drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
                vdbox_mask, VDBOX_MASK(gt));
@@ -585,9 +638,6 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine)
        memset(execlists->pending, 0, sizeof(execlists->pending));
        execlists->active =
                memset(execlists->inflight, 0, sizeof(execlists->inflight));
-
-       execlists->queue_priority_hint = INT_MIN;
-       execlists->queue = RB_ROOT_CACHED;
 }
 
 static void cleanup_status_page(struct intel_engine_cs *engine)
@@ -714,11 +764,17 @@ static int engine_setup_common(struct intel_engine_cs *engine)
                goto err_status;
        }
 
+       engine->sched_engine = i915_sched_engine_create(ENGINE_PHYSICAL);
+       if (!engine->sched_engine) {
+               err = -ENOMEM;
+               goto err_sched_engine;
+       }
+       engine->sched_engine->private_data = engine;
+
        err = intel_engine_init_cmd_parser(engine);
        if (err)
                goto err_cmd_parser;
 
-       intel_engine_init_active(engine, ENGINE_PHYSICAL);
        intel_engine_init_execlists(engine);
        intel_engine_init__pm(engine);
        intel_engine_init_retire(engine);
@@ -737,7 +793,9 @@ static int engine_setup_common(struct intel_engine_cs *engine)
        return 0;
 
 err_cmd_parser:
-       intel_breadcrumbs_free(engine->breadcrumbs);
+       i915_sched_engine_put(engine->sched_engine);
+err_sched_engine:
+       intel_breadcrumbs_put(engine->breadcrumbs);
 err_status:
        cleanup_status_page(engine);
        return err;
@@ -775,11 +833,11 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
        frame->rq.ring = &frame->ring;
 
        mutex_lock(&ce->timeline->mutex);
-       spin_lock_irq(&engine->active.lock);
+       spin_lock_irq(&engine->sched_engine->lock);
 
        dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
 
-       spin_unlock_irq(&engine->active.lock);
+       spin_unlock_irq(&engine->sched_engine->lock);
        mutex_unlock(&ce->timeline->mutex);
 
        GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
@@ -788,33 +846,13 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
        return dw;
 }
 
-void
-intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
-{
-       INIT_LIST_HEAD(&engine->active.requests);
-       INIT_LIST_HEAD(&engine->active.hold);
-
-       spin_lock_init(&engine->active.lock);
-       lockdep_set_subclass(&engine->active.lock, subclass);
-
-       /*
-        * Due to an interesting quirk in lockdep's internal debug tracking,
-        * after setting a subclass we must ensure the lock is used. Otherwise,
-        * nr_unused_locks is incremented once too often.
-        */
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       local_irq_disable();
-       lock_map_acquire(&engine->active.lock.dep_map);
-       lock_map_release(&engine->active.lock.dep_map);
-       local_irq_enable();
-#endif
-}
-
-static struct intel_context *
-create_pinned_context(struct intel_engine_cs *engine,
-                     unsigned int hwsp,
-                     struct lock_class_key *key,
-                     const char *name)
+struct intel_context *
+intel_engine_create_pinned_context(struct intel_engine_cs *engine,
+                                  struct i915_address_space *vm,
+                                  unsigned int ring_size,
+                                  unsigned int hwsp,
+                                  struct lock_class_key *key,
+                                  const char *name)
 {
        struct intel_context *ce;
        int err;
@@ -825,6 +863,11 @@ create_pinned_context(struct intel_engine_cs *engine,
 
        __set_bit(CONTEXT_BARRIER_BIT, &ce->flags);
        ce->timeline = page_pack_bits(NULL, hwsp);
+       ce->ring = NULL;
+       ce->ring_size = ring_size;
+
+       i915_vm_put(ce->vm);
+       ce->vm = i915_vm_get(vm);
 
        err = intel_context_pin(ce); /* perma-pin so it is always available */
        if (err) {
@@ -843,7 +886,7 @@ create_pinned_context(struct intel_engine_cs *engine,
        return ce;
 }
 
-static void destroy_pinned_context(struct intel_context *ce)
+void intel_engine_destroy_pinned_context(struct intel_context *ce)
 {
        struct intel_engine_cs *engine = ce->engine;
        struct i915_vma *hwsp = engine->status_page.vma;
@@ -863,8 +906,9 @@ create_kernel_context(struct intel_engine_cs *engine)
 {
        static struct lock_class_key kernel;
 
-       return create_pinned_context(engine, I915_GEM_HWS_SEQNO_ADDR,
-                                    &kernel, "kernel_context");
+       return intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
+                                                 I915_GEM_HWS_SEQNO_ADDR,
+                                                 &kernel, "kernel_context");
 }
 
 /**
@@ -907,7 +951,7 @@ static int engine_init_common(struct intel_engine_cs *engine)
        return 0;
 
 err_context:
-       destroy_pinned_context(ce);
+       intel_engine_destroy_pinned_context(ce);
        return ret;
 }
 
@@ -957,10 +1001,10 @@ int intel_engines_init(struct intel_gt *gt)
  */
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
-       GEM_BUG_ON(!list_empty(&engine->active.requests));
-       tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
+       GEM_BUG_ON(!list_empty(&engine->sched_engine->requests));
 
-       intel_breadcrumbs_free(engine->breadcrumbs);
+       i915_sched_engine_put(engine->sched_engine);
+       intel_breadcrumbs_put(engine->breadcrumbs);
 
        intel_engine_fini_retire(engine);
        intel_engine_cleanup_cmd_parser(engine);
@@ -969,7 +1013,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
                fput(engine->default_state);
 
        if (engine->kernel_context)
-               destroy_pinned_context(engine->kernel_context);
+               intel_engine_destroy_pinned_context(engine->kernel_context);
 
        GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
        cleanup_status_page(engine);
@@ -1105,45 +1149,8 @@ static u32
 read_subslice_reg(const struct intel_engine_cs *engine,
                  int slice, int subslice, i915_reg_t reg)
 {
-       struct drm_i915_private *i915 = engine->i915;
-       struct intel_uncore *uncore = engine->uncore;
-       u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
-       enum forcewake_domains fw_domains;
-
-       if (GRAPHICS_VER(i915) >= 11) {
-               mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
-               mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
-       } else {
-               mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
-               mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
-       }
-
-       fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
-                                                   FW_REG_READ);
-       fw_domains |= intel_uncore_forcewake_for_reg(uncore,
-                                                    GEN8_MCR_SELECTOR,
-                                                    FW_REG_READ | FW_REG_WRITE);
-
-       spin_lock_irq(&uncore->lock);
-       intel_uncore_forcewake_get__locked(uncore, fw_domains);
-
-       old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
-
-       mcr &= ~mcr_mask;
-       mcr |= mcr_ss;
-       intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
-
-       val = intel_uncore_read_fw(uncore, reg);
-
-       mcr &= ~mcr_mask;
-       mcr |= old_mcr & mcr_mask;
-
-       intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
-
-       intel_uncore_forcewake_put__locked(uncore, fw_domains);
-       spin_unlock_irq(&uncore->lock);
-
-       return val;
+       return intel_uncore_read_with_mcr_steering(engine->uncore, reg,
+                                                  slice, subslice);
 }
 
 /* NB: please notice the memset */
@@ -1243,7 +1250,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
 
 void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
 {
-       struct tasklet_struct *t = &engine->execlists.tasklet;
+       struct tasklet_struct *t = &engine->sched_engine->tasklet;
 
        if (!t->callback)
                return;
@@ -1283,7 +1290,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
        intel_engine_flush_submission(engine);
 
        /* ELSP is empty, but there are ready requests? E.g. after reset */
-       if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
+       if (!i915_sched_engine_is_empty(engine->sched_engine))
                return false;
 
        /* Ring stopped? */
@@ -1314,6 +1321,30 @@ bool intel_engines_are_idle(struct intel_gt *gt)
        return true;
 }
 
+bool intel_engine_irq_enable(struct intel_engine_cs *engine)
+{
+       if (!engine->irq_enable)
+               return false;
+
+       /* Caller disables interrupts */
+       spin_lock(&engine->gt->irq_lock);
+       engine->irq_enable(engine);
+       spin_unlock(&engine->gt->irq_lock);
+
+       return true;
+}
+
+void intel_engine_irq_disable(struct intel_engine_cs *engine)
+{
+       if (!engine->irq_disable)
+               return;
+
+       /* Caller disables interrupts */
+       spin_lock(&engine->gt->irq_lock);
+       engine->irq_disable(engine);
+       spin_unlock(&engine->gt->irq_lock);
+}
+
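
Both helpers above take gt->irq_lock with a plain spin_lock() and rely on the caller to have disabled interrupts first. A hedged illustration of that calling convention; the wrapper below is hypothetical and not part of this series.

static bool sketch_arm_engine_irq(struct intel_engine_cs *engine)
{
	unsigned long flags;
	bool armed;

	/* interrupts must already be off around gt->irq_lock */
	local_irq_save(flags);
	armed = intel_engine_irq_enable(engine);
	local_irq_restore(flags);

	return armed;
}
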
 void intel_engines_reset_default_submission(struct intel_gt *gt)
 {
        struct intel_engine_cs *engine;
@@ -1349,7 +1380,7 @@ static struct intel_timeline *get_timeline(struct i915_request *rq)
        struct intel_timeline *tl;
 
        /*
-        * Even though we are holding the engine->active.lock here, there
+        * Even though we are holding the engine->sched_engine->lock here, there
         * is no control over the submission queue per-se and we are
         * inspecting the active state at a random point in time, with an
         * unknown queue. Play safe and make sure the timeline remains valid.
@@ -1504,8 +1535,8 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
 
                drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
                           yesno(test_bit(TASKLET_STATE_SCHED,
-                                         &engine->execlists.tasklet.state)),
-                          enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
+                                         &engine->sched_engine->tasklet.state)),
+                          enableddisabled(!atomic_read(&engine->sched_engine->tasklet.count)),
                           repr_timer(&engine->execlists.preempt),
                           repr_timer(&engine->execlists.timer));
 
@@ -1529,7 +1560,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
                                   idx, hws[idx * 2], hws[idx * 2 + 1]);
                }
 
-               execlists_active_lock_bh(execlists);
+               i915_sched_engine_active_lock_bh(engine->sched_engine);
                rcu_read_lock();
                for (port = execlists->active; (rq = *port); port++) {
                        char hdr[160];
@@ -1560,7 +1591,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
                        i915_request_show(m, rq, hdr, 0);
                }
                rcu_read_unlock();
-               execlists_active_unlock_bh(execlists);
+               i915_sched_engine_active_unlock_bh(engine->sched_engine);
        } else if (GRAPHICS_VER(dev_priv) > 6) {
                drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
                           ENGINE_READ(engine, RING_PP_DIR_BASE));
@@ -1650,6 +1681,98 @@ static void print_properties(struct intel_engine_cs *engine,
                           read_ul(&engine->defaults, p->offset));
 }
 
+static void engine_dump_request(struct i915_request *rq, struct drm_printer *m, const char *msg)
+{
+       struct intel_timeline *tl = get_timeline(rq);
+
+       i915_request_show(m, rq, msg, 0);
+
+       drm_printf(m, "\t\tring->start:  0x%08x\n",
+                  i915_ggtt_offset(rq->ring->vma));
+       drm_printf(m, "\t\tring->head:   0x%08x\n",
+                  rq->ring->head);
+       drm_printf(m, "\t\tring->tail:   0x%08x\n",
+                  rq->ring->tail);
+       drm_printf(m, "\t\tring->emit:   0x%08x\n",
+                  rq->ring->emit);
+       drm_printf(m, "\t\tring->space:  0x%08x\n",
+                  rq->ring->space);
+
+       if (tl) {
+               drm_printf(m, "\t\tring->hwsp:   0x%08x\n",
+                          tl->hwsp_offset);
+               intel_timeline_put(tl);
+       }
+
+       print_request_ring(m, rq);
+
+       if (rq->context->lrc_reg_state) {
+               drm_printf(m, "Logical Ring Context:\n");
+               hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
+       }
+}
+
+void intel_engine_dump_active_requests(struct list_head *requests,
+                                      struct i915_request *hung_rq,
+                                      struct drm_printer *m)
+{
+       struct i915_request *rq;
+       const char *msg;
+       enum i915_request_state state;
+
+       list_for_each_entry(rq, requests, sched.link) {
+               if (rq == hung_rq)
+                       continue;
+
+               state = i915_test_request_state(rq);
+               if (state < I915_REQUEST_QUEUED)
+                       continue;
+
+               if (state == I915_REQUEST_ACTIVE)
+                       msg = "\t\tactive on engine";
+               else
+                       msg = "\t\tactive in queue";
+
+               engine_dump_request(rq, m, msg);
+       }
+}
+
+static void engine_dump_active_requests(struct intel_engine_cs *engine, struct drm_printer *m)
+{
+       struct i915_request *hung_rq = NULL;
+       struct intel_context *ce;
+       bool guc;
+
+       /*
+        * No need for an engine->irq_seqno_barrier() before the seqno reads.
+        * The GPU is still running so requests are still executing and any
+        * hardware reads will be out of date by the time they are reported.
+        * But the intention here is just to report an instantaneous snapshot
+        * so that's fine.
+        */
+       lockdep_assert_held(&engine->sched_engine->lock);
+
+       drm_printf(m, "\tRequests:\n");
+
+       guc = intel_uc_uses_guc_submission(&engine->gt->uc);
+       if (guc) {
+               ce = intel_engine_get_hung_context(engine);
+               if (ce)
+                       hung_rq = intel_context_find_active_request(ce);
+       } else {
+               hung_rq = intel_engine_execlist_find_hung_request(engine);
+       }
+
+       if (hung_rq)
+               engine_dump_request(hung_rq, m, "\t\thung");
+
+       if (guc)
+               intel_guc_dump_active_requests(engine, hung_rq, m);
+       else
+               intel_engine_dump_active_requests(&engine->sched_engine->requests,
+                                                 hung_rq, m);
+}
+
 void intel_engine_dump(struct intel_engine_cs *engine,
                       struct drm_printer *m,
                       const char *header, ...)
@@ -1694,41 +1817,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
                   i915_reset_count(error));
        print_properties(engine, m);
 
-       drm_printf(m, "\tRequests:\n");
+       spin_lock_irqsave(&engine->sched_engine->lock, flags);
+       engine_dump_active_requests(engine, m);
 
-       spin_lock_irqsave(&engine->active.lock, flags);
-       rq = intel_engine_find_active_request(engine);
-       if (rq) {
-               struct intel_timeline *tl = get_timeline(rq);
-
-               i915_request_show(m, rq, "\t\tactive ", 0);
-
-               drm_printf(m, "\t\tring->start:  0x%08x\n",
-                          i915_ggtt_offset(rq->ring->vma));
-               drm_printf(m, "\t\tring->head:   0x%08x\n",
-                          rq->ring->head);
-               drm_printf(m, "\t\tring->tail:   0x%08x\n",
-                          rq->ring->tail);
-               drm_printf(m, "\t\tring->emit:   0x%08x\n",
-                          rq->ring->emit);
-               drm_printf(m, "\t\tring->space:  0x%08x\n",
-                          rq->ring->space);
-
-               if (tl) {
-                       drm_printf(m, "\t\tring->hwsp:   0x%08x\n",
-                                  tl->hwsp_offset);
-                       intel_timeline_put(tl);
-               }
-
-               print_request_ring(m, rq);
-
-               if (rq->context->lrc_reg_state) {
-                       drm_printf(m, "Logical Ring Context:\n");
-                       hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
-               }
-       }
-       drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       drm_printf(m, "\tOn hold?: %lu\n",
+                  list_count(&engine->sched_engine->hold));
+       spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 
        drm_printf(m, "\tMMIO base:  0x%08x\n", engine->mmio_base);
        wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
@@ -1785,18 +1879,32 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
        return total;
 }
 
-static bool match_ring(struct i915_request *rq)
+struct intel_context *
+intel_engine_create_virtual(struct intel_engine_cs **siblings,
+                           unsigned int count)
 {
-       u32 ring = ENGINE_READ(rq->engine, RING_START);
+       if (count == 0)
+               return ERR_PTR(-EINVAL);
+
+       if (count == 1)
+               return intel_context_create(siblings[0]);
 
-       return ring == i915_ggtt_offset(rq->ring->vma);
+       GEM_BUG_ON(!siblings[0]->cops->create_virtual);
+       return siblings[0]->cops->create_virtual(siblings, count);
 }
 
 struct i915_request *
-intel_engine_find_active_request(struct intel_engine_cs *engine)
+intel_engine_execlist_find_hung_request(struct intel_engine_cs *engine)
 {
        struct i915_request *request, *active = NULL;
 
+       /*
+        * This search does not work in GuC submission mode. However, the GuC
+        * will report the hanging context directly to the driver itself. So
+        * the driver should never get here when in GuC mode.
+        */
+       GEM_BUG_ON(intel_uc_uses_guc_submission(&engine->gt->uc));
+
        /*
         * We are called by the error capture, reset and to dump engine
         * state at random points in time. In particular, note that neither is
@@ -1808,7 +1916,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
         * At all other times, we must assume the GPU is still running, but
         * we only care about the snapshot of this moment.
         */
-       lockdep_assert_held(&engine->active.lock);
+       lockdep_assert_held(&engine->sched_engine->lock);
 
        rcu_read_lock();
        request = execlists_active(&engine->execlists);
@@ -1826,15 +1934,9 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
        if (active)
                return active;
 
-       list_for_each_entry(request, &engine->active.requests, sched.link) {
-               if (__i915_request_is_complete(request))
-                       continue;
-
-               if (!__i915_request_has_started(request))
-                       continue;
-
-               /* More than one preemptible request may match! */
-               if (!match_ring(request))
+       list_for_each_entry(request, &engine->sched_engine->requests,
+                           sched.link) {
+               if (i915_test_request_state(request) != I915_REQUEST_ACTIVE)
                        continue;
 
                active = request;
index b99ac41..74775ae 100644 (file)
@@ -70,12 +70,38 @@ static void show_heartbeat(const struct i915_request *rq,
 {
        struct drm_printer p = drm_debug_printer("heartbeat");
 
-       intel_engine_dump(engine, &p,
-                         "%s heartbeat {seqno:%llx:%lld, prio:%d} not ticking\n",
-                         engine->name,
-                         rq->fence.context,
-                         rq->fence.seqno,
-                         rq->sched.attr.priority);
+       if (!rq) {
+               intel_engine_dump(engine, &p,
+                                 "%s heartbeat not ticking\n",
+                                 engine->name);
+       } else {
+               intel_engine_dump(engine, &p,
+                                 "%s heartbeat {seqno:%llx:%lld, prio:%d} not ticking\n",
+                                 engine->name,
+                                 rq->fence.context,
+                                 rq->fence.seqno,
+                                 rq->sched.attr.priority);
+       }
+}
+
+static void
+reset_engine(struct intel_engine_cs *engine, struct i915_request *rq)
+{
+       if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+               show_heartbeat(rq, engine);
+
+       if (intel_engine_uses_guc(engine))
+               /*
+                * GuC itself is toast or GuC's hang detection
+                * is disabled. Either way, we need to find the
+                * hang culprit manually.
+                */
+               intel_guc_find_hung_context(engine);
+
+       intel_gt_handle_error(engine->gt, engine->mask,
+                             I915_ERROR_CAPTURE,
+                             "stopped heartbeat on %s",
+                             engine->name);
 }
 
 static void heartbeat(struct work_struct *wrk)
@@ -102,6 +128,11 @@ static void heartbeat(struct work_struct *wrk)
        if (intel_gt_is_wedged(engine->gt))
                goto out;
 
+       if (i915_sched_engine_disabled(engine->sched_engine)) {
+               reset_engine(engine, engine->heartbeat.systole);
+               goto out;
+       }
+
        if (engine->heartbeat.systole) {
                long delay = READ_ONCE(engine->props.heartbeat_interval_ms);
 
@@ -121,7 +152,7 @@ static void heartbeat(struct work_struct *wrk)
                         * but all other contexts, including the kernel
                         * context are stuck waiting for the signal.
                         */
-               } else if (engine->schedule &&
+               } else if (engine->sched_engine->schedule &&
                           rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
                        /*
                         * Gradually raise the priority of the heartbeat to
@@ -136,16 +167,10 @@ static void heartbeat(struct work_struct *wrk)
                                attr.priority = I915_PRIORITY_BARRIER;
 
                        local_bh_disable();
-                       engine->schedule(rq, &attr);
+                       engine->sched_engine->schedule(rq, &attr);
                        local_bh_enable();
                } else {
-                       if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
-                               show_heartbeat(rq, engine);
-
-                       intel_gt_handle_error(engine->gt, engine->mask,
-                                             I915_ERROR_CAPTURE,
-                                             "stopped heartbeat on %s",
-                                             engine->name);
+                       reset_engine(engine, rq);
                }
 
                rq->emitted_jiffies = jiffies;
@@ -194,6 +219,25 @@ void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
                i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
 }
 
+void intel_gt_unpark_heartbeats(struct intel_gt *gt)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       for_each_engine(engine, gt, id)
+               if (intel_engine_pm_is_awake(engine))
+                       intel_engine_unpark_heartbeat(engine);
+}
+
+void intel_gt_park_heartbeats(struct intel_gt *gt)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       for_each_engine(engine, gt, id)
+               intel_engine_park_heartbeat(engine);
+}
+
 void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
 {
        INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
index a488ea3..5da6d80 100644 (file)
@@ -7,6 +7,7 @@
 #define INTEL_ENGINE_HEARTBEAT_H
 
 struct intel_engine_cs;
+struct intel_gt;
 
 void intel_engine_init_heartbeat(struct intel_engine_cs *engine);
 
@@ -16,6 +17,9 @@ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
 void intel_engine_park_heartbeat(struct intel_engine_cs *engine);
 void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine);
 
+void intel_gt_park_heartbeats(struct intel_gt *gt);
+void intel_gt_unpark_heartbeats(struct intel_gt *gt);
+
 int intel_engine_pulse(struct intel_engine_cs *engine);
 int intel_engine_flush_barriers(struct intel_engine_cs *engine);
 
index 47f4397..1f07ac4 100644 (file)
@@ -275,13 +275,11 @@ static int __engine_park(struct intel_wakeref *wf)
        intel_breadcrumbs_park(engine->breadcrumbs);
 
        /* Must be reset upon idling, or we may miss the busy wakeup. */
-       GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
+       GEM_BUG_ON(engine->sched_engine->queue_priority_hint != INT_MIN);
 
        if (engine->park)
                engine->park(engine);
 
-       engine->execlists.no_priolist = false;
-
        /* While gt calls i915_vma_parked(), we have to break the lock cycle */
        intel_gt_pm_put_async(engine->gt);
        return 0;
index e113f93..ed91bcf 100644 (file)
 #include "i915_pmu.h"
 #include "i915_priolist_types.h"
 #include "i915_selftest.h"
-#include "intel_breadcrumbs_types.h"
 #include "intel_sseu.h"
 #include "intel_timeline_types.h"
 #include "intel_uncore.h"
 #include "intel_wakeref.h"
 #include "intel_workarounds_types.h"
 
-/* Legacy HW Engine ID */
-
-#define RCS0_HW                0
-#define VCS0_HW                1
-#define BCS0_HW                2
-#define VECS0_HW       3
-#define VCS1_HW                4
-#define VCS2_HW                6
-#define VCS3_HW                7
-#define VECS1_HW       12
-
-/* Gen11+ HW Engine class + instance */
+/* HW Engine class + instance */
 #define RENDER_CLASS           0
 #define VIDEO_DECODE_CLASS     1
 #define VIDEO_ENHANCEMENT_CLASS        2
 #define COPY_ENGINE_CLASS      3
 #define OTHER_CLASS            4
 #define MAX_ENGINE_CLASS       4
-#define MAX_ENGINE_INSTANCE    3
+#define MAX_ENGINE_INSTANCE    7
 
 #define I915_MAX_SLICES        3
 #define I915_MAX_SUBSLICES 8
@@ -59,11 +47,13 @@ struct drm_i915_reg_table;
 struct i915_gem_context;
 struct i915_request;
 struct i915_sched_attr;
+struct i915_sched_engine;
 struct intel_gt;
 struct intel_ring;
 struct intel_uncore;
+struct intel_breadcrumbs;
 
-typedef u8 intel_engine_mask_t;
+typedef u32 intel_engine_mask_t;
 #define ALL_ENGINES ((intel_engine_mask_t)~0ul)
 
 struct intel_hw_status_page {
@@ -100,8 +90,8 @@ struct i915_ctx_workarounds {
        struct i915_vma *vma;
 };
 
-#define I915_MAX_VCS   4
-#define I915_MAX_VECS  2
+#define I915_MAX_VCS   8
+#define I915_MAX_VECS  4
 
 /*
  * Engine IDs definitions.
@@ -114,9 +104,15 @@ enum intel_engine_id {
        VCS1,
        VCS2,
        VCS3,
+       VCS4,
+       VCS5,
+       VCS6,
+       VCS7,
 #define _VCS(n) (VCS0 + (n))
        VECS0,
        VECS1,
+       VECS2,
+       VECS3,
 #define _VECS(n) (VECS0 + (n))
        I915_NUM_ENGINES
 #define INVALID_ENGINE ((enum intel_engine_id)-1)
@@ -137,11 +133,6 @@ struct st_preempt_hang {
  * driver and the hardware state for execlist mode of submission.
  */
 struct intel_engine_execlists {
-       /**
-        * @tasklet: softirq tasklet for bottom handler
-        */
-       struct tasklet_struct tasklet;
-
        /**
         * @timer: kick the current context if its timeslice expires
         */
@@ -152,11 +143,6 @@ struct intel_engine_execlists {
         */
        struct timer_list preempt;
 
-       /**
-        * @default_priolist: priority list for I915_PRIORITY_NORMAL
-        */
-       struct i915_priolist default_priolist;
-
        /**
         * @ccid: identifier for contexts submitted to this engine
         */
@@ -191,11 +177,6 @@ struct intel_engine_execlists {
         */
        u32 reset_ccid;
 
-       /**
-        * @no_priolist: priority lists disabled
-        */
-       bool no_priolist;
-
        /**
         * @submit_reg: gen-specific execlist submission register
         * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
@@ -238,23 +219,10 @@ struct intel_engine_execlists {
        unsigned int port_mask;
 
        /**
-        * @queue_priority_hint: Highest pending priority.
-        *
-        * When we add requests into the queue, or adjust the priority of
-        * executing requests, we compute the maximum priority of those
-        * pending requests. We can then use this value to determine if
-        * we need to preempt the executing requests to service the queue.
-        * However, since the we may have recorded the priority of an inflight
-        * request we wanted to preempt but since completed, at the time of
-        * dequeuing the priority hint may no longer may match the highest
-        * available request priority.
+        * @virtual: Queue of requests on a virtual engine, sorted by priority.
+        * Each RB entry is a struct i915_priolist containing a list of requests
+        * of the same priority.
         */
-       int queue_priority_hint;
-
-       /**
-        * @queue: queue of requests, in priority lists
-        */
-       struct rb_root_cached queue;
        struct rb_root_cached virtual;
 
        /**
@@ -295,7 +263,6 @@ struct intel_engine_cs {
        enum intel_engine_id id;
        enum intel_engine_id legacy_idx;
 
-       unsigned int hw_id;
        unsigned int guc_id;
 
        intel_engine_mask_t mask;
@@ -326,15 +293,13 @@ struct intel_engine_cs {
 
        struct intel_sseu sseu;
 
-       struct {
-               spinlock_t lock;
-               struct list_head requests;
-               struct list_head hold; /* ready requests, but on hold */
-       } active;
+       struct i915_sched_engine *sched_engine;
 
        /* keep a request in reserve for a [pm] barrier under oom */
        struct i915_request *request_pool;
 
+       struct intel_context *hung_ce;
+
        struct llist_head barrier_tasks;
 
        struct intel_context *kernel_context; /* pinned */
@@ -419,6 +384,8 @@ struct intel_engine_cs {
        void            (*park)(struct intel_engine_cs *engine);
        void            (*unpark)(struct intel_engine_cs *engine);
 
+       void            (*bump_serial)(struct intel_engine_cs *engine);
+
        void            (*set_default_submission)(struct intel_engine_cs *engine);
 
        const struct intel_context_ops *cops;
@@ -447,22 +414,13 @@ struct intel_engine_cs {
         */
        void            (*submit_request)(struct i915_request *rq);
 
-       /*
-        * Called on signaling of a SUBMIT_FENCE, passing along the signaling
-        * request down to the bonded pairs.
-        */
-       void            (*bond_execute)(struct i915_request *rq,
-                                       struct dma_fence *signal);
+       void            (*release)(struct intel_engine_cs *engine);
 
        /*
-        * Call when the priority on a request has changed and it and its
-        * dependencies may need rescheduling. Note the request itself may
-        * not be ready to run!
+        * Add / remove request from engine active tracking
         */
-       void            (*schedule)(struct i915_request *request,
-                                   const struct i915_sched_attr *attr);
-
-       void            (*release)(struct intel_engine_cs *engine);
+       void            (*add_active_request)(struct i915_request *rq);
+       void            (*remove_active_request)(struct i915_request *rq);
 
        struct intel_engine_execlists execlists;
 
@@ -485,6 +443,7 @@ struct intel_engine_cs {
 #define I915_ENGINE_IS_VIRTUAL       BIT(5)
 #define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
 #define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
+#define I915_ENGINE_WANT_FORCED_PREEMPTION BIT(8)
        unsigned int flags;
 
        /*
index 3cca7ea..8f8bea0 100644 (file)
@@ -11,6 +11,7 @@
 #include "intel_engine.h"
 #include "intel_engine_user.h"
 #include "intel_gt.h"
+#include "uc/intel_guc_submission.h"
 
 struct intel_engine_cs *
 intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
@@ -108,13 +109,16 @@ static void set_scheduler_caps(struct drm_i915_private *i915)
        for_each_uabi_engine(engine, i915) { /* all engines must agree! */
                int i;
 
-               if (engine->schedule)
+               if (engine->sched_engine->schedule)
                        enabled |= (I915_SCHEDULER_CAP_ENABLED |
                                    I915_SCHEDULER_CAP_PRIORITY);
                else
                        disabled |= (I915_SCHEDULER_CAP_ENABLED |
                                     I915_SCHEDULER_CAP_PRIORITY);
 
+               if (intel_uc_uses_guc_submission(&i915->gt.uc))
+                       enabled |= I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;
+
                for (i = 0; i < ARRAY_SIZE(map); i++) {
                        if (engine->flags & BIT(map[i].engine))
                                enabled |= BIT(map[i].sched);
index fc77592..de5f9c8 100644 (file)
 #include "gen8_engine_cs.h"
 #include "intel_breadcrumbs.h"
 #include "intel_context.h"
+#include "intel_engine_heartbeat.h"
 #include "intel_engine_pm.h"
 #include "intel_engine_stats.h"
 #include "intel_execlists_submission.h"
 #define GEN12_CSB_CTX_VALID(csb_dw) \
        (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID)
 
+#define XEHP_CTX_STATUS_SWITCHED_TO_NEW_QUEUE  BIT(1) /* upper csb dword */
+#define XEHP_CSB_SW_CTX_ID_MASK                        GENMASK(31, 10)
+#define XEHP_IDLE_CTX_ID                       0xFFFF
+#define XEHP_CSB_CTX_VALID(csb_dw) \
+       (FIELD_GET(XEHP_CSB_SW_CTX_ID_MASK, csb_dw) != XEHP_IDLE_CTX_ID)
+
 /* Typical size of the average request (2 pipecontrols and a MI_BB) */
 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */
 
@@ -182,18 +189,6 @@ struct virtual_engine {
                int prio;
        } nodes[I915_NUM_ENGINES];
 
-       /*
-        * Keep track of bonded pairs -- restrictions upon on our selection
-        * of physical engines any particular request may be submitted to.
-        * If we receive a submit-fence from a master engine, we will only
-        * use one of sibling_mask physical engines.
-        */
-       struct ve_bond {
-               const struct intel_engine_cs *master;
-               intel_engine_mask_t sibling_mask;
-       } *bonds;
-       unsigned int num_bonds;
-
        /* And finally, which physical engines this virtual engine maps onto. */
        unsigned int num_siblings;
        struct intel_engine_cs *siblings[];
@@ -205,6 +200,9 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
        return container_of(engine, struct virtual_engine, base);
 }
 
+static struct intel_context *
+execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count);
+
 static struct i915_request *
 __active_request(const struct intel_timeline * const tl,
                 struct i915_request *rq,
@@ -273,11 +271,11 @@ static int effective_prio(const struct i915_request *rq)
        return prio;
 }
 
-static int queue_prio(const struct intel_engine_execlists *execlists)
+static int queue_prio(const struct i915_sched_engine *sched_engine)
 {
        struct rb_node *rb;
 
-       rb = rb_first_cached(&execlists->queue);
+       rb = rb_first_cached(&sched_engine->queue);
        if (!rb)
                return INT_MIN;
 
@@ -318,14 +316,14 @@ static bool need_preempt(const struct intel_engine_cs *engine,
         * to preserve FIFO ordering of dependencies.
         */
        last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
-       if (engine->execlists.queue_priority_hint <= last_prio)
+       if (engine->sched_engine->queue_priority_hint <= last_prio)
                return false;
 
        /*
         * Check against the first request in ELSP[1], it will, thanks to the
         * power of PI, be the highest priority of that context.
         */
-       if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
+       if (!list_is_last(&rq->sched.link, &engine->sched_engine->requests) &&
            rq_prio(list_next_entry(rq, sched.link)) > last_prio)
                return true;
 
@@ -340,7 +338,7 @@ static bool need_preempt(const struct intel_engine_cs *engine,
         * context, it's priority would not exceed ELSP[0] aka last_prio.
         */
        return max(virtual_prio(&engine->execlists),
-                  queue_prio(&engine->execlists)) > last_prio;
+                  queue_prio(engine->sched_engine)) > last_prio;
 }
 
 __maybe_unused static bool
@@ -367,10 +365,10 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
        struct list_head *pl;
        int prio = I915_PRIORITY_INVALID;
 
-       lockdep_assert_held(&engine->active.lock);
+       lockdep_assert_held(&engine->sched_engine->lock);
 
        list_for_each_entry_safe_reverse(rq, rn,
-                                        &engine->active.requests,
+                                        &engine->sched_engine->requests,
                                         sched.link) {
                if (__i915_request_is_complete(rq)) {
                        list_del_init(&rq->sched.link);
@@ -382,9 +380,10 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
                GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
                if (rq_prio(rq) != prio) {
                        prio = rq_prio(rq);
-                       pl = i915_sched_lookup_priolist(engine, prio);
+                       pl = i915_sched_lookup_priolist(engine->sched_engine,
+                                                       prio);
                }
-               GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+               GEM_BUG_ON(i915_sched_engine_is_empty(engine->sched_engine));
 
                list_move(&rq->sched.link, pl);
                set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
@@ -489,6 +488,16 @@ __execlists_schedule_in(struct i915_request *rq)
                /* Use a fixed tag for OA and friends */
                GEM_BUG_ON(ce->tag <= BITS_PER_LONG);
                ce->lrc.ccid = ce->tag;
+       } else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+               /* We don't need a strict matching tag, just different values */
+               unsigned int tag = ffs(READ_ONCE(engine->context_tag));
+
+               GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG);
+               clear_bit(tag - 1, &engine->context_tag);
+               ce->lrc.ccid = tag << (XEHP_SW_CTX_ID_SHIFT - 32);
+
+               BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID);
+
        } else {
                /* We don't need a strict matching tag, just different values */
                unsigned int tag = __ffs(engine->context_tag);
@@ -534,13 +543,13 @@ resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
 {
        struct intel_engine_cs *engine = rq->engine;
 
-       spin_lock_irq(&engine->active.lock);
+       spin_lock_irq(&engine->sched_engine->lock);
 
        clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
        WRITE_ONCE(rq->engine, &ve->base);
        ve->base.submit_request(rq);
 
-       spin_unlock_irq(&engine->active.lock);
+       spin_unlock_irq(&engine->sched_engine->lock);
 }
 
 static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
@@ -569,7 +578,7 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
                resubmit_virtual_request(rq, ve);
 
        if (READ_ONCE(ve->request))
-               tasklet_hi_schedule(&ve->base.execlists.tasklet);
+               tasklet_hi_schedule(&ve->base.sched_engine->tasklet);
 }
 
 static void __execlists_schedule_out(struct i915_request * const rq,
@@ -579,7 +588,7 @@ static void __execlists_schedule_out(struct i915_request * const rq,
        unsigned int ccid;
 
        /*
-        * NB process_csb() is not under the engine->active.lock and hence
+        * NB process_csb() is not under the engine->sched_engine->lock and hence
         * schedule_out can race with schedule_in meaning that we should
         * refrain from doing non-trivial work here.
         */
@@ -599,8 +608,14 @@ static void __execlists_schedule_out(struct i915_request * const rq,
                intel_engine_add_retire(engine, ce->timeline);
 
        ccid = ce->lrc.ccid;
-       ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
-       ccid &= GEN12_MAX_CONTEXT_HW_ID;
+       if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) {
+               ccid >>= XEHP_SW_CTX_ID_SHIFT - 32;
+               ccid &= XEHP_MAX_CONTEXT_HW_ID;
+       } else {
+               ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
+               ccid &= GEN12_MAX_CONTEXT_HW_ID;
+       }
+
        if (ccid < BITS_PER_LONG) {
                GEM_BUG_ON(ccid == 0);
                GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag));
@@ -738,9 +753,9 @@ trace_ports(const struct intel_engine_execlists *execlists,
 }
 
 static bool
-reset_in_progress(const struct intel_engine_execlists *execlists)
+reset_in_progress(const struct intel_engine_cs *engine)
 {
-       return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+       return unlikely(!__tasklet_is_enabled(&engine->sched_engine->tasklet));
 }
 
 static __maybe_unused noinline bool
@@ -756,7 +771,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
        trace_ports(execlists, msg, execlists->pending);
 
        /* We may be messing around with the lists during reset, lalala */
-       if (reset_in_progress(execlists))
+       if (reset_in_progress(engine))
                return true;
 
        if (!execlists->pending[0]) {
@@ -1096,7 +1111,8 @@ static void defer_active(struct intel_engine_cs *engine)
        if (!rq)
                return;
 
-       defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
+       defer_request(rq, i915_sched_lookup_priolist(engine->sched_engine,
+                                                    rq_prio(rq)));
 }
 
 static bool
@@ -1133,13 +1149,14 @@ static bool needs_timeslice(const struct intel_engine_cs *engine,
                return false;
 
        /* If ELSP[1] is occupied, always check to see if worth slicing */
-       if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests)) {
+       if (!list_is_last_rcu(&rq->sched.link,
+                             &engine->sched_engine->requests)) {
                ENGINE_TRACE(engine, "timeslice required for second inflight context\n");
                return true;
        }
 
        /* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
-       if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) {
+       if (!i915_sched_engine_is_empty(engine->sched_engine)) {
                ENGINE_TRACE(engine, "timeslice required for queue\n");
                return true;
        }
@@ -1187,7 +1204,7 @@ static void start_timeslice(struct intel_engine_cs *engine)
                         * its timeslice, so recheck.
                         */
                        if (!timer_pending(&el->timer))
-                               tasklet_hi_schedule(&el->tasklet);
+                               tasklet_hi_schedule(&engine->sched_engine->tasklet);
                        return;
                }
 
@@ -1236,6 +1253,7 @@ static bool completed(const struct i915_request *rq)
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct i915_sched_engine * const sched_engine = engine->sched_engine;
        struct i915_request **port = execlists->pending;
        struct i915_request ** const last_port = port + execlists->port_mask;
        struct i915_request *last, * const *active;
@@ -1265,7 +1283,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
         * and context switches) submission.
         */
 
-       spin_lock(&engine->active.lock);
+       spin_lock(&sched_engine->lock);
 
        /*
         * If the queue is higher priority than the last
@@ -1287,7 +1305,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                     last->fence.context,
                                     last->fence.seqno,
                                     last->sched.attr.priority,
-                                    execlists->queue_priority_hint);
+                                    sched_engine->queue_priority_hint);
                        record_preemption(execlists);
 
                        /*
@@ -1313,7 +1331,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                     yesno(timer_expired(&execlists->timer)),
                                     last->fence.context, last->fence.seqno,
                                     rq_prio(last),
-                                    execlists->queue_priority_hint,
+                                    sched_engine->queue_priority_hint,
                                     yesno(timeslice_yield(execlists, last)));
 
                        /*
@@ -1365,7 +1383,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                 * Even if ELSP[1] is occupied and not worthy
                                 * of timeslices, our queue might be.
                                 */
-                               spin_unlock(&engine->active.lock);
+                               spin_unlock(&sched_engine->lock);
                                return;
                        }
                }
@@ -1375,7 +1393,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
        while ((ve = first_virtual_engine(engine))) {
                struct i915_request *rq;
 
-               spin_lock(&ve->base.active.lock);
+               spin_lock(&ve->base.sched_engine->lock);
 
                rq = ve->request;
                if (unlikely(!virtual_matches(ve, rq, engine)))
@@ -1384,14 +1402,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                GEM_BUG_ON(rq->engine != &ve->base);
                GEM_BUG_ON(rq->context != &ve->context);
 
-               if (unlikely(rq_prio(rq) < queue_prio(execlists))) {
-                       spin_unlock(&ve->base.active.lock);
+               if (unlikely(rq_prio(rq) < queue_prio(sched_engine))) {
+                       spin_unlock(&ve->base.sched_engine->lock);
                        break;
                }
 
                if (last && !can_merge_rq(last, rq)) {
-                       spin_unlock(&ve->base.active.lock);
-                       spin_unlock(&engine->active.lock);
+                       spin_unlock(&ve->base.sched_engine->lock);
+                       spin_unlock(&engine->sched_engine->lock);
                        return; /* leave this for another sibling */
                }
 
@@ -1405,7 +1423,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                             yesno(engine != ve->siblings[0]));
 
                WRITE_ONCE(ve->request, NULL);
-               WRITE_ONCE(ve->base.execlists.queue_priority_hint, INT_MIN);
+               WRITE_ONCE(ve->base.sched_engine->queue_priority_hint, INT_MIN);
 
                rb = &ve->nodes[engine->id].rb;
                rb_erase_cached(rb, &execlists->virtual);
@@ -1437,7 +1455,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
                i915_request_put(rq);
 unlock:
-               spin_unlock(&ve->base.active.lock);
+               spin_unlock(&ve->base.sched_engine->lock);
 
                /*
                 * Hmm, we have a bunch of virtual engine requests,
@@ -1450,7 +1468,7 @@ unlock:
                        break;
        }
 
-       while ((rb = rb_first_cached(&execlists->queue))) {
+       while ((rb = rb_first_cached(&sched_engine->queue))) {
                struct i915_priolist *p = to_priolist(rb);
                struct i915_request *rq, *rn;
 
@@ -1529,7 +1547,7 @@ unlock:
                        }
                }
 
-               rb_erase_cached(&p->node, &execlists->queue);
+               rb_erase_cached(&p->node, &sched_engine->queue);
                i915_priolist_free(p);
        }
 done:
@@ -1551,8 +1569,9 @@ done:
         * request triggering preemption on the next dequeue (or subsequent
         * interrupt for secondary ports).
         */
-       execlists->queue_priority_hint = queue_prio(execlists);
-       spin_unlock(&engine->active.lock);
+       sched_engine->queue_priority_hint = queue_prio(sched_engine);
+       i915_sched_engine_reset_on_empty(sched_engine);
+       spin_unlock(&sched_engine->lock);
 
        /*
         * We can skip poking the HW if we ended up with exactly the same set
@@ -1655,13 +1674,24 @@ static void invalidate_csb_entries(const u64 *first, const u64 *last)
  *     bits 44-46: reserved
  *     bits 47-57: sw context id of the lrc the GT switched away from
  *     bits 58-63: sw counter of the lrc the GT switched away from
+ *
+ * Xe_HP csb shuffles things around compared to TGL:
+ *
+ *     bits 0-3:   context switch detail (same possible values as TGL)
+ *     bits 4-9:   engine instance
+ *     bits 10-25: sw context id of the lrc the GT switched to
+ *     bits 26-31: sw counter of the lrc the GT switched to
+ *     bit  32:    semaphore wait mode (poll or signal), only valid when
+ *                 switch detail is set to "wait on semaphore"
+ *     bit  33:    switched to new queue
+ *     bits 34-41: wait detail (for switch detail 1 to 4)
+ *     bits 42-57: sw context id of the lrc the GT switched away from
+ *     bits 58-63: sw counter of the lrc the GT switched away from
  */
-static bool gen12_csb_parse(const u64 csb)
+static inline bool
+__gen12_csb_parse(bool ctx_to_valid, bool ctx_away_valid, bool new_queue,
+                 u8 switch_detail)
 {
-       bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb));
-       bool new_queue =
-               lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE;
-
        /*
         * The context switch detail is not guaranteed to be 5 when a preemption
         * occurs, so we can't just check for that. The check below works for
@@ -1670,7 +1700,7 @@ static bool gen12_csb_parse(const u64 csb)
         * would require some extra handling, but we don't support that.
         */
        if (!ctx_away_valid || new_queue) {
-               GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb)));
+               GEM_BUG_ON(!ctx_to_valid);
                return true;
        }
 
@@ -1679,10 +1709,26 @@ static bool gen12_csb_parse(const u64 csb)
         * context switch on an unsuccessful wait instruction since we always
         * use polling mode.
         */
-       GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
+       GEM_BUG_ON(switch_detail);
        return false;
 }
 
+static bool xehp_csb_parse(const u64 csb)
+{
+       return __gen12_csb_parse(XEHP_CSB_CTX_VALID(lower_32_bits(csb)), /* ctx to */
+                                XEHP_CSB_CTX_VALID(upper_32_bits(csb)), /* ctx away */
+                                upper_32_bits(csb) & XEHP_CTX_STATUS_SWITCHED_TO_NEW_QUEUE,
+                                GEN12_CTX_SWITCH_DETAIL(lower_32_bits(csb)));
+}
+
+static bool gen12_csb_parse(const u64 csb)
+{
+       return __gen12_csb_parse(GEN12_CSB_CTX_VALID(lower_32_bits(csb)), /* ctx to */
+                                GEN12_CSB_CTX_VALID(upper_32_bits(csb)), /* ctx away */
+                                lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE,
+                                GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb)));
+}
+
 static bool gen8_csb_parse(const u64 csb)
 {
        return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED);
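The Xe_HP CSB layout documented above maps directly onto plain shift-and-mask extraction. As a standalone sanity aid (not part of this patch; ordinary C, field positions taken from that comment, sample value made up), decoding a raw 64-bit CSB entry looks roughly like:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: pull the documented Xe_HP CSB fields out of a raw
 * 64-bit entry. Bit ranges mirror the layout comment above. */
static uint64_t csb_field(uint64_t csb, unsigned int lo, unsigned int hi)
{
        /* extract bits [lo, hi] inclusive */
        return (csb >> lo) & ((1ull << (hi - lo + 1)) - 1);
}

int main(void)
{
        uint64_t csb = 0x0000040000400035ull; /* made-up example entry */

        printf("switch detail        : %llu\n", (unsigned long long)csb_field(csb, 0, 3));
        printf("engine instance      : %llu\n", (unsigned long long)csb_field(csb, 4, 9));
        printf("sw ctx id (to)       : %llu\n", (unsigned long long)csb_field(csb, 10, 25));
        printf("switched to new queue: %llu\n", (unsigned long long)csb_field(csb, 33, 33));
        printf("sw ctx id (away)     : %llu\n", (unsigned long long)csb_field(csb, 42, 57));
        return 0;
}

The parsers themselves only need the two validity bits, the new-queue bit and the switch detail, which is why xehp_csb_parse() and gen12_csb_parse() both reduce to the shared __gen12_csb_parse() helper above.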
@@ -1767,8 +1813,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
         * access. Either we are inside the tasklet, or the tasklet is disabled
         * and we assume that is only inside the reset paths and so serialised.
         */
-       GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
-                  !reset_in_progress(execlists));
+       GEM_BUG_ON(!tasklet_is_locked(&engine->sched_engine->tasklet) &&
+                  !reset_in_progress(engine));
 
        /*
         * Note that csb_write, csb_status may be either in HWSP or mmio.
@@ -1847,7 +1893,9 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
                ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
                             head, upper_32_bits(csb), lower_32_bits(csb));
 
-               if (GRAPHICS_VER(engine->i915) >= 12)
+               if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+                       promote = xehp_csb_parse(csb);
+               else if (GRAPHICS_VER(engine->i915) >= 12)
                        promote = gen12_csb_parse(csb);
                else
                        promote = gen8_csb_parse(csb);
@@ -1979,7 +2027,8 @@ static void __execlists_hold(struct i915_request *rq)
                        __i915_request_unsubmit(rq);
 
                clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-               list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+               list_move_tail(&rq->sched.link,
+                              &rq->engine->sched_engine->hold);
                i915_request_set_hold(rq);
                RQ_TRACE(rq, "on hold\n");
 
@@ -2016,7 +2065,7 @@ static bool execlists_hold(struct intel_engine_cs *engine,
        if (i915_request_on_hold(rq))
                return false;
 
-       spin_lock_irq(&engine->active.lock);
+       spin_lock_irq(&engine->sched_engine->lock);
 
        if (__i915_request_is_complete(rq)) { /* too late! */
                rq = NULL;
@@ -2032,10 +2081,10 @@ static bool execlists_hold(struct intel_engine_cs *engine,
        GEM_BUG_ON(i915_request_on_hold(rq));
        GEM_BUG_ON(rq->engine != engine);
        __execlists_hold(rq);
-       GEM_BUG_ON(list_empty(&engine->active.hold));
+       GEM_BUG_ON(list_empty(&engine->sched_engine->hold));
 
 unlock:
-       spin_unlock_irq(&engine->active.lock);
+       spin_unlock_irq(&engine->sched_engine->lock);
        return rq;
 }
 
@@ -2079,7 +2128,7 @@ static void __execlists_unhold(struct i915_request *rq)
 
                i915_request_clear_hold(rq);
                list_move_tail(&rq->sched.link,
-                              i915_sched_lookup_priolist(rq->engine,
+                              i915_sched_lookup_priolist(rq->engine->sched_engine,
                                                          rq_prio(rq)));
                set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 
@@ -2115,7 +2164,7 @@ static void __execlists_unhold(struct i915_request *rq)
 static void execlists_unhold(struct intel_engine_cs *engine,
                             struct i915_request *rq)
 {
-       spin_lock_irq(&engine->active.lock);
+       spin_lock_irq(&engine->sched_engine->lock);
 
        /*
         * Move this request back to the priority queue, and all of its
@@ -2123,12 +2172,12 @@ static void execlists_unhold(struct intel_engine_cs *engine,
         */
        __execlists_unhold(rq);
 
-       if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
-               engine->execlists.queue_priority_hint = rq_prio(rq);
-               tasklet_hi_schedule(&engine->execlists.tasklet);
+       if (rq_prio(rq) > engine->sched_engine->queue_priority_hint) {
+               engine->sched_engine->queue_priority_hint = rq_prio(rq);
+               tasklet_hi_schedule(&engine->sched_engine->tasklet);
        }
 
-       spin_unlock_irq(&engine->active.lock);
+       spin_unlock_irq(&engine->sched_engine->lock);
 }
 
 struct execlists_capture {
@@ -2258,13 +2307,13 @@ static void execlists_capture(struct intel_engine_cs *engine)
        if (!cap)
                return;
 
-       spin_lock_irq(&engine->active.lock);
+       spin_lock_irq(&engine->sched_engine->lock);
        cap->rq = active_context(engine, active_ccid(engine));
        if (cap->rq) {
                cap->rq = active_request(cap->rq->context->timeline, cap->rq);
                cap->rq = i915_request_get_rcu(cap->rq);
        }
-       spin_unlock_irq(&engine->active.lock);
+       spin_unlock_irq(&engine->sched_engine->lock);
        if (!cap->rq)
                goto err_free;
 
@@ -2316,13 +2365,13 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
        ENGINE_TRACE(engine, "reset for %s\n", msg);
 
        /* Mark this tasklet as disabled to avoid waiting for it to complete */
-       tasklet_disable_nosync(&engine->execlists.tasklet);
+       tasklet_disable_nosync(&engine->sched_engine->tasklet);
 
        ring_set_paused(engine, 1); /* Freeze the current request in place */
        execlists_capture(engine);
        intel_engine_reset(engine, msg);
 
-       tasklet_enable(&engine->execlists.tasklet);
+       tasklet_enable(&engine->sched_engine->tasklet);
        clear_and_wake_up_bit(bit, lock);
 }
 
@@ -2345,8 +2394,9 @@ static bool preempt_timeout(const struct intel_engine_cs *const engine)
  */
 static void execlists_submission_tasklet(struct tasklet_struct *t)
 {
-       struct intel_engine_cs * const engine =
-               from_tasklet(engine, t, execlists.tasklet);
+       struct i915_sched_engine *sched_engine =
+               from_tasklet(sched_engine, t, tasklet);
+       struct intel_engine_cs * const engine = sched_engine->private_data;
        struct i915_request *post[2 * EXECLIST_MAX_PORTS];
        struct i915_request **inactive;
 
@@ -2421,13 +2471,16 @@ static void execlists_irq_handler(struct intel_engine_cs *engine, u16 iir)
                intel_engine_signal_breadcrumbs(engine);
 
        if (tasklet)
-               tasklet_hi_schedule(&engine->execlists.tasklet);
+               tasklet_hi_schedule(&engine->sched_engine->tasklet);
 }
 
 static void __execlists_kick(struct intel_engine_execlists *execlists)
 {
+       struct intel_engine_cs *engine =
+               container_of(execlists, typeof(*engine), execlists);
+
        /* Kick the tasklet for some interrupt coalescing and reset handling */
-       tasklet_hi_schedule(&execlists->tasklet);
+       tasklet_hi_schedule(&engine->sched_engine->tasklet);
 }
 
 #define execlists_kick(t, member) \
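__execlists_kick() now has to recover the engine from the embedded execlists struct before it can reach engine->sched_engine->tasklet; that is the usual container_of() step-back. A minimal standalone illustration of the pattern (generic names, not the real i915 types):

#include <stddef.h>

/* Same idea as the kernel's container_of(): step back from a member
 * pointer to the structure that embeds it. */
#define my_container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct execlists_state { int port_mask; };

struct engine {
        int id;
        struct execlists_state execlists;
};

static struct engine *engine_from_execlists(struct execlists_state *el)
{
        return my_container_of(el, struct engine, execlists);
}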
@@ -2448,19 +2501,20 @@ static void queue_request(struct intel_engine_cs *engine,
 {
        GEM_BUG_ON(!list_empty(&rq->sched.link));
        list_add_tail(&rq->sched.link,
-                     i915_sched_lookup_priolist(engine, rq_prio(rq)));
+                     i915_sched_lookup_priolist(engine->sched_engine,
+                                                rq_prio(rq)));
        set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 }
 
 static bool submit_queue(struct intel_engine_cs *engine,
                         const struct i915_request *rq)
 {
-       struct intel_engine_execlists *execlists = &engine->execlists;
+       struct i915_sched_engine *sched_engine = engine->sched_engine;
 
-       if (rq_prio(rq) <= execlists->queue_priority_hint)
+       if (rq_prio(rq) <= sched_engine->queue_priority_hint)
                return false;
 
-       execlists->queue_priority_hint = rq_prio(rq);
+       sched_engine->queue_priority_hint = rq_prio(rq);
        return true;
 }
 
@@ -2468,7 +2522,7 @@ static bool ancestor_on_hold(const struct intel_engine_cs *engine,
                             const struct i915_request *rq)
 {
        GEM_BUG_ON(i915_request_on_hold(rq));
-       return !list_empty(&engine->active.hold) && hold_request(rq);
+       return !list_empty(&engine->sched_engine->hold) && hold_request(rq);
 }
 
 static void execlists_submit_request(struct i915_request *request)
@@ -2477,23 +2531,24 @@ static void execlists_submit_request(struct i915_request *request)
        unsigned long flags;
 
        /* Will be called from irq-context when using foreign fences. */
-       spin_lock_irqsave(&engine->active.lock, flags);
+       spin_lock_irqsave(&engine->sched_engine->lock, flags);
 
        if (unlikely(ancestor_on_hold(engine, request))) {
                RQ_TRACE(request, "ancestor on hold\n");
-               list_add_tail(&request->sched.link, &engine->active.hold);
+               list_add_tail(&request->sched.link,
+                             &engine->sched_engine->hold);
                i915_request_set_hold(request);
        } else {
                queue_request(engine, request);
 
-               GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+               GEM_BUG_ON(i915_sched_engine_is_empty(engine->sched_engine));
                GEM_BUG_ON(list_empty(&request->sched.link));
 
                if (submit_queue(engine, request))
                        __execlists_kick(&engine->execlists);
        }
 
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 }
 
 static int
@@ -2533,11 +2588,26 @@ static int execlists_context_alloc(struct intel_context *ce)
        return lrc_alloc(ce, ce->engine);
 }
 
+static void execlists_context_cancel_request(struct intel_context *ce,
+                                            struct i915_request *rq)
+{
+       struct intel_engine_cs *engine = NULL;
+
+       i915_request_active_engine(rq, &engine);
+
+       if (engine && intel_engine_pulse(engine))
+               intel_gt_handle_error(engine->gt, engine->mask, 0,
+                                     "request cancellation by %s",
+                                     current->comm);
+}
+
 static const struct intel_context_ops execlists_context_ops = {
        .flags = COPS_HAS_INFLIGHT,
 
        .alloc = execlists_context_alloc,
 
+       .cancel_request = execlists_context_cancel_request,
+
        .pre_pin = execlists_context_pre_pin,
        .pin = execlists_context_pin,
        .unpin = lrc_unpin,
@@ -2548,6 +2618,8 @@ static const struct intel_context_ops execlists_context_ops = {
 
        .reset = lrc_reset,
        .destroy = lrc_destroy,
+
+       .create_virtual = execlists_create_virtual,
 };
 
 static int emit_pdps(struct i915_request *rq)
@@ -2800,10 +2872,8 @@ static int execlists_resume(struct intel_engine_cs *engine)
 
 static void execlists_reset_prepare(struct intel_engine_cs *engine)
 {
-       struct intel_engine_execlists * const execlists = &engine->execlists;
-
        ENGINE_TRACE(engine, "depth<-%d\n",
-                    atomic_read(&execlists->tasklet.count));
+                    atomic_read(&engine->sched_engine->tasklet.count));
 
        /*
         * Prevent request submission to the hardware until we have
@@ -2814,8 +2884,8 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
         * Turning off the execlists->tasklet until the reset is over
         * prevents the race.
         */
-       __tasklet_disable_sync_once(&execlists->tasklet);
-       GEM_BUG_ON(!reset_in_progress(execlists));
+       __tasklet_disable_sync_once(&engine->sched_engine->tasklet);
+       GEM_BUG_ON(!reset_in_progress(engine));
 
        /*
         * We stop engines, otherwise we might get failed reset and a
@@ -2957,24 +3027,26 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 
        /* Push back any incomplete requests for replay after the reset. */
        rcu_read_lock();
-       spin_lock_irqsave(&engine->active.lock, flags);
+       spin_lock_irqsave(&engine->sched_engine->lock, flags);
        __unwind_incomplete_requests(engine);
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
        rcu_read_unlock();
 }
 
 static void nop_submission_tasklet(struct tasklet_struct *t)
 {
-       struct intel_engine_cs * const engine =
-               from_tasklet(engine, t, execlists.tasklet);
+       struct i915_sched_engine *sched_engine =
+               from_tasklet(sched_engine, t, tasklet);
+       struct intel_engine_cs * const engine = sched_engine->private_data;
 
        /* The driver is wedged; don't process any more events. */
-       WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
+       WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN);
 }
 
 static void execlists_reset_cancel(struct intel_engine_cs *engine)
 {
        struct intel_engine_execlists * const execlists = &engine->execlists;
+       struct i915_sched_engine * const sched_engine = engine->sched_engine;
        struct i915_request *rq, *rn;
        struct rb_node *rb;
        unsigned long flags;
@@ -2998,15 +3070,15 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
        execlists_reset_csb(engine, true);
 
        rcu_read_lock();
-       spin_lock_irqsave(&engine->active.lock, flags);
+       spin_lock_irqsave(&engine->sched_engine->lock, flags);
 
        /* Mark all executing requests as skipped. */
-       list_for_each_entry(rq, &engine->active.requests, sched.link)
+       list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
                i915_request_put(i915_request_mark_eio(rq));
        intel_engine_signal_breadcrumbs(engine);
 
        /* Flush the queued requests to the timeline list (for retiring). */
-       while ((rb = rb_first_cached(&execlists->queue))) {
+       while ((rb = rb_first_cached(&sched_engine->queue))) {
                struct i915_priolist *p = to_priolist(rb);
 
                priolist_for_each_request_consume(rq, rn, p) {
@@ -3016,12 +3088,12 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
                        }
                }
 
-               rb_erase_cached(&p->node, &execlists->queue);
+               rb_erase_cached(&p->node, &sched_engine->queue);
                i915_priolist_free(p);
        }
 
        /* On-hold requests will be flushed to timeline upon their release */
-       list_for_each_entry(rq, &engine->active.hold, sched.link)
+       list_for_each_entry(rq, &sched_engine->hold, sched.link)
                i915_request_put(i915_request_mark_eio(rq));
 
        /* Cancel all attached virtual engines */
@@ -3032,7 +3104,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
                rb_erase_cached(rb, &execlists->virtual);
                RB_CLEAR_NODE(rb);
 
-               spin_lock(&ve->base.active.lock);
+               spin_lock(&ve->base.sched_engine->lock);
                rq = fetch_and_zero(&ve->request);
                if (rq) {
                        if (i915_request_mark_eio(rq)) {
@@ -3042,20 +3114,20 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
                        }
                        i915_request_put(rq);
 
-                       ve->base.execlists.queue_priority_hint = INT_MIN;
+                       ve->base.sched_engine->queue_priority_hint = INT_MIN;
                }
-               spin_unlock(&ve->base.active.lock);
+               spin_unlock(&ve->base.sched_engine->lock);
        }
 
        /* Remaining _unready_ requests will be nop'ed when submitted */
 
-       execlists->queue_priority_hint = INT_MIN;
-       execlists->queue = RB_ROOT_CACHED;
+       sched_engine->queue_priority_hint = INT_MIN;
+       sched_engine->queue = RB_ROOT_CACHED;
 
-       GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
-       execlists->tasklet.callback = nop_submission_tasklet;
+       GEM_BUG_ON(__tasklet_is_enabled(&engine->sched_engine->tasklet));
+       engine->sched_engine->tasklet.callback = nop_submission_tasklet;
 
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
        rcu_read_unlock();
 }
 
@@ -3073,14 +3145,14 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
         * reset as the next level of recovery, and as a final resort we
         * will declare the device wedged.
         */
-       GEM_BUG_ON(!reset_in_progress(execlists));
+       GEM_BUG_ON(!reset_in_progress(engine));
 
        /* And kick in case we missed a new request submission. */
-       if (__tasklet_enable(&execlists->tasklet))
+       if (__tasklet_enable(&engine->sched_engine->tasklet))
                __execlists_kick(execlists);
 
        ENGINE_TRACE(engine, "depth->%d\n",
-                    atomic_read(&execlists->tasklet.count));
+                    atomic_read(&engine->sched_engine->tasklet.count));
 }
 
 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
@@ -3101,6 +3173,42 @@ static void execlists_park(struct intel_engine_cs *engine)
        cancel_timer(&engine->execlists.preempt);
 }
 
+static void add_to_engine(struct i915_request *rq)
+{
+       lockdep_assert_held(&rq->engine->sched_engine->lock);
+       list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
+}
+
+static void remove_from_engine(struct i915_request *rq)
+{
+       struct intel_engine_cs *engine, *locked;
+
+       /*
+        * Virtual engines complicate acquiring the engine timeline lock,
+        * as their rq->engine pointer is not stable until under that
+        * engine lock. The simple ploy we use is to take the lock then
+        * check that the rq still belongs to the newly locked engine.
+        */
+       locked = READ_ONCE(rq->engine);
+       spin_lock_irq(&locked->sched_engine->lock);
+       while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+               spin_unlock(&locked->sched_engine->lock);
+               spin_lock(&engine->sched_engine->lock);
+               locked = engine;
+       }
+       list_del_init(&rq->sched.link);
+
+       clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+       clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
+
+       /* Prevent further __await_execution() registering a cb, then flush */
+       set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+
+       spin_unlock_irq(&locked->sched_engine->lock);
+
+       i915_request_notify_execute_cb_imm(rq);
+}
+
 static bool can_preempt(struct intel_engine_cs *engine)
 {
        if (GRAPHICS_VER(engine->i915) > 8)
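The comment in remove_from_engine() above describes the lock-then-recheck dance needed because rq->engine can be rewritten by the virtual-engine code until the engine lock is held. A rough standalone sketch of that pattern (pthread-based, illustrative names, not the i915 API):

#include <pthread.h>
#include <stdatomic.h>

struct engine { pthread_mutex_t lock; };
struct request { _Atomic(struct engine *) engine; };

/* Lock whichever engine the request currently points at, then re-check
 * the pointer under that lock and chase it if the request migrated. */
static struct engine *lock_request_engine(struct request *rq)
{
        struct engine *locked, *cur;

        locked = atomic_load(&rq->engine);
        pthread_mutex_lock(&locked->lock);
        while ((cur = atomic_load(&rq->engine)) != locked) {
                pthread_mutex_unlock(&locked->lock);
                pthread_mutex_lock(&cur->lock);
                locked = cur;
        }
        return locked;  /* caller drops locked->lock when done */
}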
@@ -3110,11 +3218,62 @@ static bool can_preempt(struct intel_engine_cs *engine)
        return engine->class != RENDER_CLASS;
 }
 
+static void kick_execlists(const struct i915_request *rq, int prio)
+{
+       struct intel_engine_cs *engine = rq->engine;
+       struct i915_sched_engine *sched_engine = engine->sched_engine;
+       const struct i915_request *inflight;
+
+       /*
+        * We only need to kick the tasklet once for the high priority
+        * new context we add into the queue.
+        */
+       if (prio <= sched_engine->queue_priority_hint)
+               return;
+
+       rcu_read_lock();
+
+       /* Nothing currently active? We're overdue for a submission! */
+       inflight = execlists_active(&engine->execlists);
+       if (!inflight)
+               goto unlock;
+
+       /*
+        * If we are already the currently executing context, don't
+        * bother evaluating if we should preempt ourselves.
+        */
+       if (inflight->context == rq->context)
+               goto unlock;
+
+       ENGINE_TRACE(engine,
+                    "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
+                    prio,
+                    rq->fence.context, rq->fence.seqno,
+                    inflight->fence.context, inflight->fence.seqno,
+                    inflight->sched.attr.priority);
+
+       sched_engine->queue_priority_hint = prio;
+
+       /*
+        * Allow preemption of low -> normal -> high, but we do
+        * not allow low priority tasks to preempt other low priority
+        * tasks under the impression that latency for low priority
+        * tasks does not matter (as much as background throughput),
+        * so kiss.
+        */
+       if (prio >= max(I915_PRIORITY_NORMAL, rq_prio(inflight)))
+               tasklet_hi_schedule(&sched_engine->tasklet);
+
+unlock:
+       rcu_read_unlock();
+}
+
 static void execlists_set_default_submission(struct intel_engine_cs *engine)
 {
        engine->submit_request = execlists_submit_request;
-       engine->schedule = i915_schedule;
-       engine->execlists.tasklet.callback = execlists_submission_tasklet;
+       engine->sched_engine->schedule = i915_schedule;
+       engine->sched_engine->kick_backend = kick_execlists;
+       engine->sched_engine->tasklet.callback = execlists_submission_tasklet;
 }
 
 static void execlists_shutdown(struct intel_engine_cs *engine)
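kick_execlists() above first bails out when nothing is inflight or when the new request belongs to the context already executing; past those early-outs, the core decision is to bump the queue priority hint but only schedule the tasklet when the new priority beats both I915_PRIORITY_NORMAL and the inflight request. A compressed restatement of that final check in plain C (I915_PRIORITY_NORMAL assumed to be 0 here, purely for illustration):

#include <stdbool.h>

#define PRIO_NORMAL 0   /* stand-in for I915_PRIORITY_NORMAL */

static bool should_kick_tasklet(int new_prio, int queue_hint, int inflight_prio)
{
        if (new_prio <= queue_hint)
                return false;   /* an equal or better kick is already pending */

        /* allow low -> normal -> high preemption, but never let one
         * low-priority request preempt another low-priority request */
        return new_prio >= (inflight_prio > PRIO_NORMAL ? inflight_prio
                                                        : PRIO_NORMAL);
}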
@@ -3122,7 +3281,7 @@ static void execlists_shutdown(struct intel_engine_cs *engine)
        /* Synchronise with residual timers and any softirq they raise */
        del_timer_sync(&engine->execlists.timer);
        del_timer_sync(&engine->execlists.preempt);
-       tasklet_kill(&engine->execlists.tasklet);
+       tasklet_kill(&engine->sched_engine->tasklet);
 }
 
 static void execlists_release(struct intel_engine_cs *engine)
@@ -3144,6 +3303,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 
        engine->cops = &execlists_context_ops;
        engine->request_alloc = execlists_request_alloc;
+       engine->add_active_request = add_to_engine;
+       engine->remove_active_request = remove_from_engine;
 
        engine->reset.prepare = execlists_reset_prepare;
        engine->reset.rewind = execlists_reset_rewind;
@@ -3238,7 +3399,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
        struct intel_uncore *uncore = engine->uncore;
        u32 base = engine->mmio_base;
 
-       tasklet_setup(&engine->execlists.tasklet, execlists_submission_tasklet);
+       tasklet_setup(&engine->sched_engine->tasklet, execlists_submission_tasklet);
        timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
        timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
 
@@ -3255,6 +3416,10 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
                        i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
                execlists->ctrl_reg = uncore->regs +
                        i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
+
+               engine->fw_domain = intel_uncore_forcewake_for_reg(engine->uncore,
+                                   RING_EXECLIST_CONTROL(engine->mmio_base),
+                                   FW_REG_WRITE);
        } else {
                execlists->submit_reg = uncore->regs +
                        i915_mmio_reg_offset(RING_ELSP(base));
@@ -3272,7 +3437,8 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
                execlists->csb_size = GEN11_CSB_ENTRIES;
 
        engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
-       if (GRAPHICS_VER(engine->i915) >= 11) {
+       if (GRAPHICS_VER(engine->i915) >= 11 &&
+           GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 50)) {
                execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
                execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
        }
@@ -3286,7 +3452,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 
 static struct list_head *virtual_queue(struct virtual_engine *ve)
 {
-       return &ve->base.execlists.default_priolist.requests;
+       return &ve->base.sched_engine->default_priolist.requests;
 }
 
 static void rcu_virtual_context_destroy(struct work_struct *wrk)
@@ -3301,7 +3467,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
        if (unlikely(ve->request)) {
                struct i915_request *old;
 
-               spin_lock_irq(&ve->base.active.lock);
+               spin_lock_irq(&ve->base.sched_engine->lock);
 
                old = fetch_and_zero(&ve->request);
                if (old) {
@@ -3310,7 +3476,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
                        i915_request_put(old);
                }
 
-               spin_unlock_irq(&ve->base.active.lock);
+               spin_unlock_irq(&ve->base.sched_engine->lock);
        }
 
        /*
@@ -3320,7 +3486,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
         * rbtrees as in the case it is running in parallel, it may reinsert
         * the rb_node into a sibling.
         */
-       tasklet_kill(&ve->base.execlists.tasklet);
+       tasklet_kill(&ve->base.sched_engine->tasklet);
 
        /* Decouple ourselves from the siblings, no more access allowed. */
        for (n = 0; n < ve->num_siblings; n++) {
@@ -3330,24 +3496,26 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
                if (RB_EMPTY_NODE(node))
                        continue;
 
-               spin_lock_irq(&sibling->active.lock);
+               spin_lock_irq(&sibling->sched_engine->lock);
 
-               /* Detachment is lazily performed in the execlists tasklet */
+               /* Detachment is lazily performed in the sched_engine->tasklet */
                if (!RB_EMPTY_NODE(node))
                        rb_erase_cached(node, &sibling->execlists.virtual);
 
-               spin_unlock_irq(&sibling->active.lock);
+               spin_unlock_irq(&sibling->sched_engine->lock);
        }
-       GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+       GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.sched_engine->tasklet));
        GEM_BUG_ON(!list_empty(virtual_queue(ve)));
 
        lrc_fini(&ve->context);
        intel_context_fini(&ve->context);
 
-       intel_breadcrumbs_free(ve->base.breadcrumbs);
+       if (ve->base.breadcrumbs)
+               intel_breadcrumbs_put(ve->base.breadcrumbs);
+       if (ve->base.sched_engine)
+               i915_sched_engine_put(ve->base.sched_engine);
        intel_engine_free_request_pool(&ve->base);
 
-       kfree(ve->bonds);
        kfree(ve);
 }
 
@@ -3440,11 +3608,24 @@ static void virtual_context_exit(struct intel_context *ce)
                intel_engine_pm_put(ve->siblings[n]);
 }
 
+static struct intel_engine_cs *
+virtual_get_sibling(struct intel_engine_cs *engine, unsigned int sibling)
+{
+       struct virtual_engine *ve = to_virtual_engine(engine);
+
+       if (sibling >= ve->num_siblings)
+               return NULL;
+
+       return ve->siblings[sibling];
+}
+
 static const struct intel_context_ops virtual_context_ops = {
        .flags = COPS_HAS_INFLIGHT,
 
        .alloc = virtual_context_alloc,
 
+       .cancel_request = execlists_context_cancel_request,
+
        .pre_pin = virtual_context_pre_pin,
        .pin = virtual_context_pin,
        .unpin = lrc_unpin,
@@ -3454,6 +3635,8 @@ static const struct intel_context_ops virtual_context_ops = {
        .exit = virtual_context_exit,
 
        .destroy = virtual_context_destroy,
+
+       .get_sibling = virtual_get_sibling,
 };
 
 static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
@@ -3475,16 +3658,18 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
 
        ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
                     rq->fence.context, rq->fence.seqno,
-                    mask, ve->base.execlists.queue_priority_hint);
+                    mask, ve->base.sched_engine->queue_priority_hint);
 
        return mask;
 }
 
 static void virtual_submission_tasklet(struct tasklet_struct *t)
 {
+       struct i915_sched_engine *sched_engine =
+               from_tasklet(sched_engine, t, tasklet);
        struct virtual_engine * const ve =
-               from_tasklet(ve, t, base.execlists.tasklet);
-       const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
+               (struct virtual_engine *)sched_engine->private_data;
+       const int prio = READ_ONCE(sched_engine->queue_priority_hint);
        intel_engine_mask_t mask;
        unsigned int n;
 
@@ -3503,7 +3688,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
                if (!READ_ONCE(ve->request))
                        break; /* already handled by a sibling's tasklet */
 
-               spin_lock_irq(&sibling->active.lock);
+               spin_lock_irq(&sibling->sched_engine->lock);
 
                if (unlikely(!(mask & sibling->mask))) {
                        if (!RB_EMPTY_NODE(&node->rb)) {
@@ -3552,11 +3737,11 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
 submit_engine:
                GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
                node->prio = prio;
-               if (first && prio > sibling->execlists.queue_priority_hint)
-                       tasklet_hi_schedule(&sibling->execlists.tasklet);
+               if (first && prio > sibling->sched_engine->queue_priority_hint)
+                       tasklet_hi_schedule(&sibling->sched_engine->tasklet);
 
 unlock_engine:
-               spin_unlock_irq(&sibling->active.lock);
+               spin_unlock_irq(&sibling->sched_engine->lock);
 
                if (intel_context_inflight(&ve->context))
                        break;
@@ -3574,7 +3759,7 @@ static void virtual_submit_request(struct i915_request *rq)
 
        GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
 
-       spin_lock_irqsave(&ve->base.active.lock, flags);
+       spin_lock_irqsave(&ve->base.sched_engine->lock, flags);
 
        /* By the time we resubmit a request, it may be completed */
        if (__i915_request_is_complete(rq)) {
@@ -3588,68 +3773,25 @@ static void virtual_submit_request(struct i915_request *rq)
                i915_request_put(ve->request);
        }
 
-       ve->base.execlists.queue_priority_hint = rq_prio(rq);
+       ve->base.sched_engine->queue_priority_hint = rq_prio(rq);
        ve->request = i915_request_get(rq);
 
        GEM_BUG_ON(!list_empty(virtual_queue(ve)));
        list_move_tail(&rq->sched.link, virtual_queue(ve));
 
-       tasklet_hi_schedule(&ve->base.execlists.tasklet);
+       tasklet_hi_schedule(&ve->base.sched_engine->tasklet);
 
 unlock:
-       spin_unlock_irqrestore(&ve->base.active.lock, flags);
+       spin_unlock_irqrestore(&ve->base.sched_engine->lock, flags);
 }
 
-static struct ve_bond *
-virtual_find_bond(struct virtual_engine *ve,
-                 const struct intel_engine_cs *master)
-{
-       int i;
-
-       for (i = 0; i < ve->num_bonds; i++) {
-               if (ve->bonds[i].master == master)
-                       return &ve->bonds[i];
-       }
-
-       return NULL;
-}
-
-static void
-virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
-{
-       struct virtual_engine *ve = to_virtual_engine(rq->engine);
-       intel_engine_mask_t allowed, exec;
-       struct ve_bond *bond;
-
-       allowed = ~to_request(signal)->engine->mask;
-
-       bond = virtual_find_bond(ve, to_request(signal)->engine);
-       if (bond)
-               allowed &= bond->sibling_mask;
-
-       /* Restrict the bonded request to run on only the available engines */
-       exec = READ_ONCE(rq->execution_mask);
-       while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
-               ;
-
-       /* Prevent the master from being re-run on the bonded engines */
-       to_request(signal)->execution_mask &= ~allowed;
-}
-
-struct intel_context *
-intel_execlists_create_virtual(struct intel_engine_cs **siblings,
-                              unsigned int count)
+static struct intel_context *
+execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
 {
        struct virtual_engine *ve;
        unsigned int n;
        int err;
 
-       if (count == 0)
-               return ERR_PTR(-EINVAL);
-
-       if (count == 1)
-               return intel_context_create(siblings[0]);
-
        ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL);
        if (!ve)
                return ERR_PTR(-ENOMEM);
@@ -3681,19 +3823,24 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
 
        snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
 
-       intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
        intel_engine_init_execlists(&ve->base);
 
+       ve->base.sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
+       if (!ve->base.sched_engine) {
+               err = -ENOMEM;
+               goto err_put;
+       }
+       ve->base.sched_engine->private_data = &ve->base;
+
        ve->base.cops = &virtual_context_ops;
        ve->base.request_alloc = execlists_request_alloc;
 
-       ve->base.schedule = i915_schedule;
+       ve->base.sched_engine->schedule = i915_schedule;
+       ve->base.sched_engine->kick_backend = kick_execlists;
        ve->base.submit_request = virtual_submit_request;
-       ve->base.bond_execute = virtual_bond_execute;
 
        INIT_LIST_HEAD(virtual_queue(ve));
-       ve->base.execlists.queue_priority_hint = INT_MIN;
-       tasklet_setup(&ve->base.execlists.tasklet, virtual_submission_tasklet);
+       tasklet_setup(&ve->base.sched_engine->tasklet, virtual_submission_tasklet);
 
        intel_context_init(&ve->context, &ve->base);
 
@@ -3721,7 +3868,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
                 * layering if we handle cloning of the requests and
                 * submitting a copy into each backend.
                 */
-               if (sibling->execlists.tasklet.callback !=
+               if (sibling->sched_engine->tasklet.callback !=
                    execlists_submission_tasklet) {
                        err = -ENODEV;
                        goto err_put;
@@ -3756,6 +3903,8 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
                         "v%dx%d", ve->base.class, count);
                ve->base.context_size = sibling->context_size;
 
+               ve->base.add_active_request = sibling->add_active_request;
+               ve->base.remove_active_request = sibling->remove_active_request;
                ve->base.emit_bb_start = sibling->emit_bb_start;
                ve->base.emit_flush = sibling->emit_flush;
                ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb;
@@ -3776,70 +3925,6 @@ err_put:
        return ERR_PTR(err);
 }
 
-struct intel_context *
-intel_execlists_clone_virtual(struct intel_engine_cs *src)
-{
-       struct virtual_engine *se = to_virtual_engine(src);
-       struct intel_context *dst;
-
-       dst = intel_execlists_create_virtual(se->siblings,
-                                            se->num_siblings);
-       if (IS_ERR(dst))
-               return dst;
-
-       if (se->num_bonds) {
-               struct virtual_engine *de = to_virtual_engine(dst->engine);
-
-               de->bonds = kmemdup(se->bonds,
-                                   sizeof(*se->bonds) * se->num_bonds,
-                                   GFP_KERNEL);
-               if (!de->bonds) {
-                       intel_context_put(dst);
-                       return ERR_PTR(-ENOMEM);
-               }
-
-               de->num_bonds = se->num_bonds;
-       }
-
-       return dst;
-}
-
-int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
-                                    const struct intel_engine_cs *master,
-                                    const struct intel_engine_cs *sibling)
-{
-       struct virtual_engine *ve = to_virtual_engine(engine);
-       struct ve_bond *bond;
-       int n;
-
-       /* Sanity check the sibling is part of the virtual engine */
-       for (n = 0; n < ve->num_siblings; n++)
-               if (sibling == ve->siblings[n])
-                       break;
-       if (n == ve->num_siblings)
-               return -EINVAL;
-
-       bond = virtual_find_bond(ve, master);
-       if (bond) {
-               bond->sibling_mask |= sibling->mask;
-               return 0;
-       }
-
-       bond = krealloc(ve->bonds,
-                       sizeof(*bond) * (ve->num_bonds + 1),
-                       GFP_KERNEL);
-       if (!bond)
-               return -ENOMEM;
-
-       bond[ve->num_bonds].master = master;
-       bond[ve->num_bonds].sibling_mask = sibling->mask;
-
-       ve->bonds = bond;
-       ve->num_bonds++;
-
-       return 0;
-}
-
 void intel_execlists_show_requests(struct intel_engine_cs *engine,
                                   struct drm_printer *m,
                                   void (*show_request)(struct drm_printer *m,
@@ -3849,16 +3934,17 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
                                   unsigned int max)
 {
        const struct intel_engine_execlists *execlists = &engine->execlists;
+       struct i915_sched_engine *sched_engine = engine->sched_engine;
        struct i915_request *rq, *last;
        unsigned long flags;
        unsigned int count;
        struct rb_node *rb;
 
-       spin_lock_irqsave(&engine->active.lock, flags);
+       spin_lock_irqsave(&sched_engine->lock, flags);
 
        last = NULL;
        count = 0;
-       list_for_each_entry(rq, &engine->active.requests, sched.link) {
+       list_for_each_entry(rq, &sched_engine->requests, sched.link) {
                if (count++ < max - 1)
                        show_request(m, rq, "\t\t", 0);
                else
@@ -3873,13 +3959,13 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
                show_request(m, last, "\t\t", 0);
        }
 
-       if (execlists->queue_priority_hint != INT_MIN)
+       if (sched_engine->queue_priority_hint != INT_MIN)
                drm_printf(m, "\t\tQueue priority hint: %d\n",
-                          READ_ONCE(execlists->queue_priority_hint));
+                          READ_ONCE(sched_engine->queue_priority_hint));
 
        last = NULL;
        count = 0;
-       for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+       for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
                struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
 
                priolist_for_each_request(rq, p) {
@@ -3921,7 +4007,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
                show_request(m, last, "\t\t", 0);
        }
 
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       spin_unlock_irqrestore(&sched_engine->lock, flags);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
index 4ca9b47..a1aa92c 100644 (file)
@@ -32,15 +32,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
                                                        int indent),
                                   unsigned int max);
 
-struct intel_context *
-intel_execlists_create_virtual(struct intel_engine_cs **siblings,
-                              unsigned int count);
-
-struct intel_context *
-intel_execlists_clone_virtual(struct intel_engine_cs *src);
-
-int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
-                                    const struct intel_engine_cs *master,
-                                    const struct intel_engine_cs *sibling);
+bool
+intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
 
 #endif /* __INTEL_EXECLISTS_SUBMISSION_H__ */
index 20e46b8..de3ac58 100644 (file)
@@ -826,13 +826,13 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
        phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
 
        /*
-        * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
+        * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
         * will be dropped. For WC mappings in general we have 64 byte burst
         * writes when the WC buffer is flushed, so we can't use it, but have to
         * resort to an uncached mapping. The WC issue is easily caught by the
         * readback check when writing GTT PTE entries.
         */
-       if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 10)
+       if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
                ggtt->gsm = ioremap(phys_addr, size);
        else
                ggtt->gsm = ioremap_wc(phys_addr, size);
@@ -1494,7 +1494,7 @@ intel_partial_pages(const struct i915_ggtt_view *view,
        if (ret)
                goto err_sg_alloc;
 
-       iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset, true);
+       iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset);
        GEM_BUG_ON(!iter);
 
        sg = st->sgl;
index 2694dbb..1c3af0f 100644 (file)
 #define   MI_SEMAPHORE_SAD_NEQ_SDD     (5 << 12)
 #define   MI_SEMAPHORE_TOKEN_MASK      REG_GENMASK(9, 5)
 #define   MI_SEMAPHORE_TOKEN_SHIFT     5
+#define MI_STORE_DATA_IMM      MI_INSTR(0x20, 0)
 #define MI_STORE_DWORD_IMM     MI_INSTR(0x20, 1)
 #define MI_STORE_DWORD_IMM_GEN4        MI_INSTR(0x20, 2)
+#define MI_STORE_QWORD_IMM_GEN8 (MI_INSTR(0x20, 3) | REG_BIT(21))
 #define   MI_MEM_VIRTUAL       (1 << 22) /* 945,g33,965 */
 #define   MI_USE_GGTT          (1 << 22) /* g4x+ */
 #define MI_STORE_DWORD_INDEX   MI_INSTR(0x21, 1)
index 2161bf0..62d40c9 100644 (file)
@@ -13,6 +13,7 @@
 #include "intel_gt_clock_utils.h"
 #include "intel_gt_pm.h"
 #include "intel_gt_requests.h"
+#include "intel_migrate.h"
 #include "intel_mocs.h"
 #include "intel_rc6.h"
 #include "intel_renderstate.h"
@@ -40,8 +41,8 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
        intel_gt_init_timelines(gt);
        intel_gt_pm_init_early(gt);
 
-       intel_rps_init_early(&gt->rps);
        intel_uc_init_early(&gt->uc);
+       intel_rps_init_early(&gt->rps);
 }
 
 int intel_gt_probe_lmem(struct intel_gt *gt)
@@ -83,13 +84,73 @@ void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt)
        gt->ggtt = ggtt;
 }
 
+static const struct intel_mmio_range icl_l3bank_steering_table[] = {
+       { 0x00B100, 0x00B3FF },
+       {},
+};
+
+static const struct intel_mmio_range xehpsdv_mslice_steering_table[] = {
+       { 0x004000, 0x004AFF },
+       { 0x00C800, 0x00CFFF },
+       { 0x00DD00, 0x00DDFF },
+       { 0x00E900, 0x00FFFF }, /* 0xEA00 - 0xEFFF is unused */
+       {},
+};
+
+static const struct intel_mmio_range xehpsdv_lncf_steering_table[] = {
+       { 0x00B000, 0x00B0FF },
+       { 0x00D800, 0x00D8FF },
+       {},
+};
+
+static const struct intel_mmio_range dg2_lncf_steering_table[] = {
+       { 0x00B000, 0x00B0FF },
+       { 0x00D880, 0x00D8FF },
+       {},
+};
+
+static u16 slicemask(struct intel_gt *gt, int count)
+{
+       u64 dss_mask = intel_sseu_get_subslices(&gt->info.sseu, 0);
+
+       return intel_slicemask_from_dssmask(dss_mask, count);
+}
+
 int intel_gt_init_mmio(struct intel_gt *gt)
 {
+       struct drm_i915_private *i915 = gt->i915;
+
        intel_gt_init_clock_frequency(gt);
 
        intel_uc_init_mmio(&gt->uc);
        intel_sseu_info_init(gt);
 
+       /*
+        * An mslice is unavailable only if both the meml3 for the slice is
+        * disabled *and* all of the DSS in the slice (quadrant) are disabled.
+        */
+       if (HAS_MSLICES(i915))
+               gt->info.mslice_mask =
+                       slicemask(gt, GEN_DSS_PER_MSLICE) |
+                       (intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
+                        GEN12_MEML3_EN_MASK);
+
+       if (IS_DG2(i915)) {
+               gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
+               gt->steering_table[LNCF] = dg2_lncf_steering_table;
+       } else if (IS_XEHPSDV(i915)) {
+               gt->steering_table[MSLICE] = xehpsdv_mslice_steering_table;
+               gt->steering_table[LNCF] = xehpsdv_lncf_steering_table;
+       } else if (GRAPHICS_VER(i915) >= 11 &&
+                  GRAPHICS_VER_FULL(i915) < IP_VER(12, 50)) {
+               gt->steering_table[L3BANK] = icl_l3bank_steering_table;
+               gt->info.l3bank_mask =
+                       ~intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3) &
+                       GEN10_L3BANK_MASK;
+       } else if (HAS_MSLICES(i915)) {
+               MISSING_CASE(INTEL_INFO(i915)->platform);
+       }
+
        return intel_engines_init_mmio(gt);
 }
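The comment above spells out the fusing rule: an mslice only drops out when its meml3 is disabled and every DSS in its quadrant is disabled, which is why the mask ORs the DSS-derived slicemask with the MEML3 fuse bits. A rough standalone sketch of that computation (field widths and names are illustrative, not the real fuse layout):

#include <stdint.h>

/* An mslice counts as present if its meml3 is enabled OR any DSS in its
 * quadrant is enabled; only fully fused-off quadrants are dropped. */
static uint16_t mslice_mask_from_fuses(uint64_t dss_mask, uint16_t meml3_mask,
                                       unsigned int dss_per_mslice)
{
        uint16_t mask = meml3_mask;
        unsigned int i;

        for (i = 0; i * dss_per_mslice < 64; i++) {
                uint64_t quad = (dss_mask >> (i * dss_per_mslice)) &
                                ((1ull << dss_per_mslice) - 1);

                if (quad)
                        mask |= (uint16_t)(1u << i);
        }

        return mask;
}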
 
@@ -192,7 +253,7 @@ static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
        intel_uncore_rmw(uncore, reg, 0, 0);
 }
 
-static void gen8_clear_engine_error_register(struct intel_engine_cs *engine)
+static void gen6_clear_engine_error_register(struct intel_engine_cs *engine)
 {
        GEN6_RING_FAULT_REG_RMW(engine, RING_FAULT_VALID, 0);
        GEN6_RING_FAULT_REG_POSTING_READ(engine);
@@ -238,7 +299,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt,
                enum intel_engine_id id;
 
                for_each_engine_masked(engine, gt, engine_mask, id)
-                       gen8_clear_engine_error_register(engine);
+                       gen6_clear_engine_error_register(engine);
        }
 }
 
@@ -572,6 +633,25 @@ static void __intel_gt_disable(struct intel_gt *gt)
        GEM_BUG_ON(intel_gt_pm_is_awake(gt));
 }
 
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
+{
+       long remaining_timeout;
+
+       /* If the device is asleep, we have no requests outstanding */
+       if (!intel_gt_pm_is_awake(gt))
+               return 0;
+
+       while ((timeout = intel_gt_retire_requests_timeout(gt, timeout,
+                                                          &remaining_timeout)) > 0) {
+               cond_resched();
+               if (signal_pending(current))
+                       return -EINTR;
+       }
+
+       return timeout ? timeout : intel_uc_wait_for_idle(&gt->uc,
+                                                         remaining_timeout);
+}
+
 int intel_gt_init(struct intel_gt *gt)
 {
        int err;
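intel_gt_wait_for_idle() above follows a familiar shape: keep retiring outstanding requests while the time budget lasts, then spend whatever budget remains waiting for the leftover (uC) work. A skeletal sketch of that control flow, with callbacks standing in for intel_gt_retire_requests_timeout() and intel_uc_wait_for_idle() and signal handling reduced to a comment:

/* Keep draining while the budget stays positive; a zero or negative
 * return from retire() ends the loop (exhausted budget or error).
 * Hand any leftover budget to the second waiter. */
static long drain_with_budget(long timeout,
                              long (*retire)(long timeout, long *remaining),
                              long (*wait_rest)(long remaining))
{
        long remaining = 0;

        while ((timeout = retire(timeout, &remaining)) > 0) {
                /* the kernel loop cond_resched()s here and returns
                 * -EINTR if a signal is pending */
        }

        return timeout ? timeout : wait_rest(remaining);
}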
@@ -622,10 +702,14 @@ int intel_gt_init(struct intel_gt *gt)
        if (err)
                goto err_gt;
 
+       intel_uc_init_late(&gt->uc);
+
        err = i915_inject_probe_error(gt->i915, -EIO);
        if (err)
                goto err_gt;
 
+       intel_migrate_init(&gt->migrate, gt);
+
        goto out_fw;
 err_gt:
        __intel_gt_disable(gt);
@@ -649,6 +733,7 @@ void intel_gt_driver_remove(struct intel_gt *gt)
 {
        __intel_gt_disable(gt);
 
+       intel_migrate_fini(&gt->migrate);
        intel_uc_driver_remove(&gt->uc);
 
        intel_engines_release(gt);
@@ -697,6 +782,112 @@ void intel_gt_driver_late_release(struct intel_gt *gt)
        intel_engines_free(gt);
 }
 
+/**
+ * intel_gt_reg_needs_read_steering - determine whether a register read
+ *     requires explicit steering
+ * @gt: GT structure
+ * @reg: the register to check steering requirements for
+ * @type: type of multicast steering to check
+ *
+ * Determines whether @reg needs explicit steering of a specific type for
+ * reads.
+ *
+ * Returns false if @reg does not belong to a register range of the given
+ * steering type, or if the default (subslice-based) steering IDs are suitable
+ * for @type steering too.
+ */
+static bool intel_gt_reg_needs_read_steering(struct intel_gt *gt,
+                                            i915_reg_t reg,
+                                            enum intel_steering_type type)
+{
+       const u32 offset = i915_mmio_reg_offset(reg);
+       const struct intel_mmio_range *entry;
+
+       if (likely(!intel_gt_needs_read_steering(gt, type)))
+               return false;
+
+       for (entry = gt->steering_table[type]; entry->end; entry++) {
+               if (offset >= entry->start && offset <= entry->end)
+                       return true;
+       }
+
+       return false;
+}
+
+/**
+ * intel_gt_get_valid_steering - determines valid IDs for a class of MCR steering
+ * @gt: GT structure
+ * @type: multicast register type
+ * @sliceid: Slice ID returned
+ * @subsliceid: Subslice ID returned
+ *
+ * Determines sliceid and subsliceid values that will steer reads
+ * of a specific multicast register class to a valid value.
+ */
+static void intel_gt_get_valid_steering(struct intel_gt *gt,
+                                       enum intel_steering_type type,
+                                       u8 *sliceid, u8 *subsliceid)
+{
+       switch (type) {
+       case L3BANK:
+               GEM_DEBUG_WARN_ON(!gt->info.l3bank_mask); /* should be impossible! */
+
+               *sliceid = 0;           /* unused */
+               *subsliceid = __ffs(gt->info.l3bank_mask);
+               break;
+       case MSLICE:
+               GEM_DEBUG_WARN_ON(!gt->info.mslice_mask); /* should be impossible! */
+
+               *sliceid = __ffs(gt->info.mslice_mask);
+               *subsliceid = 0;        /* unused */
+               break;
+       case LNCF:
+               GEM_DEBUG_WARN_ON(!gt->info.mslice_mask); /* should be impossible! */
+
+               /*
+                * An LNCF is always present if its mslice is present, so we
+                * can safely just steer to LNCF 0 in all cases.
+                */
+               *sliceid = __ffs(gt->info.mslice_mask) << 1;
+               *subsliceid = 0;        /* unused */
+               break;
+       default:
+               MISSING_CASE(type);
+               *sliceid = 0;
+               *subsliceid = 0;
+       }
+}
+
+/**
+ * intel_gt_read_register_fw - reads a GT register with support for multicast
+ * @gt: GT structure
+ * @reg: register to read
+ *
+ * This function will read a GT register.  If the register is a multicast
+ * register, the read will be steered to a valid instance (i.e., one that
+ * isn't fused off or powered down by power gating).
+ *
+ * Returns the value from a valid instance of @reg.
+ */
+u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg)
+{
+       int type;
+       u8 sliceid, subsliceid;
+
+       for (type = 0; type < NUM_STEERING_TYPES; type++) {
+               if (intel_gt_reg_needs_read_steering(gt, reg, type)) {
+                       intel_gt_get_valid_steering(gt, type, &sliceid,
+                                                   &subsliceid);
+                       return intel_uncore_read_with_mcr_steering_fw(gt->uncore,
+                                                                     reg,
+                                                                     sliceid,
+                                                                     subsliceid);
+               }
+       }
+
+       return intel_uncore_read_fw(gt->uncore, reg);
+}
+
 void intel_gt_info_print(const struct intel_gt_info *info,
                         struct drm_printer *p)
 {
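
A minimal sketch of how the steering plumbing above is meant to be consumed: a per-platform table of multicast ranges is hung off gt->steering_table[], and reads of registers inside those ranges go through intel_gt_read_register_fw(). The range values and the example_* names below are illustrative assumptions, not values taken from this series.

/* Illustrative only: the offsets below are made up for the sketch. */
static const struct intel_mmio_range example_mslice_steering_table[] = {
	{ 0x004000, 0x004aff },
	{ 0x00c800, 0x00cfff },
	{},	/* end == 0 terminates the walk in intel_gt_reg_needs_read_steering() */
};

static void example_init_steering(struct intel_gt *gt)
{
	/* Steering types left NULL never need explicit re-steering. */
	gt->steering_table[MSLICE] = example_mslice_steering_table;
}

static u32 example_read_mcr(struct intel_gt *gt, i915_reg_t reg)
{
	/* Steered to a valid instance automatically when @reg falls in a registered range. */
	return intel_gt_read_register_fw(gt, reg);
}
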
index 7ec395c..74e7718 100644 (file)
@@ -48,6 +48,8 @@ void intel_gt_driver_release(struct intel_gt *gt);
 
 void intel_gt_driver_late_release(struct intel_gt *gt);
 
+int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
+
 void intel_gt_check_and_clear_faults(struct intel_gt *gt);
 void intel_gt_clear_error_registers(struct intel_gt *gt,
                                    intel_engine_mask_t engine_mask);
@@ -75,6 +77,14 @@ static inline bool intel_gt_is_wedged(const struct intel_gt *gt)
        return unlikely(test_bit(I915_WEDGED, &gt->reset.flags));
 }
 
+static inline bool intel_gt_needs_read_steering(struct intel_gt *gt,
+                                               enum intel_steering_type type)
+{
+       return gt->steering_table[type];
+}
+
+u32 intel_gt_read_register_fw(struct intel_gt *gt, i915_reg_t reg);
+
 void intel_gt_info_print(const struct intel_gt_info *info,
                         struct drm_printer *p);
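
A short usage sketch for the now-exported intel_gt_wait_for_idle(); the example_quiesce_gt() helper and the one-second budget are assumptions made for illustration only.

static int example_quiesce_gt(struct intel_gt *gt)
{
	/* Retires outstanding requests, then waits for remaining GuC/HuC work. */
	long ret = intel_gt_wait_for_idle(gt, HZ);

	/* Negative means the wait failed (pending signal, timeout or uC error). */
	return ret < 0 ? ret : 0;
}
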
 
index 9f0e729..3513d6f 100644 (file)
@@ -24,8 +24,8 @@ static u32 read_reference_ts_freq(struct intel_uncore *uncore)
        return base_freq + frac_freq;
 }
 
-static u32 gen10_get_crystal_clock_freq(struct intel_uncore *uncore,
-                                       u32 rpm_config_reg)
+static u32 gen9_get_crystal_clock_freq(struct intel_uncore *uncore,
+                                      u32 rpm_config_reg)
 {
        u32 f19_2_mhz = 19200000;
        u32 f24_mhz = 24000000;
@@ -128,10 +128,10 @@ static u32 read_clock_frequency(struct intel_uncore *uncore)
                } else {
                        u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
 
-                       if (GRAPHICS_VER(uncore->i915) <= 10)
-                               freq = gen10_get_crystal_clock_freq(uncore, c0);
-                       else
+                       if (GRAPHICS_VER(uncore->i915) >= 11)
                                freq = gen11_get_crystal_clock_freq(uncore, c0);
+                       else
+                               freq = gen9_get_crystal_clock_freq(uncore, c0);
 
                        /*
                         * Now figure out how the command stream's timestamp
index c134622..b2de83b 100644 (file)
@@ -184,7 +184,13 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
        intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK,    ~0);
        intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK,   ~0);
        intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK,   ~0);
+       if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
+               intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK,   ~0);
+       if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
+               intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK,   ~0);
        intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~0);
+       if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
+               intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~0);
 
        intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE, 0);
        intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK,  ~0);
@@ -218,8 +224,13 @@ void gen11_gt_irq_postinstall(struct intel_gt *gt)
        intel_uncore_write(uncore, GEN11_BCS_RSVD_INTR_MASK, ~smask);
        intel_uncore_write(uncore, GEN11_VCS0_VCS1_INTR_MASK, ~dmask);
        intel_uncore_write(uncore, GEN11_VCS2_VCS3_INTR_MASK, ~dmask);
+       if (HAS_ENGINE(gt, VCS4) || HAS_ENGINE(gt, VCS5))
+               intel_uncore_write(uncore, GEN12_VCS4_VCS5_INTR_MASK, ~dmask);
+       if (HAS_ENGINE(gt, VCS6) || HAS_ENGINE(gt, VCS7))
+               intel_uncore_write(uncore, GEN12_VCS6_VCS7_INTR_MASK, ~dmask);
        intel_uncore_write(uncore, GEN11_VECS0_VECS1_INTR_MASK, ~dmask);
-
+       if (HAS_ENGINE(gt, VECS2) || HAS_ENGINE(gt, VECS3))
+               intel_uncore_write(uncore, GEN12_VECS2_VECS3_INTR_MASK, ~dmask);
        /*
         * RPS interrupts will get enabled/disabled on demand when RPS itself
         * is enabled/disabled.
index aef3084..dea8e24 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/suspend.h>
 
 #include "i915_drv.h"
-#include "i915_globals.h"
 #include "i915_params.h"
 #include "intel_context.h"
 #include "intel_engine_pm.h"
@@ -67,8 +66,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
 
        GT_TRACE(gt, "\n");
 
-       i915_globals_unpark();
-
        /*
         * It seems that the DMC likes to transition between the DC states a lot
         * when there are no connected displays (no active power domains) during
@@ -116,8 +113,6 @@ static int __gt_park(struct intel_wakeref *wf)
        GEM_BUG_ON(!wakeref);
        intel_display_power_put_async(i915, POWER_DOMAIN_GT_IRQ, wakeref);
 
-       i915_globals_park();
-
        return 0;
 }
 
@@ -174,8 +169,6 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
        if (intel_gt_is_wedged(gt))
                intel_gt_unset_wedged(gt);
 
-       intel_uc_sanitize(&gt->uc);
-
        for_each_engine(engine, gt, id)
                if (engine->reset.prepare)
                        engine->reset.prepare(engine);
@@ -191,6 +184,8 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
                        __intel_engine_reset(engine, false);
        }
 
+       intel_uc_reset(&gt->uc, false);
+
        for_each_engine(engine, gt, id)
                if (engine->reset.finish)
                        engine->reset.finish(engine);
@@ -243,6 +238,8 @@ int intel_gt_resume(struct intel_gt *gt)
                goto err_wedged;
        }
 
+       intel_uc_reset_finish(&gt->uc);
+
        intel_rps_enable(&gt->rps);
        intel_llc_enable(&gt->llc);
 
index 647eca9..edb881d 100644 (file)
@@ -130,7 +130,8 @@ void intel_engine_fini_retire(struct intel_engine_cs *engine)
        GEM_BUG_ON(engine->retire);
 }
 
-long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout,
+                                     long *remaining_timeout)
 {
        struct intel_gt_timelines *timelines = &gt->timelines;
        struct intel_timeline *tl, *tn;
@@ -195,22 +196,10 @@ out_active:       spin_lock(&timelines->lock);
        if (flush_submission(gt, timeout)) /* Wait, there's more! */
                active_count++;
 
-       return active_count ? timeout : 0;
-}
-
-int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
-{
-       /* If the device is asleep, we have no requests outstanding */
-       if (!intel_gt_pm_is_awake(gt))
-               return 0;
-
-       while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) {
-               cond_resched();
-               if (signal_pending(current))
-                       return -EINTR;
-       }
+       if (remaining_timeout)
+               *remaining_timeout = timeout;
 
-       return timeout;
+       return active_count ? timeout : 0;
 }
 
 static void retire_work_handler(struct work_struct *work)
index fcc30a6..51dbe0e 100644 (file)
@@ -6,14 +6,17 @@
 #ifndef INTEL_GT_REQUESTS_H
 #define INTEL_GT_REQUESTS_H
 
+#include <stddef.h>
+
 struct intel_engine_cs;
 struct intel_gt;
 struct intel_timeline;
 
-long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);
+long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout,
+                                     long *remaining_timeout);
 static inline void intel_gt_retire_requests(struct intel_gt *gt)
 {
-       intel_gt_retire_requests_timeout(gt, 0);
+       intel_gt_retire_requests_timeout(gt, 0, NULL);
 }
 
 void intel_engine_init_retire(struct intel_engine_cs *engine);
@@ -21,8 +24,6 @@ void intel_engine_add_retire(struct intel_engine_cs *engine,
                             struct intel_timeline *tl);
 void intel_engine_fini_retire(struct intel_engine_cs *engine);
 
-int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
-
 void intel_gt_init_requests(struct intel_gt *gt);
 void intel_gt_park_requests(struct intel_gt *gt);
 void intel_gt_unpark_requests(struct intel_gt *gt);
index fecfacf..a81e21b 100644 (file)
@@ -24,6 +24,7 @@
 #include "intel_reset_types.h"
 #include "intel_rc6_types.h"
 #include "intel_rps_types.h"
+#include "intel_migrate_types.h"
 #include "intel_wakeref.h"
 
 struct drm_i915_private;
@@ -31,6 +32,33 @@ struct i915_ggtt;
 struct intel_engine_cs;
 struct intel_uncore;
 
+struct intel_mmio_range {
+       u32 start;
+       u32 end;
+};
+
+/*
+ * The hardware has multiple kinds of multicast register ranges that need
+ * special register steering (and future platforms are expected to add
+ * additional types).
+ *
+ * During driver startup, we initialize the steering control register to
+ * direct reads to a slice/subslice that are valid for the 'subslice' class
+ * of multicast registers.  If another type of steering does not have any
+ * overlap in valid steering targets with 'subslice' style registers, we will
+ * need to explicitly re-steer reads of registers of the other type.
+ *
+ * Only the replication types that may need additional non-default steering
+ * are listed here.
+ */
+enum intel_steering_type {
+       L3BANK,
+       MSLICE,
+       LNCF,
+
+       NUM_STEERING_TYPES
+};
+
 enum intel_submission_method {
        INTEL_SUBMISSION_RING,
        INTEL_SUBMISSION_ELSP,
@@ -145,8 +173,15 @@ struct intel_gt {
 
        struct i915_vma *scratch;
 
+       struct intel_migrate migrate;
+
+       const struct intel_mmio_range *steering_table[NUM_STEERING_TYPES];
+
        struct intel_gt_info {
                intel_engine_mask_t engine_mask;
+
+               u32 l3bank_mask;
+
                u8 num_engines;
 
                /* Media engine access to SFC per instance */
@@ -154,6 +189,8 @@ struct intel_gt {
 
                /* Slice/subslice/EU info */
                struct sseu_dev_info sseu;
+
+               unsigned long mslice_mask;
        } info;
 };
 
index 084ea65..e137dd3 100644 (file)
@@ -16,7 +16,19 @@ struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
 {
        struct drm_i915_gem_object *obj;
 
-       obj = i915_gem_object_create_lmem(vm->i915, sz, 0);
+       /*
+        * To avoid severe over-allocation when dealing with min_page_size
+        * restrictions, we override that behaviour here by allowing an object
+        * size and page layout which can be smaller. In practice this should be
+        * totally fine, since GTT paging structures are not typically inserted
+        * into the GTT.
+        *
+        * Note that we also hit this path for the scratch page, and for this
+        * case it might need to be 64K, but that should work fine here since we
+        * used the passed in size for the page size, which should ensure it
+        * also has the same alignment.
+        */
+       obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz, 0);
        /*
         * Ensure all paging structures for this vm share the same dma-resv
         * object underneath, with the idea that one object_lock() will lock
@@ -414,7 +426,7 @@ static void tgl_setup_private_ppat(struct intel_uncore *uncore)
        intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB);
 }
 
-static void cnl_setup_private_ppat(struct intel_uncore *uncore)
+static void icl_setup_private_ppat(struct intel_uncore *uncore)
 {
        intel_uncore_write(uncore,
                           GEN10_PAT_INDEX(0),
@@ -514,8 +526,8 @@ void setup_private_pat(struct intel_uncore *uncore)
 
        if (GRAPHICS_VER(i915) >= 12)
                tgl_setup_private_ppat(uncore);
-       else if (GRAPHICS_VER(i915) >= 10)
-               cnl_setup_private_ppat(uncore);
+       else if (GRAPHICS_VER(i915) >= 11)
+               icl_setup_private_ppat(uncore);
        else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915))
                chv_setup_private_ppat(uncore);
        else
index edea95b..bc71530 100644 (file)
@@ -140,7 +140,6 @@ typedef u64 gen8_pte_t;
 
 enum i915_cache_level;
 
-struct drm_i915_file_private;
 struct drm_i915_gem_object;
 struct i915_fence_reg;
 struct i915_vma;
@@ -220,16 +219,6 @@ struct i915_address_space {
        struct intel_gt *gt;
        struct drm_i915_private *i915;
        struct device *dma;
-       /*
-        * Every address space belongs to a struct file - except for the global
-        * GTT that is owned by the driver (and so @file is set to NULL). In
-        * principle, no information should leak from one context to another
-        * (or between files/processes etc) unless explicitly shared by the
-        * owner. Tracking the owner is important in order to free up per-file
-        * objects along with the file, to aide resource tracking, and to
-        * assign blame.
-        */
-       struct drm_i915_file_private *file;
        u64 total;              /* size addr space maps (ex. 2GB for ggtt) */
        u64 reserved;           /* size addr space reserved */
 
@@ -296,6 +285,13 @@ struct i915_address_space {
                               u32 flags);
        void (*cleanup)(struct i915_address_space *vm);
 
+       void (*foreach)(struct i915_address_space *vm,
+                       u64 start, u64 length,
+                       void (*fn)(struct i915_address_space *vm,
+                                  struct i915_page_table *pt,
+                                  void *data),
+                       void *data);
+
        struct i915_vma_ops vma_ops;
 
        I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
index a27bac0..bb4af49 100644 (file)
@@ -70,7 +70,7 @@ static void set_offsets(u32 *regs,
        if (close) {
                /* Close the batch; used mainly by live_lrc_layout() */
                *regs = MI_BATCH_BUFFER_END;
-               if (GRAPHICS_VER(engine->i915) >= 10)
+               if (GRAPHICS_VER(engine->i915) >= 11)
                        *regs |= BIT(0);
        }
 }
@@ -484,6 +484,47 @@ static const u8 gen12_rcs_offsets[] = {
        END
 };
 
+static const u8 xehp_rcs_offsets[] = {
+       NOP(1),
+       LRI(13, POSTED),
+       REG16(0x244),
+       REG(0x034),
+       REG(0x030),
+       REG(0x038),
+       REG(0x03c),
+       REG(0x168),
+       REG(0x140),
+       REG(0x110),
+       REG(0x1c0),
+       REG(0x1c4),
+       REG(0x1c8),
+       REG(0x180),
+       REG16(0x2b4),
+
+       NOP(5),
+       LRI(9, POSTED),
+       REG16(0x3a8),
+       REG16(0x28c),
+       REG16(0x288),
+       REG16(0x284),
+       REG16(0x280),
+       REG16(0x27c),
+       REG16(0x278),
+       REG16(0x274),
+       REG16(0x270),
+
+       LRI(3, POSTED),
+       REG(0x1b0),
+       REG16(0x5a8),
+       REG16(0x5ac),
+
+       NOP(6),
+       LRI(1, 0),
+       REG(0x0c8),
+
+       END
+};
+
 #undef END
 #undef REG16
 #undef REG
@@ -502,7 +543,9 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
                   !intel_engine_has_relative_mmio(engine));
 
        if (engine->class == RENDER_CLASS) {
-               if (GRAPHICS_VER(engine->i915) >= 12)
+               if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+                       return xehp_rcs_offsets;
+               else if (GRAPHICS_VER(engine->i915) >= 12)
                        return gen12_rcs_offsets;
                else if (GRAPHICS_VER(engine->i915) >= 11)
                        return gen11_rcs_offsets;
@@ -522,7 +565,9 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
 
 static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
 {
-       if (GRAPHICS_VER(engine->i915) >= 12)
+       if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+               return 0x70;
+       else if (GRAPHICS_VER(engine->i915) >= 12)
                return 0x60;
        else if (GRAPHICS_VER(engine->i915) >= 9)
                return 0x54;
@@ -534,7 +579,9 @@ static int lrc_ring_mi_mode(const struct intel_engine_cs *engine)
 
 static int lrc_ring_gpr0(const struct intel_engine_cs *engine)
 {
-       if (GRAPHICS_VER(engine->i915) >= 12)
+       if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+               return 0x84;
+       else if (GRAPHICS_VER(engine->i915) >= 12)
                return 0x74;
        else if (GRAPHICS_VER(engine->i915) >= 9)
                return 0x68;
@@ -578,10 +625,16 @@ static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine)
 
 static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine)
 {
-       if (engine->class != RENDER_CLASS)
-               return -1;
 
-       if (GRAPHICS_VER(engine->i915) >= 12)
+       if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+               /*
+                * Note that the CSFE context has a dummy slot for CMD_BUF_CCTL
+                * simply to match the RCS context image layout.
+                */
+               return 0xc6;
+       else if (engine->class != RENDER_CLASS)
+               return -1;
+       else if (GRAPHICS_VER(engine->i915) >= 12)
                return 0xb6;
        else if (GRAPHICS_VER(engine->i915) >= 11)
                return 0xaa;
@@ -600,8 +653,6 @@ lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
                return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
        case 11:
                return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
-       case 10:
-               return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
        case 9:
                return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
        case 8:
@@ -845,7 +896,7 @@ int lrc_alloc(struct intel_context *ce, struct intel_engine_cs *engine)
        if (IS_ERR(vma))
                return PTR_ERR(vma);
 
-       ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
+       ring = intel_engine_create_ring(engine, ce->ring_size);
        if (IS_ERR(ring)) {
                err = PTR_ERR(ring);
                goto err_vma;
@@ -1101,6 +1152,14 @@ setup_indirect_ctx_bb(const struct intel_context *ce,
  *      bits 55-60:    SW counter
  *      bits 61-63:    engine class
  *
+ * On Xe_HP, the upper dword of the descriptor has a new format:
+ *
+ *      bits 32-37:    virtual function number
+ *      bit 38:        mbz, reserved for use by hardware
+ *      bits 39-54:    SW context ID
+ *      bits 55-57:    reserved
+ *      bits 58-63:    SW counter
+ *
  * engine info, SW context ID and SW counter need to form a unique number
  * (Context ID) per lrc.
  */
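
To make the new upper-dword layout concrete, a small packing sketch using the bit positions from the comment above; the EXAMPLE_* masks are spelled out here for illustration and are not macros defined by the driver.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_XEHP_VF_ID	GENMASK_ULL(37, 32)	/* bit 38 is mbz */
#define EXAMPLE_XEHP_SW_CTX_ID	GENMASK_ULL(54, 39)
#define EXAMPLE_XEHP_SW_COUNTER	GENMASK_ULL(63, 58)	/* bits 55-57 reserved */

static u64 example_xehp_upper_desc(u32 vf, u32 ctx_id, u32 counter)
{
	/* Produces bits 32-63 of the 64-bit context descriptor. */
	return FIELD_PREP(EXAMPLE_XEHP_VF_ID, vf) |
	       FIELD_PREP(EXAMPLE_XEHP_SW_CTX_ID, ctx_id) |
	       FIELD_PREP(EXAMPLE_XEHP_SW_COUNTER, counter);
}
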
@@ -1387,40 +1446,6 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
        return batch;
 }
 
-static u32 *
-gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
-{
-       int i;
-
-       /*
-        * WaPipeControlBefore3DStateSamplePattern: cnl
-        *
-        * Ensure the engine is idle prior to programming a
-        * 3DSTATE_SAMPLE_PATTERN during a context restore.
-        */
-       batch = gen8_emit_pipe_control(batch,
-                                      PIPE_CONTROL_CS_STALL,
-                                      0);
-       /*
-        * WaPipeControlBefore3DStateSamplePattern says we need 4 dwords for
-        * the PIPE_CONTROL followed by 12 dwords of 0x0, so 16 dwords in
-        * total. However, a PIPE_CONTROL is 6 dwords long, not 4, which is
-        * confusing. Since gen8_emit_pipe_control() already advances the
-        * batch by 6 dwords, we advance the other 10 here, completing a
-        * cacheline. It's not clear if the workaround requires this padding
-        * before other commands, or if it's just the regular padding we would
-        * already have for the workaround bb, so leave it here for now.
-        */
-       for (i = 0; i < 10; i++)
-               *batch++ = MI_NOOP;
-
-       /* Pad to end of cacheline */
-       while ((unsigned long)batch % CACHELINE_BYTES)
-               *batch++ = MI_NOOP;
-
-       return batch;
-}
-
 #define CTX_WA_BB_SIZE (PAGE_SIZE)
 
 static int lrc_create_wa_ctx(struct intel_engine_cs *engine)
@@ -1473,10 +1498,6 @@ void lrc_init_wa_ctx(struct intel_engine_cs *engine)
        case 12:
        case 11:
                return;
-       case 10:
-               wa_bb_fn[0] = gen10_init_indirectctx_bb;
-               wa_bb_fn[1] = NULL;
-               break;
        case 9:
                wa_bb_fn[0] = gen9_init_indirectctx_bb;
                wa_bb_fn[1] = NULL;
index 41e5350..f785d0e 100644 (file)
 #define GEN11_CSB_WRITE_PTR_MASK       (GEN11_CSB_PTR_MASK << 0)
 
 #define MAX_CONTEXT_HW_ID      (1 << 21) /* exclusive */
-#define MAX_GUC_CONTEXT_HW_ID  (1 << 20) /* exclusive */
 #define GEN11_MAX_CONTEXT_HW_ID        (1 << 11) /* exclusive */
 /* in Gen12 ID 0x7FF is reserved to indicate idle */
 #define GEN12_MAX_CONTEXT_HW_ID        (GEN11_MAX_CONTEXT_HW_ID - 1)
+/* in Xe_HP ID 0xFFFF is reserved to indicate "invalid context" */
+#define XEHP_MAX_CONTEXT_HW_ID 0xFFFF
 
 #endif /* _INTEL_LRC_REG_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
new file mode 100644 (file)
index 0000000..d0a7c93
--- /dev/null
@@ -0,0 +1,688 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_context.h"
+#include "intel_gpu_commands.h"
+#include "intel_gt.h"
+#include "intel_gtt.h"
+#include "intel_migrate.h"
+#include "intel_ring.h"
+
+struct insert_pte_data {
+       u64 offset;
+       bool is_lmem;
+};
+
+#define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */
+
+static bool engine_supports_migration(struct intel_engine_cs *engine)
+{
+       if (!engine)
+               return false;
+
+       /*
+        * We need the ability to prevent arbitration (MI_ARB_ON_OFF),
+        * the ability to write PTE using inline data (MI_STORE_DATA)
+        * and of course the ability to do the block transfer (blits).
+        */
+       GEM_BUG_ON(engine->class != COPY_ENGINE_CLASS);
+
+       return true;
+}
+
+static void insert_pte(struct i915_address_space *vm,
+                      struct i915_page_table *pt,
+                      void *data)
+{
+       struct insert_pte_data *d = data;
+
+       vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE,
+                       d->is_lmem ? PTE_LM : 0);
+       d->offset += PAGE_SIZE;
+}
+
+static struct i915_address_space *migrate_vm(struct intel_gt *gt)
+{
+       struct i915_vm_pt_stash stash = {};
+       struct i915_ppgtt *vm;
+       int err;
+       int i;
+
+       /*
+        * We construct a very special VM for use by all migration contexts,
+        * it is kept pinned so that it can be used at any time. As we need
+        * to pre-allocate the page directories for the migration VM, this
+        * limits us to only using a small number of prepared vma.
+        *
+        * To be able to pipeline and reschedule migration operations while
+        * avoiding unnecessary contention on the vm itself, the PTE updates
+        * are inline with the blits. All the blits use the same fixed
+        * addresses, with the backing store redirection being updated on the
+        * fly. Only 2 implicit vma are used for all migration operations.
+        *
+        * We lay the ppGTT out as:
+        *
+        *      [0, CHUNK_SZ) -> first object
+        *      [CHUNK_SZ, 2 * CHUNK_SZ) -> second object
+        *      [2 * CHUNK_SZ, 2 * CHUNK_SZ + 2 * CHUNK_SZ >> 9] -> PTE
+        *
+        * By exposing the dma addresses of the page directories themselves
+        * within the ppGTT, we are then able to rewrite the PTE prior to use.
+        * But the PTE update and subsequent migration operation must be atomic,
+        * i.e. within the same non-preemptible window so that we do not switch
+        * to another migration context that overwrites the PTE.
+        *
+        * TODO: Add support for huge LMEM PTEs
+        */
+
+       vm = i915_ppgtt_create(gt);
+       if (IS_ERR(vm))
+               return ERR_CAST(vm);
+
+       if (!vm->vm.allocate_va_range || !vm->vm.foreach) {
+               err = -ENODEV;
+               goto err_vm;
+       }
+
+       /*
+        * Each engine instance is assigned its own chunk in the VM, so
+        * that we can run multiple instances concurrently
+        */
+       for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
+               struct intel_engine_cs *engine;
+               u64 base = (u64)i << 32;
+               struct insert_pte_data d = {};
+               struct i915_gem_ww_ctx ww;
+               u64 sz;
+
+               engine = gt->engine_class[COPY_ENGINE_CLASS][i];
+               if (!engine_supports_migration(engine))
+                       continue;
+
+               /*
+                * We copy in 8MiB chunks. Each PDE covers 2MiB, so we need
+                * 4x2 page directories for source/destination.
+                */
+               sz = 2 * CHUNK_SZ;
+               d.offset = base + sz;
+
+               /*
+                * We need another page directory setup so that we can write
+                * the 8x512 PTE in each chunk.
+                */
+               sz += (sz >> 12) * sizeof(u64);
+
+               err = i915_vm_alloc_pt_stash(&vm->vm, &stash, sz);
+               if (err)
+                       goto err_vm;
+
+               for_i915_gem_ww(&ww, err, true) {
+                       err = i915_vm_lock_objects(&vm->vm, &ww);
+                       if (err)
+                               continue;
+                       err = i915_vm_map_pt_stash(&vm->vm, &stash);
+                       if (err)
+                               continue;
+
+                       vm->vm.allocate_va_range(&vm->vm, &stash, base, sz);
+               }
+               i915_vm_free_pt_stash(&vm->vm, &stash);
+               if (err)
+                       goto err_vm;
+
+               /* Now allow the GPU to rewrite the PTE via its own ppGTT */
+               d.is_lmem = i915_gem_object_is_lmem(vm->vm.scratch[0]);
+               vm->vm.foreach(&vm->vm, base, base + sz, insert_pte, &d);
+       }
+
+       return &vm->vm;
+
+err_vm:
+       i915_vm_put(&vm->vm);
+       return ERR_PTR(err);
+}
+
+static struct intel_engine_cs *first_copy_engine(struct intel_gt *gt)
+{
+       struct intel_engine_cs *engine;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
+               engine = gt->engine_class[COPY_ENGINE_CLASS][i];
+               if (engine_supports_migration(engine))
+                       return engine;
+       }
+
+       return NULL;
+}
+
+static struct intel_context *pinned_context(struct intel_gt *gt)
+{
+       static struct lock_class_key key;
+       struct intel_engine_cs *engine;
+       struct i915_address_space *vm;
+       struct intel_context *ce;
+
+       engine = first_copy_engine(gt);
+       if (!engine)
+               return ERR_PTR(-ENODEV);
+
+       vm = migrate_vm(gt);
+       if (IS_ERR(vm))
+               return ERR_CAST(vm);
+
+       ce = intel_engine_create_pinned_context(engine, vm, SZ_512K,
+                                               I915_GEM_HWS_MIGRATE,
+                                               &key, "migrate");
+       i915_vm_put(ce->vm);
+       return ce;
+}
+
+int intel_migrate_init(struct intel_migrate *m, struct intel_gt *gt)
+{
+       struct intel_context *ce;
+
+       memset(m, 0, sizeof(*m));
+
+       ce = pinned_context(gt);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
+
+       m->context = ce;
+       return 0;
+}
+
+static int random_index(unsigned int max)
+{
+       return upper_32_bits(mul_u32_u32(get_random_u32(), max));
+}
+
+static struct intel_context *__migrate_engines(struct intel_gt *gt)
+{
+       struct intel_engine_cs *engines[MAX_ENGINE_INSTANCE];
+       struct intel_engine_cs *engine;
+       unsigned int count, i;
+
+       count = 0;
+       for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
+               engine = gt->engine_class[COPY_ENGINE_CLASS][i];
+               if (engine_supports_migration(engine))
+                       engines[count++] = engine;
+       }
+
+       return intel_context_create(engines[random_index(count)]);
+}
+
+struct intel_context *intel_migrate_create_context(struct intel_migrate *m)
+{
+       struct intel_context *ce;
+
+       /*
+        * We randomly distribute contexts across the engines upon construction,
+        * as they all share the same pinned vm, and so in order to allow
+        * multiple blits to run in parallel, we must construct each blit
+        * to use a different range of the vm for its GTT. This has to be
+        * known at construction, so we can not use the late greedy load
+        * balancing of the virtual-engine.
+        */
+       ce = __migrate_engines(m->context->engine->gt);
+       if (IS_ERR(ce))
+               return ce;
+
+       ce->ring = NULL;
+       ce->ring_size = SZ_256K;
+
+       i915_vm_put(ce->vm);
+       ce->vm = i915_vm_get(m->context->vm);
+
+       return ce;
+}
+
+static inline struct sgt_dma sg_sgt(struct scatterlist *sg)
+{
+       dma_addr_t addr = sg_dma_address(sg);
+
+       return (struct sgt_dma){ sg, addr, addr + sg_dma_len(sg) };
+}
+
+static int emit_no_arbitration(struct i915_request *rq)
+{
+       u32 *cs;
+
+       cs = intel_ring_begin(rq, 2);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       /* Explicitly disable preemption for this request. */
+       *cs++ = MI_ARB_ON_OFF;
+       *cs++ = MI_NOOP;
+       intel_ring_advance(rq, cs);
+
+       return 0;
+}
+
+static int emit_pte(struct i915_request *rq,
+                   struct sgt_dma *it,
+                   enum i915_cache_level cache_level,
+                   bool is_lmem,
+                   u64 offset,
+                   int length)
+{
+       const u64 encode = rq->context->vm->pte_encode(0, cache_level,
+                                                      is_lmem ? PTE_LM : 0);
+       struct intel_ring *ring = rq->ring;
+       int total = 0;
+       u32 *hdr, *cs;
+       int pkt;
+
+       GEM_BUG_ON(GRAPHICS_VER(rq->engine->i915) < 8);
+
+       /* Compute the page directory offset for the target address range */
+       offset += (u64)rq->engine->instance << 32;
+       offset >>= 12;
+       offset *= sizeof(u64);
+       offset += 2 * CHUNK_SZ;
+
+       cs = intel_ring_begin(rq, 6);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       /* Pack as many PTE updates as possible into a single MI command */
+       pkt = min_t(int, 0x400, ring->space / sizeof(u32) + 5);
+       pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
+
+       hdr = cs;
+       *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */
+       *cs++ = lower_32_bits(offset);
+       *cs++ = upper_32_bits(offset);
+
+       do {
+               if (cs - hdr >= pkt) {
+                       *hdr += cs - hdr - 2;
+                       *cs++ = MI_NOOP;
+
+                       ring->emit = (void *)cs - ring->vaddr;
+                       intel_ring_advance(rq, cs);
+                       intel_ring_update_space(ring);
+
+                       cs = intel_ring_begin(rq, 6);
+                       if (IS_ERR(cs))
+                               return PTR_ERR(cs);
+
+                       pkt = min_t(int, 0x400, ring->space / sizeof(u32) + 5);
+                       pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
+
+                       hdr = cs;
+                       *cs++ = MI_STORE_DATA_IMM | REG_BIT(21);
+                       *cs++ = lower_32_bits(offset);
+                       *cs++ = upper_32_bits(offset);
+               }
+
+               *cs++ = lower_32_bits(encode | it->dma);
+               *cs++ = upper_32_bits(encode | it->dma);
+
+               offset += 8;
+               total += I915_GTT_PAGE_SIZE;
+
+               it->dma += I915_GTT_PAGE_SIZE;
+               if (it->dma >= it->max) {
+                       it->sg = __sg_next(it->sg);
+                       if (!it->sg || sg_dma_len(it->sg) == 0)
+                               break;
+
+                       it->dma = sg_dma_address(it->sg);
+                       it->max = it->dma + sg_dma_len(it->sg);
+               }
+       } while (total < length);
+
+       *hdr += cs - hdr - 2;
+       *cs++ = MI_NOOP;
+
+       ring->emit = (void *)cs - ring->vaddr;
+       intel_ring_advance(rq, cs);
+       intel_ring_update_space(ring);
+
+       return total;
+}
+
+static bool wa_1209644611_applies(int ver, u32 size)
+{
+       u32 height = size >> PAGE_SHIFT;
+
+       if (ver != 11)
+               return false;
+
+       return height % 4 == 3 && height <= 8;
+}
+
+static int emit_copy(struct i915_request *rq, int size)
+{
+       const int ver = GRAPHICS_VER(rq->engine->i915);
+       u32 instance = rq->engine->instance;
+       u32 *cs;
+
+       cs = intel_ring_begin(rq, ver >= 8 ? 10 : 6);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       if (ver >= 9 && !wa_1209644611_applies(ver, size)) {
+               *cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
+               *cs++ = BLT_DEPTH_32 | PAGE_SIZE;
+               *cs++ = 0;
+               *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+               *cs++ = CHUNK_SZ; /* dst offset */
+               *cs++ = instance;
+               *cs++ = 0;
+               *cs++ = PAGE_SIZE;
+               *cs++ = 0; /* src offset */
+               *cs++ = instance;
+       } else if (ver >= 8) {
+               *cs++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
+               *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
+               *cs++ = 0;
+               *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+               *cs++ = CHUNK_SZ; /* dst offset */
+               *cs++ = instance;
+               *cs++ = 0;
+               *cs++ = PAGE_SIZE;
+               *cs++ = 0; /* src offset */
+               *cs++ = instance;
+       } else {
+               GEM_BUG_ON(instance);
+               *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+               *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
+               *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
+               *cs++ = CHUNK_SZ; /* dst offset */
+               *cs++ = PAGE_SIZE;
+               *cs++ = 0; /* src offset */
+       }
+
+       intel_ring_advance(rq, cs);
+       return 0;
+}
+
+int
+intel_context_migrate_copy(struct intel_context *ce,
+                          struct dma_fence *await,
+                          struct scatterlist *src,
+                          enum i915_cache_level src_cache_level,
+                          bool src_is_lmem,
+                          struct scatterlist *dst,
+                          enum i915_cache_level dst_cache_level,
+                          bool dst_is_lmem,
+                          struct i915_request **out)
+{
+       struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst);
+       struct i915_request *rq;
+       int err;
+
+       GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
+       *out = NULL;
+
+       GEM_BUG_ON(ce->ring->size < SZ_64K);
+
+       do {
+               int len;
+
+               rq = i915_request_create(ce);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       goto out_ce;
+               }
+
+               if (await) {
+                       err = i915_request_await_dma_fence(rq, await);
+                       if (err)
+                               goto out_rq;
+
+                       if (rq->engine->emit_init_breadcrumb) {
+                               err = rq->engine->emit_init_breadcrumb(rq);
+                               if (err)
+                                       goto out_rq;
+                       }
+
+                       await = NULL;
+               }
+
+               /* The PTE updates + copy must not be interrupted. */
+               err = emit_no_arbitration(rq);
+               if (err)
+                       goto out_rq;
+
+               len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem, 0,
+                              CHUNK_SZ);
+               if (len <= 0) {
+                       err = len;
+                       goto out_rq;
+               }
+
+               err = emit_pte(rq, &it_dst, dst_cache_level, dst_is_lmem,
+                              CHUNK_SZ, len);
+               if (err < 0)
+                       goto out_rq;
+               if (err < len) {
+                       err = -EINVAL;
+                       goto out_rq;
+               }
+
+               err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+               if (err)
+                       goto out_rq;
+
+               err = emit_copy(rq, len);
+
+               /* Arbitration is re-enabled between requests. */
+out_rq:
+               if (*out)
+                       i915_request_put(*out);
+               *out = i915_request_get(rq);
+               i915_request_add(rq);
+               if (err || !it_src.sg || !sg_dma_len(it_src.sg))
+                       break;
+
+               cond_resched();
+       } while (1);
+
+out_ce:
+       return err;
+}
+
+static int emit_clear(struct i915_request *rq, int size, u32 value)
+{
+       const int ver = GRAPHICS_VER(rq->engine->i915);
+       u32 instance = rq->engine->instance;
+       u32 *cs;
+
+       GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
+
+       cs = intel_ring_begin(rq, ver >= 8 ? 8 : 6);
+       if (IS_ERR(cs))
+               return PTR_ERR(cs);
+
+       if (ver >= 8) {
+               *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
+               *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+               *cs++ = 0;
+               *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+               *cs++ = 0; /* offset */
+               *cs++ = instance;
+               *cs++ = value;
+               *cs++ = MI_NOOP;
+       } else {
+               GEM_BUG_ON(instance);
+               *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
+               *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
+               *cs++ = 0;
+               *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
+               *cs++ = 0;
+               *cs++ = value;
+       }
+
+       intel_ring_advance(rq, cs);
+       return 0;
+}
+
+int
+intel_context_migrate_clear(struct intel_context *ce,
+                           struct dma_fence *await,
+                           struct scatterlist *sg,
+                           enum i915_cache_level cache_level,
+                           bool is_lmem,
+                           u32 value,
+                           struct i915_request **out)
+{
+       struct sgt_dma it = sg_sgt(sg);
+       struct i915_request *rq;
+       int err;
+
+       GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
+       *out = NULL;
+
+       GEM_BUG_ON(ce->ring->size < SZ_64K);
+
+       do {
+               int len;
+
+               rq = i915_request_create(ce);
+               if (IS_ERR(rq)) {
+                       err = PTR_ERR(rq);
+                       goto out_ce;
+               }
+
+               if (await) {
+                       err = i915_request_await_dma_fence(rq, await);
+                       if (err)
+                               goto out_rq;
+
+                       if (rq->engine->emit_init_breadcrumb) {
+                               err = rq->engine->emit_init_breadcrumb(rq);
+                               if (err)
+                                       goto out_rq;
+                       }
+
+                       await = NULL;
+               }
+
+               /* The PTE updates + clear must not be interrupted. */
+               err = emit_no_arbitration(rq);
+               if (err)
+                       goto out_rq;
+
+               len = emit_pte(rq, &it, cache_level, is_lmem, 0, CHUNK_SZ);
+               if (len <= 0) {
+                       err = len;
+                       goto out_rq;
+               }
+
+               err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+               if (err)
+                       goto out_rq;
+
+               err = emit_clear(rq, len, value);
+
+               /* Arbitration is re-enabled between requests. */
+out_rq:
+               if (*out)
+                       i915_request_put(*out);
+               *out = i915_request_get(rq);
+               i915_request_add(rq);
+               if (err || !it.sg || !sg_dma_len(it.sg))
+                       break;
+
+               cond_resched();
+       } while (1);
+
+out_ce:
+       return err;
+}
+
+int intel_migrate_copy(struct intel_migrate *m,
+                      struct i915_gem_ww_ctx *ww,
+                      struct dma_fence *await,
+                      struct scatterlist *src,
+                      enum i915_cache_level src_cache_level,
+                      bool src_is_lmem,
+                      struct scatterlist *dst,
+                      enum i915_cache_level dst_cache_level,
+                      bool dst_is_lmem,
+                      struct i915_request **out)
+{
+       struct intel_context *ce;
+       int err;
+
+       *out = NULL;
+       if (!m->context)
+               return -ENODEV;
+
+       ce = intel_migrate_create_context(m);
+       if (IS_ERR(ce))
+               ce = intel_context_get(m->context);
+       GEM_BUG_ON(IS_ERR(ce));
+
+       err = intel_context_pin_ww(ce, ww);
+       if (err)
+               goto out;
+
+       err = intel_context_migrate_copy(ce, await,
+                                        src, src_cache_level, src_is_lmem,
+                                        dst, dst_cache_level, dst_is_lmem,
+                                        out);
+
+       intel_context_unpin(ce);
+out:
+       intel_context_put(ce);
+       return err;
+}
+
+int
+intel_migrate_clear(struct intel_migrate *m,
+                   struct i915_gem_ww_ctx *ww,
+                   struct dma_fence *await,
+                   struct scatterlist *sg,
+                   enum i915_cache_level cache_level,
+                   bool is_lmem,
+                   u32 value,
+                   struct i915_request **out)
+{
+       struct intel_context *ce;
+       int err;
+
+       *out = NULL;
+       if (!m->context)
+               return -ENODEV;
+
+       ce = intel_migrate_create_context(m);
+       if (IS_ERR(ce))
+               ce = intel_context_get(m->context);
+       GEM_BUG_ON(IS_ERR(ce));
+
+       err = intel_context_pin_ww(ce, ww);
+       if (err)
+               goto out;
+
+       err = intel_context_migrate_clear(ce, await, sg, cache_level,
+                                         is_lmem, value, out);
+
+       intel_context_unpin(ce);
+out:
+       intel_context_put(ce);
+       return err;
+}
+
+void intel_migrate_fini(struct intel_migrate *m)
+{
+       struct intel_context *ce;
+
+       ce = fetch_and_zero(&m->context);
+       if (!ce)
+               return;
+
+       intel_engine_destroy_pinned_context(ce);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_migrate.c"
+#endif
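
For orientation, a standalone illustration of the per-copy-engine ppGTT layout that migrate_vm() lays out above (two 8 MiB chunks per engine instance, followed by a window exposing the PTEs for those chunks); this is plain userspace C that only prints the windows and is not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define CHUNK_SZ (8ull << 20)	/* 8 MiB, matching intel_migrate.c */

int main(void)
{
	for (unsigned int i = 0; i < 2; i++) {
		uint64_t base = (uint64_t)i << 32;	/* one window per copy-engine instance */
		uint64_t pte_bytes = ((2 * CHUNK_SZ) >> 12) * sizeof(uint64_t);

		printf("instance %u: src [%#llx, %#llx) dst [%#llx, %#llx) PTEs at %#llx (+%llu bytes)\n",
		       i,
		       (unsigned long long)base,
		       (unsigned long long)(base + CHUNK_SZ),
		       (unsigned long long)(base + CHUNK_SZ),
		       (unsigned long long)(base + 2 * CHUNK_SZ),
		       (unsigned long long)(base + 2 * CHUNK_SZ),
		       (unsigned long long)pte_bytes);
	}
	return 0;
}
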
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.h b/drivers/gpu/drm/i915/gt/intel_migrate.h
new file mode 100644 (file)
index 0000000..4e18e75
--- /dev/null
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_MIGRATE__
+#define __INTEL_MIGRATE__
+
+#include <linux/types.h>
+
+#include "intel_migrate_types.h"
+
+struct dma_fence;
+struct i915_request;
+struct i915_gem_ww_ctx;
+struct intel_gt;
+struct scatterlist;
+enum i915_cache_level;
+
+int intel_migrate_init(struct intel_migrate *m, struct intel_gt *gt);
+
+struct intel_context *intel_migrate_create_context(struct intel_migrate *m);
+
+int intel_migrate_copy(struct intel_migrate *m,
+                      struct i915_gem_ww_ctx *ww,
+                      struct dma_fence *await,
+                      struct scatterlist *src,
+                      enum i915_cache_level src_cache_level,
+                      bool src_is_lmem,
+                      struct scatterlist *dst,
+                      enum i915_cache_level dst_cache_level,
+                      bool dst_is_lmem,
+                      struct i915_request **out);
+
+int intel_context_migrate_copy(struct intel_context *ce,
+                              struct dma_fence *await,
+                              struct scatterlist *src,
+                              enum i915_cache_level src_cache_level,
+                              bool src_is_lmem,
+                              struct scatterlist *dst,
+                              enum i915_cache_level dst_cache_level,
+                              bool dst_is_lmem,
+                              struct i915_request **out);
+
+int
+intel_migrate_clear(struct intel_migrate *m,
+                   struct i915_gem_ww_ctx *ww,
+                   struct dma_fence *await,
+                   struct scatterlist *sg,
+                   enum i915_cache_level cache_level,
+                   bool is_lmem,
+                   u32 value,
+                   struct i915_request **out);
+int
+intel_context_migrate_clear(struct intel_context *ce,
+                           struct dma_fence *await,
+                           struct scatterlist *sg,
+                           enum i915_cache_level cache_level,
+                           bool is_lmem,
+                           u32 value,
+                           struct i915_request **out);
+
+void intel_migrate_fini(struct intel_migrate *m);
+
+#endif /* __INTEL_MIGRATE__ */
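
A rough caller-side sketch of the clear path, assuming the caller already has a ww transaction open and a dma-mapped scatterlist for an lmem object; example_clear_lmem() and its parameters are placeholders, while the intel_migrate_*() and i915_request_*() calls are the driver interfaces declared above.

static int example_clear_lmem(struct intel_gt *gt,
			      struct i915_gem_ww_ctx *ww,
			      struct scatterlist *pages)
{
	struct i915_request *rq = NULL;
	int err;

	err = intel_migrate_clear(&gt->migrate, ww, NULL /* nothing to await */,
				  pages, I915_CACHE_NONE, true /* is_lmem */,
				  0 /* fill value */, &rq);
	if (rq) {
		/* rq is the last request of the chunked clear; wait for it to land. */
		if (i915_request_wait(rq, 0, HZ) < 0 && !err)
			err = -ETIME;
		i915_request_put(rq);
	}

	return err;
}
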
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate_types.h b/drivers/gpu/drm/i915/gt/intel_migrate_types.h
new file mode 100644 (file)
index 0000000..d982305
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_MIGRATE_TYPES__
+#define __INTEL_MIGRATE_TYPES__
+
+struct intel_context;
+
+struct intel_migrate {
+       struct intel_context *context;
+};
+
+#endif /* __INTEL_MIGRATE_TYPES__ */
index 1784880..582c442 100644 (file)
@@ -352,7 +352,7 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
                table->size  = ARRAY_SIZE(icl_mocs_table);
                table->table = icl_mocs_table;
                table->n_entries = GEN9_NUM_MOCS_ENTRIES;
-       } else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) {
+       } else if (IS_GEN9_BC(i915)) {
                table->size  = ARRAY_SIZE(skl_mocs_table);
                table->n_entries = GEN9_NUM_MOCS_ENTRIES;
                table->table = skl_mocs_table;
index 259d7eb..799d382 100644 (file)
@@ -62,20 +62,25 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
        u32 pg_enable;
        int i;
 
-       /* 2b: Program RC6 thresholds.*/
-       set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
-       set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+       /*
+        * With GuCRC, these parameters are set by GuC
+        */
+       if (!intel_uc_uses_guc_rc(&gt->uc)) {
+               /* 2b: Program RC6 thresholds.*/
+               set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+               set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
 
-       set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
-       set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
-       for_each_engine(engine, rc6_to_gt(rc6), id)
-               set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
+               set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+               set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+               for_each_engine(engine, rc6_to_gt(rc6), id)
+                       set(uncore, RING_MAX_IDLE(engine->mmio_base), 10);
 
-       set(uncore, GUC_MAX_IDLE_COUNT, 0xA);
+               set(uncore, GUC_MAX_IDLE_COUNT, 0xA);
 
-       set(uncore, GEN6_RC_SLEEP, 0);
+               set(uncore, GEN6_RC_SLEEP, 0);
 
-       set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+               set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+       }
 
        /*
         * 2c: Program Coarse Power Gating Policies.
@@ -98,11 +103,19 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
        set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 60);
        set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 60);
 
-       /* 3a: Enable RC6 */
-       rc6->ctl_enable =
-               GEN6_RC_CTL_HW_ENABLE |
-               GEN6_RC_CTL_RC6_ENABLE |
-               GEN6_RC_CTL_EI_MODE(1);
+       /* 3a: Enable RC6
+        *
+        * With GuCRC, we do not enable bit 31 of RC_CTL,
+        * thus allowing GuC to control RC6 entry/exit fully instead.
+        * We will not set the HW ENABLE and EI bits
+        */
+       if (!intel_guc_rc_enable(&gt->uc.guc))
+               rc6->ctl_enable = GEN6_RC_CTL_RC6_ENABLE;
+       else
+               rc6->ctl_enable =
+                       GEN6_RC_CTL_HW_ENABLE |
+                       GEN6_RC_CTL_RC6_ENABLE |
+                       GEN6_RC_CTL_EI_MODE(1);
 
        pg_enable =
                GEN9_RENDER_PG_ENABLE |
@@ -126,7 +139,7 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
        enum intel_engine_id id;
 
        /* 2b: Program RC6 thresholds.*/
-       if (GRAPHICS_VER(rc6_to_i915(rc6)) >= 10) {
+       if (GRAPHICS_VER(rc6_to_i915(rc6)) >= 11) {
                set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
                set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
        } else if (IS_SKYLAKE(rc6_to_i915(rc6))) {
@@ -513,6 +526,10 @@ static void __intel_rc6_disable(struct intel_rc6 *rc6)
 {
        struct drm_i915_private *i915 = rc6_to_i915(rc6);
        struct intel_uncore *uncore = rc6_to_uncore(rc6);
+       struct intel_gt *gt = rc6_to_gt(rc6);
+
+       /* Take control of RC6 back from GuC */
+       intel_guc_rc_disable(&gt->uc.guc);
 
        intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
        if (GRAPHICS_VER(i915) >= 9)
index 4ae1f71..a74b72f 100644 (file)
@@ -10,7 +10,7 @@
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
 #include "gem/i915_gem_ttm.h"
-#include "intel_region_lmem.h"
+#include "gt/intel_gt.h"
 
 static int init_fake_lmem_bar(struct intel_memory_region *mem)
 {
@@ -158,7 +158,7 @@ intel_gt_setup_fake_lmem(struct intel_gt *gt)
 static bool get_legacy_lowmem_region(struct intel_uncore *uncore,
                                     u64 *start, u32 *size)
 {
-       if (!IS_DG1_REVID(uncore->i915, DG1_REVID_A0, DG1_REVID_B0))
+       if (!IS_DG1_GT_STEP(uncore->i915, STEP_A0, STEP_C0))
                return false;
 
        *start = 0;
index 48f0092..4da4c52 100644 (file)
@@ -8,6 +8,7 @@
 
 #include <linux/types.h>
 #include "i915_gem.h"
+#include "i915_gem_ww.h"
 
 struct i915_request;
 struct intel_context;
index 7225163..91200c4 100644 (file)
@@ -22,7 +22,6 @@
 #include "intel_reset.h"
 
 #include "uc/intel_guc.h"
-#include "uc/intel_guc_submission.h"
 
 #define RESET_MAX_RETRIES 3
 
@@ -39,21 +38,6 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
        intel_uncore_rmw_fw(uncore, reg, clr, 0);
 }
 
-static void skip_context(struct i915_request *rq)
-{
-       struct intel_context *hung_ctx = rq->context;
-
-       list_for_each_entry_from_rcu(rq, &hung_ctx->timeline->requests, link) {
-               if (!i915_request_is_active(rq))
-                       return;
-
-               if (rq->context == hung_ctx) {
-                       i915_request_set_error_once(rq, -EIO);
-                       __i915_request_skip(rq);
-               }
-       }
-}
-
 static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
 {
        struct drm_i915_file_private *file_priv = ctx->file_priv;
@@ -88,10 +72,8 @@ static bool mark_guilty(struct i915_request *rq)
        bool banned;
        int i;
 
-       if (intel_context_is_closed(rq->context)) {
-               intel_context_set_banned(rq->context);
+       if (intel_context_is_closed(rq->context))
                return true;
-       }
 
        rcu_read_lock();
        ctx = rcu_dereference(rq->context->gem_context);
@@ -123,11 +105,9 @@ static bool mark_guilty(struct i915_request *rq)
        banned = !i915_gem_context_is_recoverable(ctx);
        if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
                banned = true;
-       if (banned) {
+       if (banned)
                drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
                        ctx->name, atomic_read(&ctx->guilty_count));
-               intel_context_set_banned(rq->context);
-       }
 
        client_mark_guilty(ctx, banned);
 
@@ -149,6 +129,8 @@ static void mark_innocent(struct i915_request *rq)
 
 void __i915_request_reset(struct i915_request *rq, bool guilty)
 {
+       bool banned = false;
+
        RQ_TRACE(rq, "guilty? %s\n", yesno(guilty));
        GEM_BUG_ON(__i915_request_is_complete(rq));
 
@@ -156,13 +138,15 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
        if (guilty) {
                i915_request_set_error_once(rq, -EIO);
                __i915_request_skip(rq);
-               if (mark_guilty(rq))
-                       skip_context(rq);
+               banned = mark_guilty(rq);
        } else {
                i915_request_set_error_once(rq, -EAGAIN);
                mark_innocent(rq);
        }
        rcu_read_unlock();
+
+       if (banned)
+               intel_context_ban(rq->context, rq);
 }
 
 static bool i915_in_reset(struct pci_dev *pdev)
@@ -515,8 +499,14 @@ static int gen11_reset_engines(struct intel_gt *gt,
                [VCS1]  = GEN11_GRDOM_MEDIA2,
                [VCS2]  = GEN11_GRDOM_MEDIA3,
                [VCS3]  = GEN11_GRDOM_MEDIA4,
+               [VCS4]  = GEN11_GRDOM_MEDIA5,
+               [VCS5]  = GEN11_GRDOM_MEDIA6,
+               [VCS6]  = GEN11_GRDOM_MEDIA7,
+               [VCS7]  = GEN11_GRDOM_MEDIA8,
                [VECS0] = GEN11_GRDOM_VECS,
                [VECS1] = GEN11_GRDOM_VECS2,
+               [VECS2] = GEN11_GRDOM_VECS3,
+               [VECS3] = GEN11_GRDOM_VECS4,
        };
        struct intel_engine_cs *engine;
        intel_engine_mask_t tmp;
@@ -826,6 +816,8 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
                __intel_engine_reset(engine, stalled_mask & engine->mask);
        local_bh_enable();
 
+       intel_uc_reset(&gt->uc, true);
+
        intel_ggtt_restore_fences(gt->ggtt);
 
        return err;
@@ -850,6 +842,8 @@ static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake)
                if (awake & engine->mask)
                        intel_engine_pm_put(engine);
        }
+
+       intel_uc_reset_finish(&gt->uc);
 }
 
 static void nop_submit_request(struct i915_request *request)
@@ -903,6 +897,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
        for_each_engine(engine, gt, id)
                if (engine->reset.cancel)
                        engine->reset.cancel(engine);
+       intel_uc_cancel_requests(&gt->uc);
        local_bh_enable();
 
        reset_finish(gt, awake);
@@ -1191,6 +1186,9 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
        ENGINE_TRACE(engine, "flags=%lx\n", gt->reset.flags);
        GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags));
 
+       if (intel_engine_uses_guc(engine))
+               return -ENODEV;
+
        if (!intel_engine_pm_get_if_awake(engine))
                return 0;
 
@@ -1201,13 +1199,10 @@ int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
                           "Resetting %s for %s\n", engine->name, msg);
        atomic_inc(&engine->i915->gpu_error.reset_engine_count[engine->uabi_class]);
 
-       if (intel_engine_uses_guc(engine))
-               ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
-       else
-               ret = intel_gt_reset_engine(engine);
+       ret = intel_gt_reset_engine(engine);
        if (ret) {
                /* If we fail here, we expect to fall back to a global reset */
-               ENGINE_TRACE(engine, "Failed to reset, err: %d\n", ret);
+               ENGINE_TRACE(engine, "Failed to reset %s, err: %d\n", engine->name, ret);
                goto out;
        }
 
@@ -1341,7 +1336,8 @@ void intel_gt_handle_error(struct intel_gt *gt,
         * Try engine reset when available. We fall back to full reset if
         * single reset fails.
         */
-       if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
+       if (!intel_uc_uses_guc_submission(&gt->uc) &&
+           intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
                local_bh_disable();
                for_each_engine_masked(engine, gt, engine_mask, tmp) {
                        BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
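
Aside: the intel_reset.c hunks above split "decide" from "act" — mark_guilty() and __i915_request_reset() now only record whether the hung context should be banned and defer the mechanics to the context backend via intel_context_ban(), while GuC-managed engines refuse per-engine resets (-ENODEV) and intel_gt_handle_error() skips the per-engine path when GuC submission is in use. A minimal userspace sketch of that dispatch pattern, using hypothetical toy_* types rather than the driver's own structures:

/* Toy model only: all toy_* names are illustrative, not i915 code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_context;

struct toy_request {
	struct toy_context *context;
	int error;
};

struct toy_context_ops {
	/* each submission backend (ring, execlists, GuC) bans differently */
	void (*ban)(struct toy_context *ce, struct toy_request *rq);
};

struct toy_context {
	const struct toy_context_ops *ops;
	const char *name;
	bool banned;
};

static void toy_context_ban(struct toy_context *ce, struct toy_request *rq)
{
	ce->banned = true;
	if (ce->ops->ban)
		ce->ops->ban(ce, rq);	/* backend performs the cleanup */
}

/* stand-in for the ring backend's ban hook */
static void toy_ring_ban(struct toy_context *ce, struct toy_request *rq)
{
	(void)rq;
	printf("ring backend: skip pending requests of %s\n", ce->name);
}

static const struct toy_context_ops toy_ring_ops = { .ban = toy_ring_ban };

/* models the reworked __i915_request_reset(): decide, then delegate */
static void toy_request_reset(struct toy_request *rq, bool guilty)
{
	bool banned = false;

	if (guilty) {
		rq->error = -5;		/* -EIO in the real code */
		banned = true;		/* what mark_guilty() now reports */
	}

	if (banned)
		toy_context_ban(rq->context, rq);
}

int main(void)
{
	struct toy_context ce = { .ops = &toy_ring_ops, .name = "hung-ctx" };
	struct toy_request rq = { .context = &ce };

	toy_request_reset(&rq, true);
	printf("banned=%d, error=%d\n", (int)ce.banned, rq.error);
	return 0;
}
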
index dbf5f14..1b32dad 100644 (file)
@@ -49,6 +49,7 @@ static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
         * intel_ring_begin()).
         */
        GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs);
+       GEM_BUG_ON(!IS_ALIGNED(rq->ring->emit, 8)); /* RING_TAIL qword align */
 }
 
 static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
index 37d74d4..2958e2f 100644 (file)
@@ -16,6 +16,7 @@
 #include "intel_reset.h"
 #include "intel_ring.h"
 #include "shmem_utils.h"
+#include "intel_engine_heartbeat.h"
 
 /* Rough estimate of the typical request size, performing a flush,
  * set-context and then emitting the batch.
@@ -342,9 +343,9 @@ static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
        u32 head;
 
        rq = NULL;
-       spin_lock_irqsave(&engine->active.lock, flags);
+       spin_lock_irqsave(&engine->sched_engine->lock, flags);
        rcu_read_lock();
-       list_for_each_entry(pos, &engine->active.requests, sched.link) {
+       list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
                if (!__i915_request_is_complete(pos)) {
                        rq = pos;
                        break;
@@ -399,7 +400,7 @@ static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
        }
        engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
 
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 }
 
 static void reset_finish(struct intel_engine_cs *engine)
@@ -411,16 +412,16 @@ static void reset_cancel(struct intel_engine_cs *engine)
        struct i915_request *request;
        unsigned long flags;
 
-       spin_lock_irqsave(&engine->active.lock, flags);
+       spin_lock_irqsave(&engine->sched_engine->lock, flags);
 
        /* Mark all submitted requests as skipped. */
-       list_for_each_entry(request, &engine->active.requests, sched.link)
+       list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
                i915_request_put(i915_request_mark_eio(request));
        intel_engine_signal_breadcrumbs(engine);
 
        /* Remaining _unready_ requests will be nop'ed when submitted */
 
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 }
 
 static void i9xx_submit_request(struct i915_request *request)
@@ -586,9 +587,44 @@ static void ring_context_reset(struct intel_context *ce)
        clear_bit(CONTEXT_VALID_BIT, &ce->flags);
 }
 
+static void ring_context_ban(struct intel_context *ce,
+                            struct i915_request *rq)
+{
+       struct intel_engine_cs *engine;
+
+       if (!rq || !i915_request_is_active(rq))
+               return;
+
+       engine = rq->engine;
+       lockdep_assert_held(&engine->sched_engine->lock);
+       list_for_each_entry_continue(rq, &engine->sched_engine->requests,
+                                    sched.link)
+               if (rq->context == ce) {
+                       i915_request_set_error_once(rq, -EIO);
+                       __i915_request_skip(rq);
+               }
+}
+
+static void ring_context_cancel_request(struct intel_context *ce,
+                                       struct i915_request *rq)
+{
+       struct intel_engine_cs *engine = NULL;
+
+       i915_request_active_engine(rq, &engine);
+
+       if (engine && intel_engine_pulse(engine))
+               intel_gt_handle_error(engine->gt, engine->mask, 0,
+                                     "request cancellation by %s",
+                                     current->comm);
+}
+
 static const struct intel_context_ops ring_context_ops = {
        .alloc = ring_context_alloc,
 
+       .cancel_request = ring_context_cancel_request,
+
+       .ban = ring_context_ban,
+
        .pre_pin = ring_context_pre_pin,
        .pin = ring_context_pin,
        .unpin = ring_context_unpin,
@@ -1047,6 +1083,25 @@ static void setup_irq(struct intel_engine_cs *engine)
        }
 }
 
+static void add_to_engine(struct i915_request *rq)
+{
+       lockdep_assert_held(&rq->engine->sched_engine->lock);
+       list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
+}
+
+static void remove_from_engine(struct i915_request *rq)
+{
+       spin_lock_irq(&rq->engine->sched_engine->lock);
+       list_del_init(&rq->sched.link);
+
+       /* Prevent further __await_execution() registering a cb, then flush */
+       set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+
+       spin_unlock_irq(&rq->engine->sched_engine->lock);
+
+       i915_request_notify_execute_cb_imm(rq);
+}
+
 static void setup_common(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *i915 = engine->i915;
@@ -1064,6 +1119,9 @@ static void setup_common(struct intel_engine_cs *engine)
        engine->reset.cancel = reset_cancel;
        engine->reset.finish = reset_finish;
 
+       engine->add_active_request = add_to_engine;
+       engine->remove_active_request = remove_from_engine;
+
        engine->cops = &ring_context_ops;
        engine->request_alloc = ring_request_alloc;
 
index 06e9a8e..d812b27 100644 (file)
@@ -37,6 +37,20 @@ static struct intel_uncore *rps_to_uncore(struct intel_rps *rps)
        return rps_to_gt(rps)->uncore;
 }
 
+static struct intel_guc_slpc *rps_to_slpc(struct intel_rps *rps)
+{
+       struct intel_gt *gt = rps_to_gt(rps);
+
+       return &gt->uc.guc.slpc;
+}
+
+static bool rps_uses_slpc(struct intel_rps *rps)
+{
+       struct intel_gt *gt = rps_to_gt(rps);
+
+       return intel_uc_uses_guc_slpc(&gt->uc);
+}
+
 static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask)
 {
        return mask & ~rps->pm_intrmsk_mbz;
@@ -167,6 +181,8 @@ static void rps_enable_interrupts(struct intel_rps *rps)
 {
        struct intel_gt *gt = rps_to_gt(rps);
 
+       GEM_BUG_ON(rps_uses_slpc(rps));
+
        GT_TRACE(gt, "interrupts:on rps->pm_events: %x, rps_pm_mask:%x\n",
                 rps->pm_events, rps_pm_mask(rps, rps->last_freq));
 
@@ -771,6 +787,8 @@ static int gen6_rps_set(struct intel_rps *rps, u8 val)
        struct drm_i915_private *i915 = rps_to_i915(rps);
        u32 swreq;
 
+       GEM_BUG_ON(rps_uses_slpc(rps));
+
        if (GRAPHICS_VER(i915) >= 9)
                swreq = GEN9_FREQUENCY(val);
        else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
@@ -861,6 +879,9 @@ void intel_rps_park(struct intel_rps *rps)
 {
        int adj;
 
+       if (!intel_rps_is_enabled(rps))
+               return;
+
        GEM_BUG_ON(atomic_read(&rps->num_waiters));
 
        if (!intel_rps_clear_active(rps))
@@ -999,7 +1020,7 @@ static void gen6_rps_init(struct intel_rps *rps)
 
        rps->efficient_freq = rps->rp1_freq;
        if (IS_HASWELL(i915) || IS_BROADWELL(i915) ||
-           IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
+           IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
                u32 ddcc_status = 0;
 
                if (sandybridge_pcode_read(i915,
@@ -1012,7 +1033,7 @@ static void gen6_rps_init(struct intel_rps *rps)
                                        rps->max_freq);
        }
 
-       if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 10) {
+       if (IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) {
                /* Store the frequency values in 16.66 MHZ units, which is
                 * the natural hardware unit for SKL
                 */
@@ -1356,6 +1377,9 @@ void intel_rps_enable(struct intel_rps *rps)
        if (!HAS_RPS(i915))
                return;
 
+       if (rps_uses_slpc(rps))
+               return;
+
        intel_gt_check_clock_frequency(rps_to_gt(rps));
 
        intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
@@ -1829,6 +1853,9 @@ void intel_rps_init(struct intel_rps *rps)
 {
        struct drm_i915_private *i915 = rps_to_i915(rps);
 
+       if (rps_uses_slpc(rps))
+               return;
+
        if (IS_CHERRYVIEW(i915))
                chv_rps_init(rps);
        else if (IS_VALLEYVIEW(i915))
@@ -1877,10 +1904,17 @@ void intel_rps_init(struct intel_rps *rps)
 
        if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) < 11)
                rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+
+       /* GuC needs ARAT expired interrupt unmasked */
+       if (intel_uc_uses_guc_submission(&rps_to_gt(rps)->uc))
+               rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
 }
 
 void intel_rps_sanitize(struct intel_rps *rps)
 {
+       if (rps_uses_slpc(rps))
+               return;
+
        if (GRAPHICS_VER(rps_to_i915(rps)) >= 6)
                rps_disable_interrupts(rps);
 }
@@ -1936,6 +1970,176 @@ u32 intel_rps_read_actual_frequency(struct intel_rps *rps)
        return freq;
 }
 
+u32 intel_rps_read_punit_req(struct intel_rps *rps)
+{
+       struct intel_uncore *uncore = rps_to_uncore(rps);
+
+       return intel_uncore_read(uncore, GEN6_RPNSWREQ);
+}
+
+static u32 intel_rps_get_req(u32 pureq)
+{
+       u32 req = pureq >> GEN9_SW_REQ_UNSLICE_RATIO_SHIFT;
+
+       return req;
+}
+
+u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps)
+{
+       u32 freq = intel_rps_get_req(intel_rps_read_punit_req(rps));
+
+       return intel_gpu_freq(rps, freq);
+}
+
+u32 intel_rps_get_requested_frequency(struct intel_rps *rps)
+{
+       if (rps_uses_slpc(rps))
+               return intel_rps_read_punit_req_frequency(rps);
+       else
+               return intel_gpu_freq(rps, rps->cur_freq);
+}
+
+u32 intel_rps_get_max_frequency(struct intel_rps *rps)
+{
+       struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+       if (rps_uses_slpc(rps))
+               return slpc->max_freq_softlimit;
+       else
+               return intel_gpu_freq(rps, rps->max_freq_softlimit);
+}
+
+u32 intel_rps_get_rp0_frequency(struct intel_rps *rps)
+{
+       struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+       if (rps_uses_slpc(rps))
+               return slpc->rp0_freq;
+       else
+               return intel_gpu_freq(rps, rps->rp0_freq);
+}
+
+u32 intel_rps_get_rp1_frequency(struct intel_rps *rps)
+{
+       struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+       if (rps_uses_slpc(rps))
+               return slpc->rp1_freq;
+       else
+               return intel_gpu_freq(rps, rps->rp1_freq);
+}
+
+u32 intel_rps_get_rpn_frequency(struct intel_rps *rps)
+{
+       struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+       if (rps_uses_slpc(rps))
+               return slpc->min_freq;
+       else
+               return intel_gpu_freq(rps, rps->min_freq);
+}
+
+static int set_max_freq(struct intel_rps *rps, u32 val)
+{
+       struct drm_i915_private *i915 = rps_to_i915(rps);
+       int ret = 0;
+
+       mutex_lock(&rps->lock);
+
+       val = intel_freq_opcode(rps, val);
+       if (val < rps->min_freq ||
+           val > rps->max_freq ||
+           val < rps->min_freq_softlimit) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       if (val > rps->rp0_freq)
+               drm_dbg(&i915->drm, "User requested overclocking to %d\n",
+                       intel_gpu_freq(rps, val));
+
+       rps->max_freq_softlimit = val;
+
+       val = clamp_t(int, rps->cur_freq,
+                     rps->min_freq_softlimit,
+                     rps->max_freq_softlimit);
+
+       /*
+        * We still need *_set_rps to process the new max_delay and
+        * update the interrupt limits and PMINTRMSK even though
+        * the frequency request may be unchanged.
+        */
+       intel_rps_set(rps, val);
+
+unlock:
+       mutex_unlock(&rps->lock);
+
+       return ret;
+}
+
+int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val)
+{
+       struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+       if (rps_uses_slpc(rps))
+               return intel_guc_slpc_set_max_freq(slpc, val);
+       else
+               return set_max_freq(rps, val);
+}
+
+u32 intel_rps_get_min_frequency(struct intel_rps *rps)
+{
+       struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+       if (rps_uses_slpc(rps))
+               return slpc->min_freq_softlimit;
+       else
+               return intel_gpu_freq(rps, rps->min_freq_softlimit);
+}
+
+static int set_min_freq(struct intel_rps *rps, u32 val)
+{
+       int ret = 0;
+
+       mutex_lock(&rps->lock);
+
+       val = intel_freq_opcode(rps, val);
+       if (val < rps->min_freq ||
+           val > rps->max_freq ||
+           val > rps->max_freq_softlimit) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       rps->min_freq_softlimit = val;
+
+       val = clamp_t(int, rps->cur_freq,
+                     rps->min_freq_softlimit,
+                     rps->max_freq_softlimit);
+
+       /*
+        * We still need *_set_rps to process the new min_delay and
+        * update the interrupt limits and PMINTRMSK even though
+        * the frequency request may be unchanged.
+        */
+       intel_rps_set(rps, val);
+
+unlock:
+       mutex_unlock(&rps->lock);
+
+       return ret;
+}
+
+int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
+{
+       struct intel_guc_slpc *slpc = rps_to_slpc(rps);
+
+       if (rps_uses_slpc(rps))
+               return intel_guc_slpc_set_min_freq(slpc, val);
+       else
+               return set_min_freq(rps, val);
+}
+
 /* External interface for intel_ips.ko */
 
 static struct drm_i915_private __rcu *ips_mchdev;
@@ -2129,4 +2333,5 @@ EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftest_rps.c"
+#include "selftest_slpc.c"
 #endif
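
Aside: the new intel_rps_get_*/set_* helpers added above all follow one pattern — if GuC SLPC owns frequency management, route to the SLPC soft limits, otherwise fall back to the legacy RPS bookkeeping and unit conversion. A compilable toy version of that split (the toy_* names and the 50 MHz-per-unit conversion are assumptions for illustration only):

/* Toy model only; not the driver's types or its real unit conversion. */
#include <stdbool.h>
#include <stdio.h>

struct toy_slpc {
	unsigned int max_freq_softlimit;	/* MHz */
};

struct toy_rps {
	bool uses_slpc;
	struct toy_slpc slpc;
	unsigned int max_freq_softlimit;	/* opcode units */
};

/* stand-in for intel_gpu_freq(): pretend one opcode unit == 50 MHz */
static unsigned int toy_gpu_freq(unsigned int val)
{
	return val * 50;
}

static unsigned int toy_get_max_frequency(struct toy_rps *rps)
{
	if (rps->uses_slpc)
		return rps->slpc.max_freq_softlimit;	/* already MHz */
	else
		return toy_gpu_freq(rps->max_freq_softlimit);
}

int main(void)
{
	struct toy_rps legacy = { .uses_slpc = false,
				  .max_freq_softlimit = 23 };
	struct toy_rps slpc = { .uses_slpc = true,
				.slpc.max_freq_softlimit = 1350 };

	printf("legacy max: %u MHz\n", toy_get_max_frequency(&legacy));
	printf("slpc   max: %u MHz\n", toy_get_max_frequency(&slpc));
	return 0;
}
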
index 1d2cfc9..4213bcc 100644 (file)
@@ -31,6 +31,16 @@ int intel_gpu_freq(struct intel_rps *rps, int val);
 int intel_freq_opcode(struct intel_rps *rps, int val);
 u32 intel_rps_get_cagf(struct intel_rps *rps, u32 rpstat1);
 u32 intel_rps_read_actual_frequency(struct intel_rps *rps);
+u32 intel_rps_get_requested_frequency(struct intel_rps *rps);
+u32 intel_rps_get_min_frequency(struct intel_rps *rps);
+int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val);
+u32 intel_rps_get_max_frequency(struct intel_rps *rps);
+int intel_rps_set_max_frequency(struct intel_rps *rps, u32 val);
+u32 intel_rps_get_rp0_frequency(struct intel_rps *rps);
+u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
+u32 intel_rps_get_rpn_frequency(struct intel_rps *rps);
+u32 intel_rps_read_punit_req(struct intel_rps *rps);
+u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps);
 
 void gen5_rps_irq_handler(struct intel_rps *rps);
 void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
index 367fd44..bbd2729 100644 (file)
@@ -139,17 +139,36 @@ static void gen12_sseu_info_init(struct intel_gt *gt)
         * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS.
         * Instead of splitting these, provide userspace with an array
         * of DSS to more closely represent the hardware resource.
+        *
+        * In addition, the concept of slice has been removed in Xe_HP.
+        * To be compatible with prior generations, assume a single slice
+        * across the entire device. Then calculate out the DSS for each
+        * workload type within that software slice.
         */
-       intel_sseu_set_info(sseu, 1, 6, 16);
+       if (IS_DG2(gt->i915) || IS_XEHPSDV(gt->i915))
+               intel_sseu_set_info(sseu, 1, 32, 16);
+       else
+               intel_sseu_set_info(sseu, 1, 6, 16);
 
-       s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
-               GEN11_GT_S_ENA_MASK;
+       /*
+        * As mentioned above, Xe_HP does not have the concept of a slice.
+        * Enable one for software backwards compatibility.
+        */
+       if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
+               s_en = 0x1;
+       else
+               s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
+                      GEN11_GT_S_ENA_MASK;
 
        dss_en = intel_uncore_read(uncore, GEN12_GT_DSS_ENABLE);
 
        /* one bit per pair of EUs */
-       eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
-                      GEN11_EU_DIS_MASK);
+       if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
+               eu_en_fuse = intel_uncore_read(uncore, XEHP_EU_ENABLE) & XEHP_EU_ENA_MASK;
+       else
+               eu_en_fuse = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
+                              GEN11_EU_DIS_MASK);
+
        for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++)
                if (eu_en_fuse & BIT(eu))
                        eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
@@ -188,83 +207,6 @@ static void gen11_sseu_info_init(struct intel_gt *gt)
        sseu->has_eu_pg = 1;
 }
 
-static void gen10_sseu_info_init(struct intel_gt *gt)
-{
-       struct intel_uncore *uncore = gt->uncore;
-       struct sseu_dev_info *sseu = &gt->info.sseu;
-       const u32 fuse2 = intel_uncore_read(uncore, GEN8_FUSE2);
-       const int eu_mask = 0xff;
-       u32 subslice_mask, eu_en;
-       int s, ss;
-
-       intel_sseu_set_info(sseu, 6, 4, 8);
-
-       sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >>
-               GEN10_F2_S_ENA_SHIFT;
-
-       /* Slice0 */
-       eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE0);
-       for (ss = 0; ss < sseu->max_subslices; ss++)
-               sseu_set_eus(sseu, 0, ss, (eu_en >> (8 * ss)) & eu_mask);
-       /* Slice1 */
-       sseu_set_eus(sseu, 1, 0, (eu_en >> 24) & eu_mask);
-       eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE1);
-       sseu_set_eus(sseu, 1, 1, eu_en & eu_mask);
-       /* Slice2 */
-       sseu_set_eus(sseu, 2, 0, (eu_en >> 8) & eu_mask);
-       sseu_set_eus(sseu, 2, 1, (eu_en >> 16) & eu_mask);
-       /* Slice3 */
-       sseu_set_eus(sseu, 3, 0, (eu_en >> 24) & eu_mask);
-       eu_en = ~intel_uncore_read(uncore, GEN8_EU_DISABLE2);
-       sseu_set_eus(sseu, 3, 1, eu_en & eu_mask);
-       /* Slice4 */
-       sseu_set_eus(sseu, 4, 0, (eu_en >> 8) & eu_mask);
-       sseu_set_eus(sseu, 4, 1, (eu_en >> 16) & eu_mask);
-       /* Slice5 */
-       sseu_set_eus(sseu, 5, 0, (eu_en >> 24) & eu_mask);
-       eu_en = ~intel_uncore_read(uncore, GEN10_EU_DISABLE3);
-       sseu_set_eus(sseu, 5, 1, eu_en & eu_mask);
-
-       subslice_mask = (1 << 4) - 1;
-       subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >>
-                          GEN10_F2_SS_DIS_SHIFT);
-
-       for (s = 0; s < sseu->max_slices; s++) {
-               u32 subslice_mask_with_eus = subslice_mask;
-
-               for (ss = 0; ss < sseu->max_subslices; ss++) {
-                       if (sseu_get_eus(sseu, s, ss) == 0)
-                               subslice_mask_with_eus &= ~BIT(ss);
-               }
-
-               /*
-                * Slice0 can have up to 3 subslices, but there are only 2 in
-                * slice1/2.
-                */
-               intel_sseu_set_subslices(sseu, s, s == 0 ?
-                                        subslice_mask_with_eus :
-                                        subslice_mask_with_eus & 0x3);
-       }
-
-       sseu->eu_total = compute_eu_total(sseu);
-
-       /*
-        * CNL is expected to always have a uniform distribution
-        * of EU across subslices with the exception that any one
-        * EU in any one subslice may be fused off for die
-        * recovery.
-        */
-       sseu->eu_per_subslice =
-               intel_sseu_subslice_total(sseu) ?
-               DIV_ROUND_UP(sseu->eu_total, intel_sseu_subslice_total(sseu)) :
-               0;
-
-       /* No restrictions on Power Gating */
-       sseu->has_slice_pg = 1;
-       sseu->has_subslice_pg = 1;
-       sseu->has_eu_pg = 1;
-}
-
 static void cherryview_sseu_info_init(struct intel_gt *gt)
 {
        struct sseu_dev_info *sseu = &gt->info.sseu;
@@ -592,8 +534,6 @@ void intel_sseu_info_init(struct intel_gt *gt)
                bdw_sseu_info_init(gt);
        else if (GRAPHICS_VER(i915) == 9)
                gen9_sseu_info_init(gt);
-       else if (GRAPHICS_VER(i915) == 10)
-               gen10_sseu_info_init(gt);
        else if (GRAPHICS_VER(i915) == 11)
                gen11_sseu_info_init(gt);
        else if (GRAPHICS_VER(i915) >= 12)
@@ -759,3 +699,21 @@ void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
                }
        }
 }
+
+u16 intel_slicemask_from_dssmask(u64 dss_mask, int dss_per_slice)
+{
+       u16 slice_mask = 0;
+       int i;
+
+       WARN_ON(sizeof(dss_mask) * 8 / dss_per_slice > 8 * sizeof(slice_mask));
+
+       for (i = 0; dss_mask; i++) {
+               if (dss_mask & GENMASK(dss_per_slice - 1, 0))
+                       slice_mask |= BIT(i);
+
+               dss_mask >>= dss_per_slice;
+       }
+
+       return slice_mask;
+}
+
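
Aside: intel_slicemask_from_dssmask() above reports a slice as present whenever any DSS bit inside its dss_per_slice-wide window is set. The same bit manipulation as a standalone program with a worked example (GENMASK()/BIT() replaced by plain expressions; not the kernel's headers):

/* Toy reimplementation for illustration only. */
#include <stdint.h>
#include <stdio.h>

static uint16_t slicemask_from_dssmask(uint64_t dss_mask, int dss_per_slice)
{
	/* GENMASK(dss_per_slice - 1, 0) equivalent */
	uint64_t window = (dss_per_slice >= 64) ? ~0ULL :
			  ((1ULL << dss_per_slice) - 1);
	uint16_t slice_mask = 0;
	int i;

	for (i = 0; dss_mask; i++) {
		if (dss_mask & window)
			slice_mask |= (uint16_t)(1U << i);	/* BIT(i) */

		dss_mask >>= dss_per_slice;
	}

	return slice_mask;
}

int main(void)
{
	/* DSS 0-3 populate gslice 0, DSS 9 populates gslice 2; gslice 1 fused */
	uint64_t dss_mask = 0x20f;

	/* with 4 DSS per gslice this yields slices 0 and 2: 0x5 */
	printf("slice mask = 0x%x\n",
	       (unsigned int)slicemask_from_dssmask(dss_mask, 4));
	return 0;
}
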
index 4cd1a8a..22fef98 100644 (file)
@@ -15,13 +15,17 @@ struct drm_i915_private;
 struct intel_gt;
 struct drm_printer;
 
-#define GEN_MAX_SLICES         (6) /* CNL upper bound */
-#define GEN_MAX_SUBSLICES      (8) /* ICL upper bound */
+#define GEN_MAX_SLICES         (3) /* SKL upper bound */
+#define GEN_MAX_SUBSLICES      (32) /* XEHPSDV upper bound */
 #define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE)
 #define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES)
 #define GEN_MAX_EUS            (16) /* TGL upper bound */
 #define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS)
 
+#define GEN_DSS_PER_GSLICE     4
+#define GEN_DSS_PER_CSLICE     8
+#define GEN_DSS_PER_MSLICE     8
+
 struct sseu_dev_info {
        u8 slice_mask;
        u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
@@ -104,4 +108,6 @@ void intel_sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p);
 void intel_sseu_print_topology(const struct sseu_dev_info *sseu,
                               struct drm_printer *p);
 
+u16 intel_slicemask_from_dssmask(u64 dss_mask, int dss_per_slice);
+
 #endif /* __INTEL_SSEU_H__ */
index 714fe84..1ba8b7d 100644 (file)
@@ -50,10 +50,10 @@ static void cherryview_sseu_device_status(struct intel_gt *gt,
 #undef SS_MAX
 }
 
-static void gen10_sseu_device_status(struct intel_gt *gt,
+static void gen11_sseu_device_status(struct intel_gt *gt,
                                     struct sseu_dev_info *sseu)
 {
-#define SS_MAX 6
+#define SS_MAX 8
        struct intel_uncore *uncore = gt->uncore;
        const struct intel_gt_info *info = &gt->info;
        u32 s_reg[SS_MAX], eu_reg[2 * SS_MAX], eu_mask[2];
@@ -267,8 +267,8 @@ int intel_sseu_status(struct seq_file *m, struct intel_gt *gt)
                        bdw_sseu_device_status(gt, &sseu);
                else if (GRAPHICS_VER(i915) == 9)
                        gen9_sseu_device_status(gt, &sseu);
-               else if (GRAPHICS_VER(i915) >= 10)
-                       gen10_sseu_device_status(gt, &sseu);
+               else if (GRAPHICS_VER(i915) >= 11)
+                       gen11_sseu_device_status(gt, &sseu);
        }
 
        i915_print_sseu_info(m, false, HAS_POOLED_EU(i915), &sseu);
index b62d1e3..aae609d 100644 (file)
@@ -150,13 +150,14 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
 }
 
 static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
-                  u32 clear, u32 set, u32 read_mask)
+                  u32 clear, u32 set, u32 read_mask, bool masked_reg)
 {
        struct i915_wa wa = {
                .reg  = reg,
                .clr  = clear,
                .set  = set,
                .read = read_mask,
+               .masked_reg = masked_reg,
        };
 
        _wa_add(wal, &wa);
@@ -165,7 +166,7 @@ static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
 static void
 wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
 {
-       wa_add(wal, reg, clear, set, clear);
+       wa_add(wal, reg, clear, set, clear, false);
 }
 
 static void
@@ -200,20 +201,20 @@ wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
 static void
 wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
-       wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val);
+       wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
 }
 
 static void
 wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
 {
-       wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val);
+       wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
 }
 
 static void
 wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
                    u32 mask, u32 val)
 {
-       wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask);
+       wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
 }
 
 static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -514,53 +515,15 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
                     GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 }
 
-static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
-                                    struct i915_wa_list *wal)
-{
-       /* WaForceContextSaveRestoreNonCoherent:cnl */
-       wa_masked_en(wal, CNL_HDC_CHICKEN0,
-                    HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
-
-       /* WaDisableReplayBufferBankArbitrationOptimization:cnl */
-       wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
-                    GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
-       /* WaPushConstantDereferenceHoldDisable:cnl */
-       wa_masked_en(wal, GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
-
-       /* FtrEnableFastAnisoL1BankingFix:cnl */
-       wa_masked_en(wal, HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX);
-
-       /* WaDisable3DMidCmdPreemption:cnl */
-       wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
-
-       /* WaDisableGPGPUMidCmdPreemption:cnl */
-       wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
-                           GEN9_PREEMPT_GPGPU_LEVEL_MASK,
-                           GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
-
-       /* WaDisableEarlyEOT:cnl */
-       wa_masked_en(wal, GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT);
-}
-
 static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
                                     struct i915_wa_list *wal)
 {
-       struct drm_i915_private *i915 = engine->i915;
-
-       /* WaDisableBankHangMode:icl */
+       /* Wa_1406697149 (WaDisableBankHangMode:icl) */
        wa_write(wal,
                 GEN8_L3CNTLREG,
                 intel_uncore_read(engine->uncore, GEN8_L3CNTLREG) |
                 GEN8_ERRDETBCTRL);
 
-       /* Wa_1604370585:icl (pre-prod)
-        * Formerly known as WaPushConstantDereferenceHoldDisable
-        */
-       if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
-               wa_masked_en(wal, GEN7_ROW_CHICKEN2,
-                            PUSH_CONSTANT_DEREF_DISABLE);
-
        /* WaForceEnableNonCoherent:icl
         * This is not the same workaround as in early Gen9 platforms, where
         * lacking this could cause system hangs, but coherency performance
@@ -570,23 +533,11 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
         */
        wa_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
 
-       /* Wa_2006611047:icl (pre-prod)
-        * Formerly known as WaDisableImprovedTdlClkGating
-        */
-       if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
-               wa_masked_en(wal, GEN7_ROW_CHICKEN2,
-                            GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
-
-       /* Wa_2006665173:icl (pre-prod) */
-       if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
-               wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
-                            GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC);
-
        /* WaEnableFloatBlendOptimization:icl */
-       wa_write_clr_set(wal,
-                        GEN10_CACHE_MODE_SS,
-                        0, /* write-only, so skip validation */
-                        _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
+       wa_add(wal, GEN10_CACHE_MODE_SS, 0,
+              _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
+              0 /* write-only, so skip validation */,
+              true);
 
        /* WaDisableGPGPUMidThreadPreemption:icl */
        wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
@@ -631,7 +582,7 @@ static void gen12_ctx_gt_tuning_init(struct intel_engine_cs *engine,
               FF_MODE2,
               FF_MODE2_TDS_TIMER_MASK,
               FF_MODE2_TDS_TIMER_128,
-              0);
+              0, false);
 }
 
 static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -640,15 +591,16 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
        gen12_ctx_gt_tuning_init(engine, wal);
 
        /*
-        * Wa_1409142259:tgl
-        * Wa_1409347922:tgl
-        * Wa_1409252684:tgl
-        * Wa_1409217633:tgl
-        * Wa_1409207793:tgl
-        * Wa_1409178076:tgl
-        * Wa_1408979724:tgl
-        * Wa_14010443199:rkl
-        * Wa_14010698770:rkl
+        * Wa_1409142259:tgl,dg1,adl-p
+        * Wa_1409347922:tgl,dg1,adl-p
+        * Wa_1409252684:tgl,dg1,adl-p
+        * Wa_1409217633:tgl,dg1,adl-p
+        * Wa_1409207793:tgl,dg1,adl-p
+        * Wa_1409178076:tgl,dg1,adl-p
+        * Wa_1408979724:tgl,dg1,adl-p
+        * Wa_14010443199:tgl,rkl,dg1,adl-p
+        * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
+        * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
         */
        wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
                     GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
@@ -668,7 +620,14 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
               FF_MODE2,
               FF_MODE2_GS_TIMER_MASK,
               FF_MODE2_GS_TIMER_224,
-              0);
+              0, false);
+
+       /*
+        * Wa_14012131227:dg1
+        * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
+        */
+       wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1,
+                    GEN9_RHWO_OPTIMIZATION_DISABLE);
 }
 
 static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -703,8 +662,6 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
                gen12_ctx_workarounds_init(engine, wal);
        else if (GRAPHICS_VER(i915) == 11)
                icl_ctx_workarounds_init(engine, wal);
-       else if (IS_CANNONLAKE(i915))
-               cnl_ctx_workarounds_init(engine, wal);
        else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
                cfl_ctx_workarounds_init(engine, wal);
        else if (IS_GEMINILAKE(i915))
@@ -839,7 +796,7 @@ hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
        wa_add(wal,
               HSW_ROW_CHICKEN3, 0,
               _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
-               0 /* XXX does this reg exist? */);
+              0 /* XXX does this reg exist? */, true);
 
        /* WaVSRefCountFullforceMissDisable:hsw */
        wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
@@ -882,30 +839,19 @@ skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
                    GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
 
        /* WaInPlaceDecompressionHang:skl */
-       if (IS_SKL_REVID(i915, SKL_REVID_H0, REVID_FOREVER))
+       if (IS_SKL_GT_STEP(i915, STEP_A0, STEP_H0))
                wa_write_or(wal,
                            GEN9_GAMT_ECO_REG_RW_IA,
                            GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
-static void
-bxt_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
-{
-       gen9_gt_workarounds_init(i915, wal);
-
-       /* WaInPlaceDecompressionHang:bxt */
-       wa_write_or(wal,
-                   GEN9_GAMT_ECO_REG_RW_IA,
-                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-}
-
 static void
 kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
        gen9_gt_workarounds_init(i915, wal);
 
        /* WaDisableDynamicCreditSharing:kbl */
-       if (IS_KBL_GT_STEP(i915, 0, STEP_B0))
+       if (IS_KBL_GT_STEP(i915, 0, STEP_C0))
                wa_write_or(wal,
                            GAMT_CHKN_BIT_REG,
                            GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
@@ -943,98 +889,144 @@ cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
 }
 
+static void __set_mcr_steering(struct i915_wa_list *wal,
+                              i915_reg_t steering_reg,
+                              unsigned int slice, unsigned int subslice)
+{
+       u32 mcr, mcr_mask;
+
+       mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
+       mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
+
+       wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
+}
+
+static void __add_mcr_wa(struct drm_i915_private *i915, struct i915_wa_list *wal,
+                        unsigned int slice, unsigned int subslice)
+{
+       drm_dbg(&i915->drm, "MCR slice=0x%x, subslice=0x%x\n", slice, subslice);
+
+       __set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);
+}
+
 static void
-wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
+icl_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
        const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
        unsigned int slice, subslice;
-       u32 l3_en, mcr, mcr_mask;
 
-       GEM_BUG_ON(GRAPHICS_VER(i915) < 10);
+       GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
+       GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);
+       slice = 0;
 
        /*
-        * WaProgramMgsrForL3BankSpecificMmioReads: cnl,icl
-        * L3Banks could be fused off in single slice scenario. If that is
-        * the case, we might need to program MCR select to a valid L3Bank
-        * by default, to make sure we correctly read certain registers
-        * later on (in the range 0xB100 - 0xB3FF).
+        * Although a platform may have subslices, we need to always steer
+        * reads to the lowest instance that isn't fused off.  When Render
+        * Power Gating is enabled, grabbing forcewake will only power up a
+        * single subslice (the "minconfig") if there isn't a real workload
+        * that needs to be run; this means that if we steer register reads to
+        * one of the higher subslices, we run the risk of reading back 0's or
+        * random garbage.
+        */
+       subslice = __ffs(intel_sseu_get_subslices(sseu, slice));
+
+       /*
+        * If the subslice we picked above also steers us to a valid L3 bank,
+        * then we can just rely on the default steering and won't need to
+        * worry about explicitly re-steering L3BANK reads later.
+        */
+       if (i915->gt.info.l3bank_mask & BIT(subslice))
+               i915->gt.steering_table[L3BANK] = NULL;
+
+       __add_mcr_wa(i915, wal, slice, subslice);
+}
+
+static void
+xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+       struct drm_i915_private *i915 = gt->i915;
+       const struct sseu_dev_info *sseu = &gt->info.sseu;
+       unsigned long slice, subslice = 0, slice_mask = 0;
+       u64 dss_mask = 0;
+       u32 lncf_mask = 0;
+       int i;
+
+       /*
+        * On Xe_HP the steering increases in complexity. There are now several
+        * more units that require steering and we're not guaranteed to be able
+        * to find a common setting for all of them. These are:
+        * - GSLICE (fusable)
+        * - DSS (sub-unit within gslice; fusable)
+        * - L3 Bank (fusable)
+        * - MSLICE (fusable)
+        * - LNCF (sub-unit within mslice; always present if mslice is present)
         *
-        * WaProgramMgsrForCorrectSliceSpecificMmioReads:cnl,icl
-        * Before any MMIO read into slice/subslice specific registers, MCR
-        * packet control register needs to be programmed to point to any
-        * enabled s/ss pair. Otherwise, incorrect values will be returned.
-        * This means each subsequent MMIO read will be forwarded to an
-        * specific s/ss combination, but this is OK since these registers
-        * are consistent across s/ss in almost all cases. In the rare
-        * occasions, such as INSTDONE, where this value is dependent
-        * on s/ss combo, the read should be done with read_subslice_reg.
+        * We'll do our default/implicit steering based on GSLICE (in the
+        * sliceid field) and DSS (in the subsliceid field).  If we can
+        * find overlap between the valid MSLICE and/or LNCF values with
+        * a suitable GSLICE, then we can just re-use the default value and
+        * skip any explicit steering at runtime.
         *
-        * Since GEN8_MCR_SELECTOR contains dual-purpose bits which select both
-        * to which subslice, or to which L3 bank, the respective mmio reads
-        * will go, we have to find a common index which works for both
-        * accesses.
+        * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
+        * a valid sliceid value.  DSS steering is the only type of steering
+        * that utilizes the 'subsliceid' bits.
         *
-        * Case where we cannot find a common index fortunately should not
-        * happen in production hardware, so we only emit a warning instead of
-        * implementing something more complex that requires checking the range
-        * of every MMIO read.
+        * Also note that, even though the steering domain is called "GSlice"
+        * and it is encoded in the register using the gslice format, the spec
+        * says that the combined (geometry | compute) fuse should be used to
+        * select the steering.
         */
 
-       if (GRAPHICS_VER(i915) >= 10 && is_power_of_2(sseu->slice_mask)) {
-               u32 l3_fuse =
-                       intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3) &
-                       GEN10_L3BANK_MASK;
+       /* Find the potential gslice candidates */
+       dss_mask = intel_sseu_get_subslices(sseu, 0);
+       slice_mask = intel_slicemask_from_dssmask(dss_mask, GEN_DSS_PER_GSLICE);
 
-               drm_dbg(&i915->drm, "L3 fuse = %x\n", l3_fuse);
-               l3_en = ~(l3_fuse << GEN10_L3BANK_PAIR_COUNT | l3_fuse);
-       } else {
-               l3_en = ~0;
-       }
+       /*
+        * Find the potential LNCF candidates.  Either LNCF within a valid
+        * mslice is fine.
+        */
+       for_each_set_bit(i, &gt->info.mslice_mask, GEN12_MAX_MSLICES)
+               lncf_mask |= (0x3 << (i * 2));
 
-       slice = fls(sseu->slice_mask) - 1;
-       subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice));
-       if (!subslice) {
-               drm_warn(&i915->drm,
-                        "No common index found between subslice mask %x and L3 bank mask %x!\n",
-                        intel_sseu_get_subslices(sseu, slice), l3_en);
-               subslice = fls(l3_en);
-               drm_WARN_ON(&i915->drm, !subslice);
-       }
-       subslice--;
-
-       if (GRAPHICS_VER(i915) >= 11) {
-               mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
-               mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
-       } else {
-               mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
-               mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+       /*
+        * Are there any sliceid values that work for both GSLICE and LNCF
+        * steering?
+        */
+       if (slice_mask & lncf_mask) {
+               slice_mask &= lncf_mask;
+               gt->steering_table[LNCF] = NULL;
        }
 
-       drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr);
+       /* How about sliceid values that also work for MSLICE steering? */
+       if (slice_mask & gt->info.mslice_mask) {
+               slice_mask &= gt->info.mslice_mask;
+               gt->steering_table[MSLICE] = NULL;
+       }
 
-       wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
-}
+       slice = __ffs(slice_mask);
+       subslice = __ffs(dss_mask >> (slice * GEN_DSS_PER_GSLICE));
+       WARN_ON(subslice > GEN_DSS_PER_GSLICE);
+       WARN_ON(dss_mask >> (slice * GEN_DSS_PER_GSLICE) == 0);
 
-static void
-cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
-{
-       wa_init_mcr(i915, wal);
+       __add_mcr_wa(i915, wal, slice, subslice);
 
-       /* WaInPlaceDecompressionHang:cnl */
-       wa_write_or(wal,
-                   GEN9_GAMT_ECO_REG_RW_IA,
-                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+       /*
+        * SQIDI ranges are special because they use different steering
+        * registers than everything else we work with.  On XeHP SDV and
+        * DG2-G10, any value in the steering registers will work fine since
+        * all instances are present, but DG2-G11 only has SQIDI instances at
+        * ID's 2 and 3, so we need to steer to one of those.  For simplicity
+        * we'll just steer to a hardcoded "2" since that value will work
+        * everywhere.
+        */
+       __set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
+       __set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);
 }
 
 static void
 icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       wa_init_mcr(i915, wal);
-
-       /* WaInPlaceDecompressionHang:icl */
-       wa_write_or(wal,
-                   GEN9_GAMT_ECO_REG_RW_IA,
-                   GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+       icl_wa_init_mcr(i915, wal);
 
        /* WaModifyGamTlbPartitioning:icl */
        wa_write_clr_set(wal,
@@ -1057,18 +1049,6 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
                    GEN8_GAMW_ECO_DEV_RW_IA,
                    GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
 
-       /* Wa_1405779004:icl (pre-prod) */
-       if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
-               wa_write_or(wal,
-                           SLICE_UNIT_LEVEL_CLKGATE,
-                           MSCUNIT_CLKGATE_DIS);
-
-       /* Wa_1406838659:icl (pre-prod) */
-       if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
-               wa_write_or(wal,
-                           INF_UNIT_LEVEL_CLKGATE,
-                           CGPSF_CLKGATE_DIS);
-
        /* Wa_1406463099:icl
         * Formerly known as WaGamTlbPendError
         */
@@ -1078,10 +1058,16 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 
        /* Wa_1607087056:icl,ehl,jsl */
        if (IS_ICELAKE(i915) ||
-           IS_JSL_EHL_REVID(i915, EHL_REVID_A0, EHL_REVID_A0))
+           IS_JSL_EHL_GT_STEP(i915, STEP_A0, STEP_B0))
                wa_write_or(wal,
                            SLICE_UNIT_LEVEL_CLKGATE,
                            L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
+
+       /*
+        * This is not a documented workaround, but rather an optimization
+        * to reduce sampler power.
+        */
+       wa_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
 }
 
 /*
@@ -1111,10 +1097,13 @@ static void
 gen12_gt_workarounds_init(struct drm_i915_private *i915,
                          struct i915_wa_list *wal)
 {
-       wa_init_mcr(i915, wal);
+       icl_wa_init_mcr(i915, wal);
 
-       /* Wa_14011060649:tgl,rkl,dg1,adls */
+       /* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
        wa_14011060649(i915, wal);
+
+       /* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
+       wa_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
 }
 
 static void
@@ -1123,19 +1112,19 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
        gen12_gt_workarounds_init(i915, wal);
 
        /* Wa_1409420604:tgl */
-       if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0))
+       if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
                wa_write_or(wal,
                            SUBSLICE_UNIT_LEVEL_CLKGATE2,
                            CPSSUNIT_CLKGATE_DIS);
 
        /* Wa_1607087056:tgl also known as BUG:1409180338 */
-       if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0))
+       if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
                wa_write_or(wal,
                            SLICE_UNIT_LEVEL_CLKGATE,
                            L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
 
        /* Wa_1408615072:tgl[a0] */
-       if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0))
+       if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
                wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
                            VSUNIT_CLKGATE_DIS_TGL);
 }
@@ -1146,7 +1135,7 @@ dg1_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
        gen12_gt_workarounds_init(i915, wal);
 
        /* Wa_1607087056:dg1 */
-       if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0))
+       if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0))
                wa_write_or(wal,
                            SLICE_UNIT_LEVEL_CLKGATE,
                            L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
@@ -1164,10 +1153,18 @@ dg1_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
                            VSUNIT_CLKGATE_DIS_TGL);
 }
 
+static void
+xehpsdv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+       xehp_init_mcr(&i915->gt, wal);
+}
+
 static void
 gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
-       if (IS_DG1(i915))
+       if (IS_XEHPSDV(i915))
+               xehpsdv_gt_workarounds_init(i915, wal);
+       else if (IS_DG1(i915))
                dg1_gt_workarounds_init(i915, wal);
        else if (IS_TIGERLAKE(i915))
                tgl_gt_workarounds_init(i915, wal);
@@ -1175,8 +1172,6 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
                gen12_gt_workarounds_init(i915, wal);
        else if (GRAPHICS_VER(i915) == 11)
                icl_gt_workarounds_init(i915, wal);
-       else if (IS_CANNONLAKE(i915))
-               cnl_gt_workarounds_init(i915, wal);
        else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
                cfl_gt_workarounds_init(i915, wal);
        else if (IS_GEMINILAKE(i915))
@@ -1184,7 +1179,7 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
        else if (IS_KABYLAKE(i915))
                kbl_gt_workarounds_init(i915, wal);
        else if (IS_BROXTON(i915))
-               bxt_gt_workarounds_init(i915, wal);
+               gen9_gt_workarounds_init(i915, wal);
        else if (IS_SKYLAKE(i915))
                skl_gt_workarounds_init(i915, wal);
        else if (IS_HASWELL(i915))
@@ -1247,8 +1242,9 @@ wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
 }
 
 static void
-wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
+wa_list_apply(struct intel_gt *gt, const struct i915_wa_list *wal)
 {
+       struct intel_uncore *uncore = gt->uncore;
        enum forcewake_domains fw;
        unsigned long flags;
        struct i915_wa *wa;
@@ -1263,13 +1259,16 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
        intel_uncore_forcewake_get__locked(uncore, fw);
 
        for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
-               if (wa->clr)
-                       intel_uncore_rmw_fw(uncore, wa->reg, wa->clr, wa->set);
-               else
-                       intel_uncore_write_fw(uncore, wa->reg, wa->set);
+               u32 val, old = 0;
+
+               /* open-coded rmw due to steering */
+               old = wa->clr ? intel_gt_read_register_fw(gt, wa->reg) : 0;
+               val = (old & ~wa->clr) | wa->set;
+               if (val != old || !wa->clr)
+                       intel_uncore_write_fw(uncore, wa->reg, val);
+
                if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
-                       wa_verify(wa,
-                                 intel_uncore_read_fw(uncore, wa->reg),
+                       wa_verify(wa, intel_gt_read_register_fw(gt, wa->reg),
                                  wal->name, "application");
        }
 
@@ -1279,28 +1278,39 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
 
 void intel_gt_apply_workarounds(struct intel_gt *gt)
 {
-       wa_list_apply(gt->uncore, &gt->i915->gt_wa_list);
+       wa_list_apply(gt, &gt->i915->gt_wa_list);
 }
 
-static bool wa_list_verify(struct intel_uncore *uncore,
+static bool wa_list_verify(struct intel_gt *gt,
                           const struct i915_wa_list *wal,
                           const char *from)
 {
+       struct intel_uncore *uncore = gt->uncore;
        struct i915_wa *wa;
+       enum forcewake_domains fw;
+       unsigned long flags;
        unsigned int i;
        bool ok = true;
 
+       fw = wal_get_fw_for_rmw(uncore, wal);
+
+       spin_lock_irqsave(&uncore->lock, flags);
+       intel_uncore_forcewake_get__locked(uncore, fw);
+
        for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
                ok &= wa_verify(wa,
-                               intel_uncore_read(uncore, wa->reg),
+                               intel_gt_read_register_fw(gt, wa->reg),
                                wal->name, from);
 
+       intel_uncore_forcewake_put__locked(uncore, fw);
+       spin_unlock_irqrestore(&uncore->lock, flags);
+
        return ok;
 }
 
 bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
 {
-       return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from);
+       return wa_list_verify(gt, &gt->i915->gt_wa_list, from);
 }
 
 __maybe_unused
@@ -1438,17 +1448,6 @@ static void cml_whitelist_build(struct intel_engine_cs *engine)
        cfl_whitelist_build(engine);
 }
 
-static void cnl_whitelist_build(struct intel_engine_cs *engine)
-{
-       struct i915_wa_list *w = &engine->whitelist;
-
-       if (engine->class != RENDER_CLASS)
-               return;
-
-       /* WaEnablePreemptionGranularityControlByUMD:cnl */
-       whitelist_reg(w, GEN8_CS_CHICKEN1);
-}
-
 static void icl_whitelist_build(struct intel_engine_cs *engine)
 {
        struct i915_wa_list *w = &engine->whitelist;
@@ -1542,7 +1541,7 @@ static void dg1_whitelist_build(struct intel_engine_cs *engine)
        tgl_whitelist_build(engine);
 
        /* GEN:BUG:1409280441:dg1 */
-       if (IS_DG1_REVID(engine->i915, DG1_REVID_A0, DG1_REVID_A0) &&
+       if (IS_DG1_GT_STEP(engine->i915, STEP_A0, STEP_B0) &&
            (engine->class == RENDER_CLASS ||
             engine->class == COPY_ENGINE_CLASS))
                whitelist_reg_ext(w, RING_ID(engine->mmio_base),
@@ -1562,8 +1561,6 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
                tgl_whitelist_build(engine);
        else if (GRAPHICS_VER(i915) == 11)
                icl_whitelist_build(engine);
-       else if (IS_CANNONLAKE(i915))
-               cnl_whitelist_build(engine);
        else if (IS_COMETLAKE(i915))
                cml_whitelist_build(engine);
        else if (IS_COFFEELAKE(i915))
@@ -1612,8 +1609,8 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 {
        struct drm_i915_private *i915 = engine->i915;
 
-       if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
-           IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0)) {
+       if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) ||
+           IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) {
                /*
                 * Wa_1607138336:tgl[a0],dg1[a0]
                 * Wa_1607063988:tgl[a0],dg1[a0]
@@ -1623,7 +1620,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                            GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
        }
 
-       if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_A0)) {
+       if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0)) {
                /*
                 * Wa_1606679103:tgl
                 * (see also Wa_1606682166:icl)
@@ -1633,44 +1630,46 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                            GEN7_DISABLE_SAMPLER_PREFETCH);
        }
 
-       if (IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
+       if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
            IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
-               /* Wa_1606931601:tgl,rkl,dg1,adl-s */
+               /* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
                wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
 
                /*
                 * Wa_1407928979:tgl A*
                 * Wa_18011464164:tgl[B0+],dg1[B0+]
                 * Wa_22010931296:tgl[B0+],dg1[B0+]
-                * Wa_14010919138:rkl,dg1,adl-s
+                * Wa_14010919138:rkl,dg1,adl-s,adl-p
                 */
                wa_write_or(wal, GEN7_FF_THREAD_MODE,
                            GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
 
                /*
-                * Wa_1606700617:tgl,dg1
-                * Wa_22010271021:tgl,rkl,dg1, adl-s
+                * Wa_1606700617:tgl,dg1,adl-p
+                * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
+                * Wa_14010826681:tgl,dg1,rkl,adl-p
                 */
                wa_masked_en(wal,
                             GEN9_CS_DEBUG_MODE1,
                             FF_DOP_CLOCK_GATE_DISABLE);
        }
 
-       if (IS_ALDERLAKE_S(i915) || IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
+       if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
+           IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) ||
            IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
-               /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s */
+               /* Wa_1409804808:tgl,rkl,dg1[a0],adl-s,adl-p */
                wa_masked_en(wal, GEN7_ROW_CHICKEN2,
                             GEN12_PUSH_CONST_DEREF_HOLD_DIS);
 
                /*
                 * Wa_1409085225:tgl
-                * Wa_14010229206:tgl,rkl,dg1[a0],adl-s
+                * Wa_14010229206:tgl,rkl,dg1[a0],adl-s,adl-p
                 */
                wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
        }
 
 
-       if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) ||
+       if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0) ||
            IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
                /*
                 * Wa_1607030317:tgl
@@ -1688,8 +1687,9 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                             GEN8_RC_SEMA_IDLE_MSG_DISABLE);
        }
 
-       if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
-               /* Wa_1406941453:tgl,rkl,dg1 */
+       if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) ||
+           IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) {
+               /* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
                wa_masked_en(wal,
                             GEN10_SAMPLER_MODE,
                             ENABLE_SMALLPL);
@@ -1701,11 +1701,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                             _3D_CHICKEN3,
                             _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
 
-               /* WaPipelineFlushCoherentLines:icl */
-               wa_write_or(wal,
-                           GEN8_L3SQCREG4,
-                           GEN8_LQSC_FLUSH_COHERENT_LINES);
-
                /*
                 * Wa_1405543622:icl
                 * Formerly known as WaGAPZPriorityScheme
@@ -1735,19 +1730,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                            GEN8_L3SQCREG4,
                            GEN11_LQSC_CLEAN_EVICT_DISABLE);
 
-               /* WaForwardProgressSoftReset:icl */
-               wa_write_or(wal,
-                           GEN10_SCRATCH_LNCF2,
-                           PMFLUSHDONE_LNICRSDROP |
-                           PMFLUSH_GAPL3UNBLOCK |
-                           PMFLUSHDONE_LNEBLK);
-
-               /* Wa_1406609255:icl (pre-prod) */
-               if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
-                       wa_write_or(wal,
-                                   GEN7_SARCHKMD,
-                                   GEN7_DISABLE_DEMAND_PREFETCH);
-
                /* Wa_1606682166:icl */
                wa_write_or(wal,
                            GEN7_SARCHKMD,
@@ -1947,10 +1929,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                 * disable bit, which we don't touch here, but it's good
                 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
                 */
-               wa_add(wal, GEN7_GT_MODE, 0,
-                      _MASKED_FIELD(GEN6_WIZ_HASHING_MASK,
-                                    GEN6_WIZ_HASHING_16x4),
-                      GEN6_WIZ_HASHING_16x4);
+               wa_masked_field_set(wal,
+                                   GEN7_GT_MODE,
+                                   GEN6_WIZ_HASHING_MASK,
+                                   GEN6_WIZ_HASHING_16x4);
        }
 
        if (IS_GRAPHICS_VER(i915, 6, 7))
@@ -2000,10 +1982,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                 * disable bit, which we don't touch here, but it's good
                 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
                 */
-               wa_add(wal,
-                      GEN6_GT_MODE, 0,
-                      _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
-                      GEN6_WIZ_HASHING_16x4);
+               wa_masked_field_set(wal,
+                                   GEN6_GT_MODE,
+                                   GEN6_WIZ_HASHING_MASK,
+                                   GEN6_WIZ_HASHING_16x4);
 
                /* WaDisable_RenderCache_OperationalFlush:snb */
                wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
@@ -2024,7 +2006,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                wa_add(wal, MI_MODE,
                       0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
                       /* XXX bit doesn't stick on Broadwater */
-                      IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
+                      IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
 
        if (GRAPHICS_VER(i915) == 4)
                /*
@@ -2039,7 +2021,8 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
                 */
                wa_add(wal, ECOSKPD,
                       0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
-                      0 /* XXX bit doesn't stick on Broadwater */);
+                      0 /* XXX bit doesn't stick on Broadwater */,
+                      true);
 }
 
 static void
@@ -2048,7 +2031,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
        struct drm_i915_private *i915 = engine->i915;
 
        /* WaKBLVECSSemaphoreWaitPoll:kbl */
-       if (IS_KBL_GT_STEP(i915, STEP_A0, STEP_E0)) {
+       if (IS_KBL_GT_STEP(i915, STEP_A0, STEP_F0)) {
                wa_write(wal,
                         RING_SEMA_WAIT_POLL(engine->mmio_base),
                         1);
@@ -2081,7 +2064,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
 
 void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
 {
-       wa_list_apply(engine->uncore, &engine->wa_list);
+       wa_list_apply(engine->gt, &engine->wa_list);
 }
 
 struct mcr_range {
@@ -2107,12 +2090,31 @@ static const struct mcr_range mcr_ranges_gen12[] = {
        {},
 };
 
+static const struct mcr_range mcr_ranges_xehp[] = {
+       { .start =  0x4000, .end =  0x4aff },
+       { .start =  0x5200, .end =  0x52ff },
+       { .start =  0x5400, .end =  0x7fff },
+       { .start =  0x8140, .end =  0x815f },
+       { .start =  0x8c80, .end =  0x8dff },
+       { .start =  0x94d0, .end =  0x955f },
+       { .start =  0x9680, .end =  0x96ff },
+       { .start =  0xb000, .end =  0xb3ff },
+       { .start =  0xc800, .end =  0xcfff },
+       { .start =  0xd800, .end =  0xd8ff },
+       { .start =  0xdc00, .end =  0xffff },
+       { .start = 0x17000, .end = 0x17fff },
+       { .start = 0x24a00, .end = 0x24a7f },
+       {},
+};
+
 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
 {
        const struct mcr_range *mcr_ranges;
        int i;
 
-       if (GRAPHICS_VER(i915) >= 12)
+       if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
+               mcr_ranges = mcr_ranges_xehp;
+       else if (GRAPHICS_VER(i915) >= 12)
                mcr_ranges = mcr_ranges_gen12;
        else if (GRAPHICS_VER(i915) >= 8)
                mcr_ranges = mcr_ranges_gen8;
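
The new mcr_ranges_xehp[] table keeps the sentinel-terminated layout of the existing gen8/gen12 tables, and mcr_range() now selects it for Xe_HP parts (graphics IP 12.50 and later) before walking the chosen table to decide whether an offset lives in a multicast/replicated range and so cannot be verified with a plain read. The walk itself is outside this hunk, so the following stand-alone C sketch of the lookup is an assumption based only on the struct layout and the empty terminator:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct mcr_range { uint32_t start, end; };

    /* Subset of mcr_ranges_xehp[] above, empty sentinel included. */
    static const struct mcr_range ranges[] = {
            { .start = 0x4000, .end = 0x4aff },
            { .start = 0xb000, .end = 0xb3ff },
            {},
    };

    /* Walk the table until the zeroed terminator entry. */
    static bool offset_is_mcr(uint32_t offset)
    {
            int i;

            for (i = 0; ranges[i].start || ranges[i].end; i++)
                    if (offset >= ranges[i].start && offset <= ranges[i].end)
                            return true;
            return false;
    }

    int main(void)
    {
            printf("0x4200 steered: %d\n", offset_is_mcr(0x4200)); /* prints 1 */
            printf("0x9000 steered: %d\n", offset_is_mcr(0x9000)); /* prints 0 */
            return 0;
    }
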
index c214111..1e87368 100644 (file)
@@ -15,6 +15,7 @@ struct i915_wa {
        u32             clr;
        u32             set;
        u32             read;
+       bool            masked_reg;
 };
 
 struct i915_wa_list {
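
The new masked_reg field records that a workaround targets a masked register, one whose upper 16 bits act as per-bit write enables rather than state, which is why the wa_add() calls in the MI_MODE and ECOSKPD hunks above now pass a trailing bool. A minimal user-space sketch of that encoding; the macro names and the bit position are simplified stand-ins, not the definitions from i915_reg.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified i915-style helpers: the written value carries its own
     * write-enable mask in bits 31:16, so untouched bits keep their state. */
    #define MASKED_FIELD(mask, value)  (((uint32_t)(mask) << 16) | (value))
    #define MASKED_BIT_ENABLE(bit)     MASKED_FIELD((bit), (bit))
    #define MASKED_BIT_DISABLE(bit)    MASKED_FIELD((bit), 0)

    int main(void)
    {
            uint32_t bit = 1u << 6; /* illustrative bit position only */

            printf("enable : 0x%08x\n", MASKED_BIT_ENABLE(bit));  /* 0x00400040 */
            printf("disable: 0x%08x\n", MASKED_BIT_DISABLE(bit)); /* 0x00400000 */
            return 0;
    }
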
index 32589c6..2c1af03 100644 (file)
@@ -235,6 +235,34 @@ static void mock_submit_request(struct i915_request *request)
        spin_unlock_irqrestore(&engine->hw_lock, flags);
 }
 
+static void mock_add_to_engine(struct i915_request *rq)
+{
+       lockdep_assert_held(&rq->engine->sched_engine->lock);
+       list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
+}
+
+static void mock_remove_from_engine(struct i915_request *rq)
+{
+       struct intel_engine_cs *engine, *locked;
+
+       /*
+        * Virtual engines complicate acquiring the engine timeline lock,
+        * as their rq->engine pointer is not stable until under that
+        * engine lock. The simple ploy we use is to take the lock then
+        * check that the rq still belongs to the newly locked engine.
+        */
+
+       locked = READ_ONCE(rq->engine);
+       spin_lock_irq(&locked->sched_engine->lock);
+       while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+               spin_unlock(&locked->sched_engine->lock);
+               spin_lock(&engine->sched_engine->lock);
+               locked = engine;
+       }
+       list_del_init(&rq->sched.link);
+       spin_unlock_irq(&locked->sched_engine->lock);
+}
+
 static void mock_reset_prepare(struct intel_engine_cs *engine)
 {
 }
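
mock_remove_from_engine() above copies the lock-then-recheck loop used by the real submission backends: rq->engine is only stable while that engine's lock is held, so the code locks a snapshot of the pointer and relocks whenever the request has migrated in the meantime. A generic C11/pthreads illustration of the same pattern, with made-up types purely for the example:

    #include <pthread.h>
    #include <stdatomic.h>

    struct engine { pthread_mutex_t lock; };
    struct request { _Atomic(struct engine *) engine; };

    /* Lock whichever engine the request currently belongs to, re-checking
     * the pointer after every lock acquisition in case the request moved
     * between engines in the meantime (the virtual-engine case above). */
    static struct engine *lock_request_engine(struct request *rq)
    {
            struct engine *locked, *now;

            locked = atomic_load(&rq->engine);
            pthread_mutex_lock(&locked->lock);
            while ((now = atomic_load(&rq->engine)) != locked) {
                    pthread_mutex_unlock(&locked->lock);
                    pthread_mutex_lock(&now->lock);
                    locked = now;
            }
            return locked; /* caller drops locked->lock when done */
    }
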
@@ -253,10 +281,10 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
 
        del_timer_sync(&mock->hw_delay);
 
-       spin_lock_irqsave(&engine->active.lock, flags);
+       spin_lock_irqsave(&engine->sched_engine->lock, flags);
 
        /* Mark all submitted requests as skipped. */
-       list_for_each_entry(rq, &engine->active.requests, sched.link)
+       list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
                i915_request_put(i915_request_mark_eio(rq));
        intel_engine_signal_breadcrumbs(engine);
 
@@ -269,7 +297,7 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
        }
        INIT_LIST_HEAD(&mock->hw_queue);
 
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 }
 
 static void mock_reset_finish(struct intel_engine_cs *engine)
@@ -283,7 +311,8 @@ static void mock_engine_release(struct intel_engine_cs *engine)
 
        GEM_BUG_ON(timer_pending(&mock->hw_delay));
 
-       intel_breadcrumbs_free(engine->breadcrumbs);
+       i915_sched_engine_put(engine->sched_engine);
+       intel_breadcrumbs_put(engine->breadcrumbs);
 
        intel_context_unpin(engine->kernel_context);
        intel_context_put(engine->kernel_context);
@@ -320,6 +349,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
        engine->base.emit_flush = mock_emit_flush;
        engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
        engine->base.submit_request = mock_submit_request;
+       engine->base.add_active_request = mock_add_to_engine;
+       engine->base.remove_active_request = mock_remove_from_engine;
 
        engine->base.reset.prepare = mock_reset_prepare;
        engine->base.reset.rewind = mock_reset_rewind;
@@ -345,14 +376,18 @@ int mock_engine_init(struct intel_engine_cs *engine)
 {
        struct intel_context *ce;
 
-       intel_engine_init_active(engine, ENGINE_MOCK);
+       engine->sched_engine = i915_sched_engine_create(ENGINE_MOCK);
+       if (!engine->sched_engine)
+               return -ENOMEM;
+       engine->sched_engine->private_data = engine;
+
        intel_engine_init_execlists(engine);
        intel_engine_init__pm(engine);
        intel_engine_init_retire(engine);
 
        engine->breadcrumbs = intel_breadcrumbs_create(NULL);
        if (!engine->breadcrumbs)
-               return -ENOMEM;
+               goto err_schedule;
 
        ce = create_kernel_context(engine);
        if (IS_ERR(ce))
@@ -365,7 +400,9 @@ int mock_engine_init(struct intel_engine_cs *engine)
        return 0;
 
 err_breadcrumbs:
-       intel_breadcrumbs_free(engine->breadcrumbs);
+       intel_breadcrumbs_put(engine->breadcrumbs);
+err_schedule:
+       i915_sched_engine_put(engine->sched_engine);
        return -ENOMEM;
 }
 
index 26685b9..fa7b99a 100644 (file)
@@ -209,7 +209,13 @@ static int __live_active_context(struct intel_engine_cs *engine)
         * This test makes sure that the context is kept alive until a
         * subsequent idle-barrier (emitted when the engine wakeref hits 0
         * with no more outstanding requests).
+        *
+        * In GuC submission mode we don't use idle barriers and we instead
+        * get a message from the GuC to signal that it is safe to unpin the
+        * context from memory.
         */
+       if (intel_engine_uses_guc(engine))
+               return 0;
 
        if (intel_engine_pm_is_awake(engine)) {
                pr_err("%s is awake before starting %s!\n",
@@ -357,7 +363,11 @@ static int __live_remote_context(struct intel_engine_cs *engine)
         * on the context image remotely (intel_context_prepare_remote_request),
         * which inserts foreign fences into intel_context.active, does not
         * clobber the idle-barrier.
+        *
+        * In GuC submission mode we don't use idle barriers.
         */
+       if (intel_engine_uses_guc(engine))
+               return 0;
 
        if (intel_engine_pm_is_awake(engine)) {
                pr_err("%s is awake before starting %s!\n",
index 4896e4c..317eebf 100644 (file)
@@ -405,3 +405,25 @@ void st_engine_heartbeat_enable(struct intel_engine_cs *engine)
        engine->props.heartbeat_interval_ms =
                engine->defaults.heartbeat_interval_ms;
 }
+
+void st_engine_heartbeat_disable_no_pm(struct intel_engine_cs *engine)
+{
+       engine->props.heartbeat_interval_ms = 0;
+
+       /*
+        * Park the heartbeat without holding a PM reference, as holding
+        * one makes the engine appear not-idle. Note that if/when unpark
+        * is later called (because a PM reference is taken), the
+        * heartbeat still stays disabled since the interval above is 0.
+        */
+       if (intel_engine_pm_get_if_awake(engine)) {
+               intel_engine_park_heartbeat(engine);
+               intel_engine_pm_put(engine);
+       }
+}
+
+void st_engine_heartbeat_enable_no_pm(struct intel_engine_cs *engine)
+{
+       engine->props.heartbeat_interval_ms =
+               engine->defaults.heartbeat_interval_ms;
+}
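
The _no_pm variants exist because taking a PM reference while parking the heartbeat makes the engine look busy to the idleness checks in the reset selftests; the __igt_reset_engines() hunks later in this series switch to them for exactly that reason. The intended bracket, paraphrased from that caller:

    st_engine_heartbeat_disable_no_pm(engine);
    set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);

    /* ... trigger resets and check the results on 'engine' ... */

    clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
    st_engine_heartbeat_enable_no_pm(engine);
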
index cd27113..81da2cd 100644 (file)
@@ -9,6 +9,8 @@
 struct intel_engine_cs;
 
 void st_engine_heartbeat_disable(struct intel_engine_cs *engine);
+void st_engine_heartbeat_disable_no_pm(struct intel_engine_cs *engine);
 void st_engine_heartbeat_enable(struct intel_engine_cs *engine);
+void st_engine_heartbeat_enable_no_pm(struct intel_engine_cs *engine);
 
 #endif /* SELFTEST_ENGINE_HEARTBEAT_H */
index 72cca3f..7556966 100644 (file)
@@ -173,8 +173,8 @@ static int __live_engine_timestamps(struct intel_engine_cs *engine)
        d_ctx = trifilter(s_ctx);
 
        d_ctx *= engine->gt->clock_frequency;
-       if (IS_ICELAKE(engine->i915))
-               d_ring *= 12500000; /* Fixed 80ns for icl ctx timestamp? */
+       if (GRAPHICS_VER(engine->i915) == 11)
+               d_ring *= 12500000; /* Fixed 80ns for GEN11 ctx timestamp? */
        else
                d_ring *= engine->gt->clock_frequency;
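
The 12500000 factor treats the GEN11 context timestamp as a fixed 12.5 MHz counter, i.e. 80 ns per tick (10^9 ns / 12.5e6 Hz = 80 ns); the change only broadens the check from the ICL platform macro to every GRAPHICS_VER 11 part. A trivial check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            /* 12.5 MHz context timestamp => 1e9 / 12.5e6 = 80 ns per tick */
            printf("%.0f ns per tick\n", 1e9 / 12500000.0);
            return 0;
    }
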
 
index 1c8108d..f12ffe7 100644 (file)
@@ -43,7 +43,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
                           unsigned long timeout)
 {
        /* Ignore our own attempts to suppress excess tasklets */
-       tasklet_hi_schedule(&engine->execlists.tasklet);
+       tasklet_hi_schedule(&engine->sched_engine->tasklet);
 
        timeout += jiffies;
        do {
@@ -273,7 +273,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
                        };
 
                        /* Alternatively preempt the spinner with ce[1] */
-                       engine->schedule(rq[1], &attr);
+                       engine->sched_engine->schedule(rq[1], &attr);
                }
 
                /* And switch back to ce[0] for good measure */
@@ -553,13 +553,13 @@ static int live_pin_rewind(void *arg)
 
 static int engine_lock_reset_tasklet(struct intel_engine_cs *engine)
 {
-       tasklet_disable(&engine->execlists.tasklet);
+       tasklet_disable(&engine->sched_engine->tasklet);
        local_bh_disable();
 
        if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
                             &engine->gt->reset.flags)) {
                local_bh_enable();
-               tasklet_enable(&engine->execlists.tasklet);
+               tasklet_enable(&engine->sched_engine->tasklet);
 
                intel_gt_set_wedged(engine->gt);
                return -EBUSY;
@@ -574,7 +574,7 @@ static void engine_unlock_reset_tasklet(struct intel_engine_cs *engine)
                              &engine->gt->reset.flags);
 
        local_bh_enable();
-       tasklet_enable(&engine->execlists.tasklet);
+       tasklet_enable(&engine->sched_engine->tasklet);
 }
 
 static int live_hold_reset(void *arg)
@@ -628,7 +628,7 @@ static int live_hold_reset(void *arg)
                if (err)
                        goto out;
 
-               engine->execlists.tasklet.callback(&engine->execlists.tasklet);
+               engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet);
                GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
                i915_request_get(rq);
@@ -917,7 +917,7 @@ release_queue(struct intel_engine_cs *engine,
        i915_request_add(rq);
 
        local_bh_disable();
-       engine->schedule(rq, &attr);
+       engine->sched_engine->schedule(rq, &attr);
        local_bh_enable(); /* kick tasklet */
 
        i915_request_put(rq);
@@ -1200,7 +1200,7 @@ static int live_timeslice_rewind(void *arg)
                while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
                        /* Wait for the timeslice to kick in */
                        del_timer(&engine->execlists.timer);
-                       tasklet_hi_schedule(&engine->execlists.tasklet);
+                       tasklet_hi_schedule(&engine->sched_engine->tasklet);
                        intel_engine_flush_submission(engine);
                }
                /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
@@ -1342,7 +1342,7 @@ static int live_timeslice_queue(void *arg)
                        err = PTR_ERR(rq);
                        goto err_heartbeat;
                }
-               engine->schedule(rq, &attr);
+               engine->sched_engine->schedule(rq, &attr);
                err = wait_for_submit(engine, rq, HZ / 2);
                if (err) {
                        pr_err("%s: Timed out trying to submit semaphores\n",
@@ -1539,12 +1539,12 @@ static int live_busywait_preempt(void *arg)
         * preempt the busywaits used to synchronise between rings.
         */
 
-       ctx_hi = kernel_context(gt->i915);
+       ctx_hi = kernel_context(gt->i915, NULL);
        if (!ctx_hi)
                return -ENOMEM;
        ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
 
-       ctx_lo = kernel_context(gt->i915);
+       ctx_lo = kernel_context(gt->i915, NULL);
        if (!ctx_lo)
                goto err_ctx_hi;
        ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
@@ -1741,12 +1741,12 @@ static int live_preempt(void *arg)
        if (igt_spinner_init(&spin_lo, gt))
                goto err_spin_hi;
 
-       ctx_hi = kernel_context(gt->i915);
+       ctx_hi = kernel_context(gt->i915, NULL);
        if (!ctx_hi)
                goto err_spin_lo;
        ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
 
-       ctx_lo = kernel_context(gt->i915);
+       ctx_lo = kernel_context(gt->i915, NULL);
        if (!ctx_lo)
                goto err_ctx_hi;
        ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
@@ -1833,11 +1833,11 @@ static int live_late_preempt(void *arg)
        if (igt_spinner_init(&spin_lo, gt))
                goto err_spin_hi;
 
-       ctx_hi = kernel_context(gt->i915);
+       ctx_hi = kernel_context(gt->i915, NULL);
        if (!ctx_hi)
                goto err_spin_lo;
 
-       ctx_lo = kernel_context(gt->i915);
+       ctx_lo = kernel_context(gt->i915, NULL);
        if (!ctx_lo)
                goto err_ctx_hi;
 
@@ -1884,7 +1884,7 @@ static int live_late_preempt(void *arg)
                }
 
                attr.priority = I915_PRIORITY_MAX;
-               engine->schedule(rq, &attr);
+               engine->sched_engine->schedule(rq, &attr);
 
                if (!igt_wait_for_spinner(&spin_hi, rq)) {
                        pr_err("High priority context failed to preempt the low priority context\n");
@@ -1927,7 +1927,7 @@ struct preempt_client {
 
 static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c)
 {
-       c->ctx = kernel_context(gt->i915);
+       c->ctx = kernel_context(gt->i915, NULL);
        if (!c->ctx)
                return -ENOMEM;
 
@@ -2497,7 +2497,7 @@ static int live_suppress_self_preempt(void *arg)
                        i915_request_add(rq_b);
 
                        GEM_BUG_ON(i915_request_completed(rq_a));
-                       engine->schedule(rq_a, &attr);
+                       engine->sched_engine->schedule(rq_a, &attr);
                        igt_spinner_end(&a.spin);
 
                        if (!igt_wait_for_spinner(&b.spin, rq_b)) {
@@ -2629,7 +2629,7 @@ static int live_chain_preempt(void *arg)
 
                        i915_request_get(rq);
                        i915_request_add(rq);
-                       engine->schedule(rq, &attr);
+                       engine->sched_engine->schedule(rq, &attr);
 
                        igt_spinner_end(&hi.spin);
                        if (i915_request_wait(rq, 0, HZ / 5) < 0) {
@@ -2810,7 +2810,7 @@ static int __live_preempt_ring(struct intel_engine_cs *engine,
                        goto err_ce;
                }
 
-               tmp->ring = __intel_context_ring_size(ring_sz);
+               tmp->ring_size = ring_sz;
 
                err = intel_context_pin(tmp);
                if (err) {
@@ -2988,7 +2988,7 @@ static int live_preempt_gang(void *arg)
                                break;
 
                        /* Submit each spinner at increasing priority */
-                       engine->schedule(rq, &attr);
+                       engine->sched_engine->schedule(rq, &attr);
                } while (prio <= I915_PRIORITY_MAX &&
                         !__igt_timeout(end_time, NULL));
                pr_debug("%s: Preempt chain of %d requests\n",
@@ -3236,7 +3236,7 @@ static int preempt_user(struct intel_engine_cs *engine,
        i915_request_get(rq);
        i915_request_add(rq);
 
-       engine->schedule(rq, &attr);
+       engine->sched_engine->schedule(rq, &attr);
 
        if (i915_request_wait(rq, 0, HZ / 2) < 0)
                err = -ETIME;
@@ -3384,12 +3384,12 @@ static int live_preempt_timeout(void *arg)
        if (igt_spinner_init(&spin_lo, gt))
                return -ENOMEM;
 
-       ctx_hi = kernel_context(gt->i915);
+       ctx_hi = kernel_context(gt->i915, NULL);
        if (!ctx_hi)
                goto err_spin_lo;
        ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
 
-       ctx_lo = kernel_context(gt->i915);
+       ctx_lo = kernel_context(gt->i915, NULL);
        if (!ctx_lo)
                goto err_ctx_hi;
        ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
@@ -3561,12 +3561,16 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
 #define BATCH BIT(0)
 {
        struct task_struct *tsk[I915_NUM_ENGINES] = {};
-       struct preempt_smoke arg[I915_NUM_ENGINES];
+       struct preempt_smoke *arg;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
        unsigned long count;
        int err = 0;
 
+       arg = kmalloc_array(I915_NUM_ENGINES, sizeof(*arg), GFP_KERNEL);
+       if (!arg)
+               return -ENOMEM;
+
        for_each_engine(engine, smoke->gt, id) {
                arg[id] = *smoke;
                arg[id].engine = engine;
@@ -3574,7 +3578,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
                        arg[id].batch = NULL;
                arg[id].count = 0;
 
-               tsk[id] = kthread_run(smoke_crescendo_thread, &arg,
+               tsk[id] = kthread_run(smoke_crescendo_thread, arg,
                                      "igt/smoke:%d", id);
                if (IS_ERR(tsk[id])) {
                        err = PTR_ERR(tsk[id]);
@@ -3603,6 +3607,8 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
 
        pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
                count, flags, smoke->gt->info.num_engines, smoke->ncontext);
+
+       kfree(arg);
        return 0;
 }
 
@@ -3676,7 +3682,7 @@ static int live_preempt_smoke(void *arg)
        }
 
        for (n = 0; n < smoke.ncontext; n++) {
-               smoke.contexts[n] = kernel_context(smoke.gt->i915);
+               smoke.contexts[n] = kernel_context(smoke.gt->i915, NULL);
                if (!smoke.contexts[n])
                        goto err_ctx;
        }
@@ -3727,7 +3733,7 @@ static int nop_virtual_engine(struct intel_gt *gt,
        GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
 
        for (n = 0; n < nctx; n++) {
-               ve[n] = intel_execlists_create_virtual(siblings, nsibling);
+               ve[n] = intel_engine_create_virtual(siblings, nsibling);
                if (IS_ERR(ve[n])) {
                        err = PTR_ERR(ve[n]);
                        nctx = n;
@@ -3923,7 +3929,7 @@ static int mask_virtual_engine(struct intel_gt *gt,
         * restrict it to our desired engine within the virtual engine.
         */
 
-       ve = intel_execlists_create_virtual(siblings, nsibling);
+       ve = intel_engine_create_virtual(siblings, nsibling);
        if (IS_ERR(ve)) {
                err = PTR_ERR(ve);
                goto out_close;
@@ -4054,7 +4060,7 @@ static int slicein_virtual_engine(struct intel_gt *gt,
                i915_request_add(rq);
        }
 
-       ce = intel_execlists_create_virtual(siblings, nsibling);
+       ce = intel_engine_create_virtual(siblings, nsibling);
        if (IS_ERR(ce)) {
                err = PTR_ERR(ce);
                goto out;
@@ -4106,7 +4112,7 @@ static int sliceout_virtual_engine(struct intel_gt *gt,
 
        /* XXX We do not handle oversubscription and fairness with normal rq */
        for (n = 0; n < nsibling; n++) {
-               ce = intel_execlists_create_virtual(siblings, nsibling);
+               ce = intel_engine_create_virtual(siblings, nsibling);
                if (IS_ERR(ce)) {
                        err = PTR_ERR(ce);
                        goto out;
@@ -4208,7 +4214,7 @@ static int preserved_virtual_engine(struct intel_gt *gt,
        if (err)
                goto out_scratch;
 
-       ve = intel_execlists_create_virtual(siblings, nsibling);
+       ve = intel_engine_create_virtual(siblings, nsibling);
        if (IS_ERR(ve)) {
                err = PTR_ERR(ve);
                goto out_scratch;
@@ -4328,234 +4334,6 @@ static int live_virtual_preserved(void *arg)
        return 0;
 }
 
-static int bond_virtual_engine(struct intel_gt *gt,
-                              unsigned int class,
-                              struct intel_engine_cs **siblings,
-                              unsigned int nsibling,
-                              unsigned int flags)
-#define BOND_SCHEDULE BIT(0)
-{
-       struct intel_engine_cs *master;
-       struct i915_request *rq[16];
-       enum intel_engine_id id;
-       struct igt_spinner spin;
-       unsigned long n;
-       int err;
-
-       /*
-        * A set of bonded requests is intended to be run concurrently
-        * across a number of engines. We use one request per-engine
-        * and a magic fence to schedule each of the bonded requests
-        * at the same time. A consequence of our current scheduler is that
-        * we only move requests to the HW ready queue when the request
-        * becomes ready, that is when all of its prerequisite fences have
-        * been signaled. As one of those fences is the master submit fence,
-        * there is a delay on all secondary fences as the HW may be
-        * currently busy. Equally, as all the requests are independent,
-        * they may have other fences that delay individual request
-        * submission to HW. Ergo, we do not guarantee that all requests are
-        * immediately submitted to HW at the same time, just that if the
-        * rules are abided by, they are ready at the same time as the
-        * first is submitted. Userspace can embed semaphores in its batch
-        * to ensure parallel execution of its phases as it requires.
-        * Though naturally it gets requested that perhaps the scheduler should
-        * take care of parallel execution, even across preemption events on
-        * different HW. (The proper answer is of course "lalalala".)
-        *
-        * With the submit-fence, we have identified three possible phases
-        * of synchronisation depending on the master fence: queued (not
-        * ready), executing, and signaled. The first two are quite simple
-        * and checked below. However, the signaled master fence handling is
-        * contentious. Currently we do not distinguish between a signaled
-        * fence and an expired fence, as once signaled it does not convey
-        * any information about the previous execution. It may even be freed
-        * and hence checking later it may not exist at all. Ergo we currently
-        * do not apply the bonding constraint for an already signaled fence,
-        * as our expectation is that it should not constrain the secondaries
-        * and is outside of the scope of the bonded request API (i.e. all
-        * userspace requests are meant to be running in parallel). As
-        * it imposes no constraint, and is effectively a no-op, we do not
-        * check below as normal execution flows are checked extensively above.
-        *
-        * XXX Is the degenerate handling of signaled submit fences the
-        * expected behaviour for userpace?
-        */
-
-       GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
-
-       if (igt_spinner_init(&spin, gt))
-               return -ENOMEM;
-
-       err = 0;
-       rq[0] = ERR_PTR(-ENOMEM);
-       for_each_engine(master, gt, id) {
-               struct i915_sw_fence fence = {};
-               struct intel_context *ce;
-
-               if (master->class == class)
-                       continue;
-
-               ce = intel_context_create(master);
-               if (IS_ERR(ce)) {
-                       err = PTR_ERR(ce);
-                       goto out;
-               }
-
-               memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
-
-               rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
-               intel_context_put(ce);
-               if (IS_ERR(rq[0])) {
-                       err = PTR_ERR(rq[0]);
-                       goto out;
-               }
-               i915_request_get(rq[0]);
-
-               if (flags & BOND_SCHEDULE) {
-                       onstack_fence_init(&fence);
-                       err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
-                                                              &fence,
-                                                              GFP_KERNEL);
-               }
-
-               i915_request_add(rq[0]);
-               if (err < 0)
-                       goto out;
-
-               if (!(flags & BOND_SCHEDULE) &&
-                   !igt_wait_for_spinner(&spin, rq[0])) {
-                       err = -EIO;
-                       goto out;
-               }
-
-               for (n = 0; n < nsibling; n++) {
-                       struct intel_context *ve;
-
-                       ve = intel_execlists_create_virtual(siblings, nsibling);
-                       if (IS_ERR(ve)) {
-                               err = PTR_ERR(ve);
-                               onstack_fence_fini(&fence);
-                               goto out;
-                       }
-
-                       err = intel_virtual_engine_attach_bond(ve->engine,
-                                                              master,
-                                                              siblings[n]);
-                       if (err) {
-                               intel_context_put(ve);
-                               onstack_fence_fini(&fence);
-                               goto out;
-                       }
-
-                       err = intel_context_pin(ve);
-                       intel_context_put(ve);
-                       if (err) {
-                               onstack_fence_fini(&fence);
-                               goto out;
-                       }
-
-                       rq[n + 1] = i915_request_create(ve);
-                       intel_context_unpin(ve);
-                       if (IS_ERR(rq[n + 1])) {
-                               err = PTR_ERR(rq[n + 1]);
-                               onstack_fence_fini(&fence);
-                               goto out;
-                       }
-                       i915_request_get(rq[n + 1]);
-
-                       err = i915_request_await_execution(rq[n + 1],
-                                                          &rq[0]->fence,
-                                                          ve->engine->bond_execute);
-                       i915_request_add(rq[n + 1]);
-                       if (err < 0) {
-                               onstack_fence_fini(&fence);
-                               goto out;
-                       }
-               }
-               onstack_fence_fini(&fence);
-               intel_engine_flush_submission(master);
-               igt_spinner_end(&spin);
-
-               if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
-                       pr_err("Master request did not execute (on %s)!\n",
-                              rq[0]->engine->name);
-                       err = -EIO;
-                       goto out;
-               }
-
-               for (n = 0; n < nsibling; n++) {
-                       if (i915_request_wait(rq[n + 1], 0,
-                                             MAX_SCHEDULE_TIMEOUT) < 0) {
-                               err = -EIO;
-                               goto out;
-                       }
-
-                       if (rq[n + 1]->engine != siblings[n]) {
-                               pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
-                                      siblings[n]->name,
-                                      rq[n + 1]->engine->name,
-                                      rq[0]->engine->name);
-                               err = -EINVAL;
-                               goto out;
-                       }
-               }
-
-               for (n = 0; !IS_ERR(rq[n]); n++)
-                       i915_request_put(rq[n]);
-               rq[0] = ERR_PTR(-ENOMEM);
-       }
-
-out:
-       for (n = 0; !IS_ERR(rq[n]); n++)
-               i915_request_put(rq[n]);
-       if (igt_flush_test(gt->i915))
-               err = -EIO;
-
-       igt_spinner_fini(&spin);
-       return err;
-}
-
-static int live_virtual_bond(void *arg)
-{
-       static const struct phase {
-               const char *name;
-               unsigned int flags;
-       } phases[] = {
-               { "", 0 },
-               { "schedule", BOND_SCHEDULE },
-               { },
-       };
-       struct intel_gt *gt = arg;
-       struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
-       unsigned int class;
-       int err;
-
-       if (intel_uc_uses_guc_submission(&gt->uc))
-               return 0;
-
-       for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
-               const struct phase *p;
-               int nsibling;
-
-               nsibling = select_siblings(gt, class, siblings);
-               if (nsibling < 2)
-                       continue;
-
-               for (p = phases; p->name; p++) {
-                       err = bond_virtual_engine(gt,
-                                                 class, siblings, nsibling,
-                                                 p->flags);
-                       if (err) {
-                               pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
-                                      __func__, p->name, class, nsibling, err);
-                               return err;
-                       }
-               }
-       }
-
-       return 0;
-}
-
 static int reset_virtual_engine(struct intel_gt *gt,
                                struct intel_engine_cs **siblings,
                                unsigned int nsibling)
@@ -4576,7 +4354,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
        if (igt_spinner_init(&spin, gt))
                return -ENOMEM;
 
-       ve = intel_execlists_create_virtual(siblings, nsibling);
+       ve = intel_engine_create_virtual(siblings, nsibling);
        if (IS_ERR(ve)) {
                err = PTR_ERR(ve);
                goto out_spin;
@@ -4606,13 +4384,13 @@ static int reset_virtual_engine(struct intel_gt *gt,
        if (err)
                goto out_heartbeat;
 
-       engine->execlists.tasklet.callback(&engine->execlists.tasklet);
+       engine->sched_engine->tasklet.callback(&engine->sched_engine->tasklet);
        GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
 
        /* Fake a preemption event; failed of course */
-       spin_lock_irq(&engine->active.lock);
+       spin_lock_irq(&engine->sched_engine->lock);
        __unwind_incomplete_requests(engine);
-       spin_unlock_irq(&engine->active.lock);
+       spin_unlock_irq(&engine->sched_engine->lock);
        GEM_BUG_ON(rq->engine != engine);
 
        /* Reset the engine while keeping our active request on hold */
@@ -4721,7 +4499,6 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
                SUBTEST(live_virtual_mask),
                SUBTEST(live_virtual_preserved),
                SUBTEST(live_virtual_slice),
-               SUBTEST(live_virtual_bond),
                SUBTEST(live_virtual_reset),
        };
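
Throughout this file the engine->schedule(rq, &attr) calls become engine->sched_engine->schedule(rq, &attr), following the move of the scheduling hook onto the new i915_sched_engine. A kernel-style fragment of the resulting priority-bump pattern; boost_request() is a made-up helper, it assumes the i915 GT headers, and the NULL check mirrors the active_engine() hunk further down:

    static void boost_request(struct intel_engine_cs *engine,
                              struct i915_request *rq, int prio)
    {
            struct i915_sched_attr attr = { .priority = prio };

            if (engine->sched_engine->schedule)
                    engine->sched_engine->schedule(rq, &attr);
    }
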
 
index 853246f..08f011f 100644 (file)
@@ -17,6 +17,8 @@
 #include "selftests/igt_flush_test.h"
 #include "selftests/igt_reset.h"
 #include "selftests/igt_atomic.h"
+#include "selftests/igt_spinner.h"
+#include "selftests/intel_scheduler_helpers.h"
 
 #include "selftests/mock_drm.h"
 
@@ -42,7 +44,7 @@ static int hang_init(struct hang *h, struct intel_gt *gt)
        memset(h, 0, sizeof(*h));
        h->gt = gt;
 
-       h->ctx = kernel_context(gt->i915);
+       h->ctx = kernel_context(gt->i915, NULL);
        if (IS_ERR(h->ctx))
                return PTR_ERR(h->ctx);
 
@@ -378,6 +380,7 @@ static int igt_reset_nop(void *arg)
                        ce = intel_context_create(engine);
                        if (IS_ERR(ce)) {
                                err = PTR_ERR(ce);
+                               pr_err("[%s] Create context failed: %d!\n", engine->name, err);
                                break;
                        }
 
@@ -387,6 +390,8 @@ static int igt_reset_nop(void *arg)
                                rq = intel_context_create_request(ce);
                                if (IS_ERR(rq)) {
                                        err = PTR_ERR(rq);
+                                       pr_err("[%s] Create request failed: %d!\n",
+                                              engine->name, err);
                                        break;
                                }
 
@@ -401,24 +406,31 @@ static int igt_reset_nop(void *arg)
                igt_global_reset_unlock(gt);
 
                if (intel_gt_is_wedged(gt)) {
+                       pr_err("[%s] GT is wedged!\n", engine->name);
                        err = -EIO;
                        break;
                }
 
                if (i915_reset_count(global) != reset_count + ++count) {
-                       pr_err("Full GPU reset not recorded!\n");
+                       pr_err("[%s] Reset not recorded: %d vs %d + %d!\n",
+                              engine->name, i915_reset_count(global), reset_count, count);
                        err = -EINVAL;
                        break;
                }
 
                err = igt_flush_test(gt->i915);
-               if (err)
+               if (err) {
+                       pr_err("[%s] Flush failed: %d!\n", engine->name, err);
                        break;
+               }
        } while (time_before(jiffies, end_time));
        pr_info("%s: %d resets\n", __func__, count);
 
-       if (igt_flush_test(gt->i915))
+       if (igt_flush_test(gt->i915)) {
+               pr_err("Post flush failed: %d!\n", err);
                err = -EIO;
+       }
+
        return err;
 }
 
@@ -440,9 +452,19 @@ static int igt_reset_nop_engine(void *arg)
                IGT_TIMEOUT(end_time);
                int err;
 
+               if (intel_engine_uses_guc(engine)) {
+                       /* Engine level resets are triggered by GuC when a hang
+                        * is detected; they can no longer be triggered by the
+                        * KMD, so a nop batch cannot be used as a reset test.
+                        */
+                       continue;
+               }
+
                ce = intel_context_create(engine);
-               if (IS_ERR(ce))
+               if (IS_ERR(ce)) {
+                       pr_err("[%s] Create context failed: %d!\n", engine->name, err);
                        return PTR_ERR(ce);
+               }
 
                reset_count = i915_reset_count(global);
                reset_engine_count = i915_reset_engine_count(global, engine);
@@ -549,9 +571,15 @@ static int igt_reset_fail_engine(void *arg)
                IGT_TIMEOUT(end_time);
                int err;
 
+               /* Can't manually break the reset if i915 doesn't perform it */
+               if (intel_engine_uses_guc(engine))
+                       continue;
+
                ce = intel_context_create(engine);
-               if (IS_ERR(ce))
+               if (IS_ERR(ce)) {
+                       pr_err("[%s] Create context failed: %d!\n", engine->name, err);
                        return PTR_ERR(ce);
+               }
 
                st_engine_heartbeat_disable(engine);
                set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
@@ -686,8 +714,12 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
        for_each_engine(engine, gt, id) {
                unsigned int reset_count, reset_engine_count;
                unsigned long count;
+               bool using_guc = intel_engine_uses_guc(engine);
                IGT_TIMEOUT(end_time);
 
+               if (using_guc && !active)
+                       continue;
+
                if (active && !intel_engine_can_store_dword(engine))
                        continue;
 
@@ -705,13 +737,24 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
                set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
                count = 0;
                do {
-                       if (active) {
-                               struct i915_request *rq;
+                       struct i915_request *rq = NULL;
+                       struct intel_selftest_saved_policy saved;
+                       int err2;
+
+                       err = intel_selftest_modify_policy(engine, &saved,
+                                                          SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
+                       if (err) {
+                               pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
+                               break;
+                       }
 
+                       if (active) {
                                rq = hang_create_request(&h, engine);
                                if (IS_ERR(rq)) {
                                        err = PTR_ERR(rq);
-                                       break;
+                                       pr_err("[%s] Create hang request failed: %d!\n",
+                                              engine->name, err);
+                                       goto restore;
                                }
 
                                i915_request_get(rq);
@@ -727,34 +770,59 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 
                                        i915_request_put(rq);
                                        err = -EIO;
-                                       break;
+                                       goto restore;
                                }
+                       }
 
-                               i915_request_put(rq);
+                       if (!using_guc) {
+                               err = intel_engine_reset(engine, NULL);
+                               if (err) {
+                                       pr_err("intel_engine_reset(%s) failed, err:%d\n",
+                                              engine->name, err);
+                                       goto skip;
+                               }
                        }
 
-                       err = intel_engine_reset(engine, NULL);
-                       if (err) {
-                               pr_err("intel_engine_reset(%s) failed, err:%d\n",
-                                      engine->name, err);
-                               break;
+                       if (rq) {
+                               /* Ensure the reset happens and kills the engine */
+                               err = intel_selftest_wait_for_rq(rq);
+                               if (err)
+                                       pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
+                                              engine->name, rq->fence.context,
+                                              rq->fence.seqno, rq->context->guc_id, err);
                        }
 
+skip:
+                       if (rq)
+                               i915_request_put(rq);
+
                        if (i915_reset_count(global) != reset_count) {
                                pr_err("Full GPU reset recorded! (engine reset expected)\n");
                                err = -EINVAL;
-                               break;
+                               goto restore;
                        }
 
-                       if (i915_reset_engine_count(global, engine) !=
-                           ++reset_engine_count) {
-                               pr_err("%s engine reset not recorded!\n",
-                                      engine->name);
-                               err = -EINVAL;
-                               break;
+                       /* GuC based resets are not logged per engine */
+                       if (!using_guc) {
+                               if (i915_reset_engine_count(global, engine) !=
+                                   ++reset_engine_count) {
+                                       pr_err("%s engine reset not recorded!\n",
+                                              engine->name);
+                                       err = -EINVAL;
+                                       goto restore;
+                               }
                        }
 
                        count++;
+
+restore:
+                       err2 = intel_selftest_restore_policy(engine, &saved);
+                       if (err2)
+                               pr_err("[%s] Restore policy failed: %d!\n", engine->name, err);
+                       if (err == 0)
+                               err = err2;
+                       if (err)
+                               break;
                } while (time_before(jiffies, end_time));
                clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
                st_engine_heartbeat_enable(engine);
@@ -765,12 +833,16 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
                        break;
 
                err = igt_flush_test(gt->i915);
-               if (err)
+               if (err) {
+                       pr_err("[%s] Flush failed: %d!\n", engine->name, err);
                        break;
+               }
        }
 
-       if (intel_gt_is_wedged(gt))
+       if (intel_gt_is_wedged(gt)) {
+               pr_err("GT is wedged!\n");
                err = -EIO;
+       }
 
        if (active)
                hang_fini(&h);
@@ -807,7 +879,7 @@ static int active_request_put(struct i915_request *rq)
        if (!rq)
                return 0;
 
-       if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
+       if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
                GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
                          rq->engine->name,
                          rq->fence.context,
@@ -837,6 +909,7 @@ static int active_engine(void *data)
                ce[count] = intel_context_create(engine);
                if (IS_ERR(ce[count])) {
                        err = PTR_ERR(ce[count]);
+                       pr_err("[%s] Create context #%ld failed: %d!\n", engine->name, count, err);
                        while (--count)
                                intel_context_put(ce[count]);
                        return err;
@@ -852,23 +925,26 @@ static int active_engine(void *data)
                new = intel_context_create_request(ce[idx]);
                if (IS_ERR(new)) {
                        err = PTR_ERR(new);
+                       pr_err("[%s] Create request #%d failed: %d!\n", engine->name, idx, err);
                        break;
                }
 
                rq[idx] = i915_request_get(new);
                i915_request_add(new);
 
-               if (engine->schedule && arg->flags & TEST_PRIORITY) {
+               if (engine->sched_engine->schedule && arg->flags & TEST_PRIORITY) {
                        struct i915_sched_attr attr = {
                                .priority =
                                        i915_prandom_u32_max_state(512, &prng),
                        };
-                       engine->schedule(rq[idx], &attr);
+                       engine->sched_engine->schedule(rq[idx], &attr);
                }
 
                err = active_request_put(old);
-               if (err)
+               if (err) {
+                       pr_err("[%s] Request put failed: %d!\n", engine->name, err);
                        break;
+               }
 
                cond_resched();
        }
@@ -876,6 +952,9 @@ static int active_engine(void *data)
        for (count = 0; count < ARRAY_SIZE(rq); count++) {
                int err__ = active_request_put(rq[count]);
 
+               if (err__)
+                       pr_err("[%s] Request put #%ld failed: %d!\n", engine->name, count, err__);
+
                /* Keep the first error */
                if (!err)
                        err = err__;
@@ -916,10 +995,13 @@ static int __igt_reset_engines(struct intel_gt *gt,
                struct active_engine threads[I915_NUM_ENGINES] = {};
                unsigned long device = i915_reset_count(global);
                unsigned long count = 0, reported;
+               bool using_guc = intel_engine_uses_guc(engine);
                IGT_TIMEOUT(end_time);
 
-               if (flags & TEST_ACTIVE &&
-                   !intel_engine_can_store_dword(engine))
+               if (flags & TEST_ACTIVE) {
+                       if (!intel_engine_can_store_dword(engine))
+                               continue;
+               } else if (using_guc)
                        continue;
 
                if (!wait_for_idle(engine)) {
@@ -949,6 +1031,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
                                          "igt/%s", other->name);
                        if (IS_ERR(tsk)) {
                                err = PTR_ERR(tsk);
+                               pr_err("[%s] Thread spawn failed: %d!\n", engine->name, err);
                                goto unwind;
                        }
 
@@ -958,16 +1041,27 @@ static int __igt_reset_engines(struct intel_gt *gt,
 
                yield(); /* start all threads before we begin */
 
-               st_engine_heartbeat_disable(engine);
+               st_engine_heartbeat_disable_no_pm(engine);
                set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
                do {
                        struct i915_request *rq = NULL;
+                       struct intel_selftest_saved_policy saved;
+                       int err2;
+
+                       err = intel_selftest_modify_policy(engine, &saved,
+                                                          SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
+                       if (err) {
+                               pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
+                               break;
+                       }
 
                        if (flags & TEST_ACTIVE) {
                                rq = hang_create_request(&h, engine);
                                if (IS_ERR(rq)) {
                                        err = PTR_ERR(rq);
-                                       break;
+                                       pr_err("[%s] Create hang request failed: %d!\n",
+                                              engine->name, err);
+                                       goto restore;
                                }
 
                                i915_request_get(rq);
@@ -983,32 +1077,44 @@ static int __igt_reset_engines(struct intel_gt *gt,
 
                                        i915_request_put(rq);
                                        err = -EIO;
-                                       break;
+                                       goto restore;
                                }
+                       } else {
+                               intel_engine_pm_get(engine);
                        }
 
-                       err = intel_engine_reset(engine, NULL);
-                       if (err) {
-                               pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
-                                      engine->name, test_name, err);
-                               break;
+                       if (!using_guc) {
+                               err = intel_engine_reset(engine, NULL);
+                               if (err) {
+                                       pr_err("i915_reset_engine(%s:%s): failed, err=%d\n",
+                                              engine->name, test_name, err);
+                                       goto restore;
+                               }
+                       }
+
+                       if (rq) {
+                               /* Ensure the reset happens and kills the engine */
+                               err = intel_selftest_wait_for_rq(rq);
+                               if (err)
+                                       pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
+                                              engine->name, rq->fence.context,
+                                              rq->fence.seqno, rq->context->guc_id, err);
                        }
 
                        count++;
 
                        if (rq) {
                                if (rq->fence.error != -EIO) {
-                                       pr_err("i915_reset_engine(%s:%s):"
-                                              " failed to reset request %llx:%lld\n",
+                                       pr_err("i915_reset_engine(%s:%s): failed to reset request %lld:%lld [0x%04X]\n",
                                               engine->name, test_name,
                                               rq->fence.context,
-                                              rq->fence.seqno);
+                                              rq->fence.seqno, rq->context->guc_id);
                                        i915_request_put(rq);
 
                                        GEM_TRACE_DUMP();
                                        intel_gt_set_wedged(gt);
                                        err = -EIO;
-                                       break;
+                                       goto restore;
                                }
 
                                if (i915_request_wait(rq, 0, HZ / 5) < 0) {
@@ -1027,12 +1133,15 @@ static int __igt_reset_engines(struct intel_gt *gt,
                                        GEM_TRACE_DUMP();
                                        intel_gt_set_wedged(gt);
                                        err = -EIO;
-                                       break;
+                                       goto restore;
                                }
 
                                i915_request_put(rq);
                        }
 
+                       if (!(flags & TEST_ACTIVE))
+                               intel_engine_pm_put(engine);
+
                        if (!(flags & TEST_SELF) && !wait_for_idle(engine)) {
                                struct drm_printer p =
                                        drm_info_printer(gt->i915->drm.dev);
@@ -1044,22 +1153,34 @@ static int __igt_reset_engines(struct intel_gt *gt,
                                                  "%s\n", engine->name);
 
                                err = -EIO;
-                               break;
+                               goto restore;
                        }
+
+restore:
+                       err2 = intel_selftest_restore_policy(engine, &saved);
+                       if (err2)
+                               pr_err("[%s] Restore policy failed: %d!\n", engine->name, err2);
+                       if (err == 0)
+                               err = err2;
+                       if (err)
+                               break;
                } while (time_before(jiffies, end_time));
                clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-               st_engine_heartbeat_enable(engine);
+               st_engine_heartbeat_enable_no_pm(engine);
 
                pr_info("i915_reset_engine(%s:%s): %lu resets\n",
                        engine->name, test_name, count);
 
-               reported = i915_reset_engine_count(global, engine);
-               reported -= threads[engine->id].resets;
-               if (reported != count) {
-                       pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
-                              engine->name, test_name, count, reported);
-                       if (!err)
-                               err = -EINVAL;
+               /* GuC based resets are not logged per engine */
+               if (!using_guc) {
+                       reported = i915_reset_engine_count(global, engine);
+                       reported -= threads[engine->id].resets;
+                       if (reported != count) {
+                               pr_err("i915_reset_engine(%s:%s): reset %lu times, but reported %lu\n",
+                                      engine->name, test_name, count, reported);
+                               if (!err)
+                                       err = -EINVAL;
+                       }
                }
 
 unwind:
@@ -1078,15 +1199,18 @@ unwind:
                        }
                        put_task_struct(threads[tmp].task);
 
-                       if (other->uabi_class != engine->uabi_class &&
-                           threads[tmp].resets !=
-                           i915_reset_engine_count(global, other)) {
-                               pr_err("Innocent engine %s was reset (count=%ld)\n",
-                                      other->name,
-                                      i915_reset_engine_count(global, other) -
-                                      threads[tmp].resets);
-                               if (!err)
-                                       err = -EINVAL;
+                       /* GuC based resets are not logged per engine */
+                       if (!using_guc) {
+                               if (other->uabi_class != engine->uabi_class &&
+                                   threads[tmp].resets !=
+                                   i915_reset_engine_count(global, other)) {
+                                       pr_err("Innocent engine %s was reset (count=%ld)\n",
+                                              other->name,
+                                              i915_reset_engine_count(global, other) -
+                                              threads[tmp].resets);
+                                       if (!err)
+                                               err = -EINVAL;
+                               }
                        }
                }
 
@@ -1101,8 +1225,10 @@ unwind:
                        break;
 
                err = igt_flush_test(gt->i915);
-               if (err)
+               if (err) {
+                       pr_err("[%s] Flush failed: %d!\n", engine->name, err);
                        break;
+               }
        }
 
        if (intel_gt_is_wedged(gt))
@@ -1180,12 +1306,15 @@ static int igt_reset_wait(void *arg)
        igt_global_reset_lock(gt);
 
        err = hang_init(&h, gt);
-       if (err)
+       if (err) {
+               pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
                goto unlock;
+       }
 
        rq = hang_create_request(&h, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
+               pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
                goto fini;
        }
 
@@ -1310,12 +1439,15 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
        /* Check that we can recover an unbind stuck on a hanging request */
 
        err = hang_init(&h, gt);
-       if (err)
+       if (err) {
+               pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
                return err;
+       }
 
        obj = i915_gem_object_create_internal(gt->i915, SZ_1M);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
+               pr_err("[%s] Create object failed: %d!\n", engine->name, err);
                goto fini;
        }
 
@@ -1330,12 +1462,14 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
        arg.vma = i915_vma_instance(obj, vm, NULL);
        if (IS_ERR(arg.vma)) {
                err = PTR_ERR(arg.vma);
+               pr_err("[%s] VMA instance failed: %d!\n", engine->name, err);
                goto out_obj;
        }
 
        rq = hang_create_request(&h, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
+               pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
                goto out_obj;
        }
 
@@ -1347,6 +1481,7 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
        err = i915_vma_pin(arg.vma, 0, 0, pin_flags);
        if (err) {
                i915_request_add(rq);
+               pr_err("[%s] VMA pin failed: %d!\n", engine->name, err);
                goto out_obj;
        }
 
@@ -1363,8 +1498,14 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
        i915_vma_lock(arg.vma);
        err = i915_request_await_object(rq, arg.vma->obj,
                                        flags & EXEC_OBJECT_WRITE);
-       if (err == 0)
+       if (err == 0) {
                err = i915_vma_move_to_active(arg.vma, rq, flags);
+               if (err)
+                       pr_err("[%s] Move to active failed: %d!\n", engine->name, err);
+       } else {
+               pr_err("[%s] Request await failed: %d!\n", engine->name, err);
+       }
+
        i915_vma_unlock(arg.vma);
 
        if (flags & EXEC_OBJECT_NEEDS_FENCE)
@@ -1392,6 +1533,7 @@ static int __igt_reset_evict_vma(struct intel_gt *gt,
        tsk = kthread_run(fn, &arg, "igt/evict_vma");
        if (IS_ERR(tsk)) {
                err = PTR_ERR(tsk);
+               pr_err("[%s] Thread spawn failed: %d!\n", engine->name, err);
                tsk = NULL;
                goto out_reset;
        }
@@ -1508,17 +1650,29 @@ static int igt_reset_queue(void *arg)
                goto unlock;
 
        for_each_engine(engine, gt, id) {
+               struct intel_selftest_saved_policy saved;
                struct i915_request *prev;
                IGT_TIMEOUT(end_time);
                unsigned int count;
+               bool using_guc = intel_engine_uses_guc(engine);
 
                if (!intel_engine_can_store_dword(engine))
                        continue;
 
+               if (using_guc) {
+                       err = intel_selftest_modify_policy(engine, &saved,
+                                                          SELFTEST_SCHEDULER_MODIFY_NO_HANGCHECK);
+                       if (err) {
+                               pr_err("[%s] Modify policy failed: %d!\n", engine->name, err);
+                               goto fini;
+                       }
+               }
+
                prev = hang_create_request(&h, engine);
                if (IS_ERR(prev)) {
                        err = PTR_ERR(prev);
-                       goto fini;
+                       pr_err("[%s] Create 'prev' hang request failed: %d!\n", engine->name, err);
+                       goto restore;
                }
 
                i915_request_get(prev);
@@ -1532,7 +1686,8 @@ static int igt_reset_queue(void *arg)
                        rq = hang_create_request(&h, engine);
                        if (IS_ERR(rq)) {
                                err = PTR_ERR(rq);
-                               goto fini;
+                               pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
+                               goto restore;
                        }
 
                        i915_request_get(rq);
@@ -1557,7 +1712,7 @@ static int igt_reset_queue(void *arg)
 
                                GEM_TRACE_DUMP();
                                intel_gt_set_wedged(gt);
-                               goto fini;
+                               goto restore;
                        }
 
                        if (!wait_until_running(&h, prev)) {
@@ -1575,7 +1730,7 @@ static int igt_reset_queue(void *arg)
                                intel_gt_set_wedged(gt);
 
                                err = -EIO;
-                               goto fini;
+                               goto restore;
                        }
 
                        reset_count = fake_hangcheck(gt, BIT(id));
@@ -1586,7 +1741,7 @@ static int igt_reset_queue(void *arg)
                                i915_request_put(rq);
                                i915_request_put(prev);
                                err = -EINVAL;
-                               goto fini;
+                               goto restore;
                        }
 
                        if (rq->fence.error) {
@@ -1595,7 +1750,7 @@ static int igt_reset_queue(void *arg)
                                i915_request_put(rq);
                                i915_request_put(prev);
                                err = -EINVAL;
-                               goto fini;
+                               goto restore;
                        }
 
                        if (i915_reset_count(global) == reset_count) {
@@ -1603,7 +1758,7 @@ static int igt_reset_queue(void *arg)
                                i915_request_put(rq);
                                i915_request_put(prev);
                                err = -EINVAL;
-                               goto fini;
+                               goto restore;
                        }
 
                        i915_request_put(prev);
@@ -1618,9 +1773,24 @@ static int igt_reset_queue(void *arg)
 
                i915_request_put(prev);
 
-               err = igt_flush_test(gt->i915);
+restore:
+               if (using_guc) {
+                       int err2 = intel_selftest_restore_policy(engine, &saved);
+
+                       if (err2)
+                               pr_err("%s:%d> [%s] Restore policy failed: %d!\n",
+                                      __func__, __LINE__, engine->name, err2);
+                       if (err == 0)
+                               err = err2;
+               }
                if (err)
+                       goto fini;
+
+               err = igt_flush_test(gt->i915);
+               if (err) {
+                       pr_err("[%s] Flush failed: %d!\n", engine->name, err);
                        break;
+               }
        }
 
 fini:
@@ -1653,12 +1823,15 @@ static int igt_handle_error(void *arg)
                return 0;
 
        err = hang_init(&h, gt);
-       if (err)
+       if (err) {
+               pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
                return err;
+       }
 
        rq = hang_create_request(&h, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
+               pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
                goto err_fini;
        }
 
@@ -1702,7 +1875,7 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
                                     const struct igt_atomic_section *p,
                                     const char *mode)
 {
-       struct tasklet_struct * const t = &engine->execlists.tasklet;
+       struct tasklet_struct * const t = &engine->sched_engine->tasklet;
        int err;
 
        GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
@@ -1743,12 +1916,15 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
                return err;
 
        err = hang_init(&h, engine->gt);
-       if (err)
+       if (err) {
+               pr_err("[%s] Hang init failed: %d!\n", engine->name, err);
                return err;
+       }
 
        rq = hang_create_request(&h, engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
+               pr_err("[%s] Create hang request failed: %d!\n", engine->name, err);
                goto out;
        }
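
The hangcheck hunks above all converge on one shape for GuC submission: the test no longer calls intel_engine_reset() directly (the GuC owns per-engine resets), it temporarily adjusts the engine's scheduling policy around the loop, and it confirms the reset by waiting for the hung request to be killed. A rough sketch of that pattern, reusing only the helpers these patches introduce (the wrapper itself is illustrative and not code from the series):

#include "selftests/intel_scheduler_helpers.h"  /* modify/restore/wait helpers added here */

static int guc_aware_engine_reset(struct intel_engine_cs *engine,
                                  struct i915_request *rq)
{
        struct intel_selftest_saved_policy saved;
        bool using_guc = intel_engine_uses_guc(engine);
        int err = 0, err2;

        if (using_guc) {
                /* Temporarily adjust the scheduling policy for the test */
                err = intel_selftest_modify_policy(engine, &saved,
                                                   SELFTEST_SCHEDULER_MODIFY_NO_HANGCHECK);
                if (err)
                        return err;
        } else {
                /* Execlists path: the test still drives the reset itself */
                err = intel_engine_reset(engine, NULL);
        }

        /* Either way, the reset must happen and kill the hung request */
        if (err == 0 && rq)
                err = intel_selftest_wait_for_rq(rq);

        if (using_guc) {
                err2 = intel_selftest_restore_policy(engine, &saved);
                if (err == 0)
                        err = err2;
        }

        return err;
}

In the real tests the policy change brackets the whole submission loop rather than a single reset, and the restore has to run even on failure, which is why the hunks above turn their old break paths into goto restore.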
 
index 3119016..b0977a3 100644 (file)
@@ -49,7 +49,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
                           unsigned long timeout)
 {
        /* Ignore our own attempts to suppress excess tasklets */
-       tasklet_hi_schedule(&engine->execlists.tasklet);
+       tasklet_hi_schedule(&engine->sched_engine->tasklet);
 
        timeout += jiffies;
        do {
@@ -1613,12 +1613,12 @@ static void garbage_reset(struct intel_engine_cs *engine,
 
        local_bh_disable();
        if (!test_and_set_bit(bit, lock)) {
-               tasklet_disable(&engine->execlists.tasklet);
+               tasklet_disable(&engine->sched_engine->tasklet);
 
                if (!rq->fence.error)
                        __intel_engine_reset_bh(engine, NULL);
 
-               tasklet_enable(&engine->execlists.tasklet);
+               tasklet_enable(&engine->sched_engine->tasklet);
                clear_and_wake_up_bit(bit, lock);
        }
        local_bh_enable();
diff --git a/drivers/gpu/drm/i915/gt/selftest_migrate.c b/drivers/gpu/drm/i915/gt/selftest_migrate.c
new file mode 100644 (file)
index 0000000..12ef283
--- /dev/null
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/sort.h>
+
+#include "selftests/i915_random.h"
+
+static const unsigned int sizes[] = {
+       SZ_4K,
+       SZ_64K,
+       SZ_2M,
+       CHUNK_SZ - SZ_4K,
+       CHUNK_SZ,
+       CHUNK_SZ + SZ_4K,
+       SZ_64M,
+};
+
+static struct drm_i915_gem_object *
+create_lmem_or_internal(struct drm_i915_private *i915, size_t size)
+{
+       struct drm_i915_gem_object *obj;
+
+       obj = i915_gem_object_create_lmem(i915, size, 0);
+       if (!IS_ERR(obj))
+               return obj;
+
+       return i915_gem_object_create_internal(i915, size);
+}
+
+static int copy(struct intel_migrate *migrate,
+               int (*fn)(struct intel_migrate *migrate,
+                         struct i915_gem_ww_ctx *ww,
+                         struct drm_i915_gem_object *src,
+                         struct drm_i915_gem_object *dst,
+                         struct i915_request **out),
+               u32 sz, struct rnd_state *prng)
+{
+       struct drm_i915_private *i915 = migrate->context->engine->i915;
+       struct drm_i915_gem_object *src, *dst;
+       struct i915_request *rq;
+       struct i915_gem_ww_ctx ww;
+       u32 *vaddr;
+       int err = 0;
+       int i;
+
+       src = create_lmem_or_internal(i915, sz);
+       if (IS_ERR(src))
+               return 0;
+
+       dst = i915_gem_object_create_internal(i915, sz);
+       if (IS_ERR(dst))
+               goto err_free_src;
+
+       for_i915_gem_ww(&ww, err, true) {
+               err = i915_gem_object_lock(src, &ww);
+               if (err)
+                       continue;
+
+               err = i915_gem_object_lock(dst, &ww);
+               if (err)
+                       continue;
+
+               vaddr = i915_gem_object_pin_map(src, I915_MAP_WC);
+               if (IS_ERR(vaddr)) {
+                       err = PTR_ERR(vaddr);
+                       continue;
+               }
+
+               for (i = 0; i < sz / sizeof(u32); i++)
+                       vaddr[i] = i;
+               i915_gem_object_flush_map(src);
+
+               vaddr = i915_gem_object_pin_map(dst, I915_MAP_WC);
+               if (IS_ERR(vaddr)) {
+                       err = PTR_ERR(vaddr);
+                       goto unpin_src;
+               }
+
+               for (i = 0; i < sz / sizeof(u32); i++)
+                       vaddr[i] = ~i;
+               i915_gem_object_flush_map(dst);
+
+               err = fn(migrate, &ww, src, dst, &rq);
+               if (!err)
+                       continue;
+
+               if (err != -EDEADLK && err != -EINTR && err != -ERESTARTSYS)
+                       pr_err("%ps failed, size: %u\n", fn, sz);
+               if (rq) {
+                       i915_request_wait(rq, 0, HZ);
+                       i915_request_put(rq);
+               }
+               i915_gem_object_unpin_map(dst);
+unpin_src:
+               i915_gem_object_unpin_map(src);
+       }
+       if (err)
+               goto err_out;
+
+       if (rq) {
+               if (i915_request_wait(rq, 0, HZ) < 0) {
+                       pr_err("%ps timed out, size: %u\n", fn, sz);
+                       err = -ETIME;
+               }
+               i915_request_put(rq);
+       }
+
+       for (i = 0; !err && i < sz / PAGE_SIZE; i++) {
+               int x = i * 1024 + i915_prandom_u32_max_state(1024, prng);
+
+               if (vaddr[x] != x) {
+                       pr_err("%ps failed, size: %u, offset: %zu\n",
+                              fn, sz, x * sizeof(u32));
+                       igt_hexdump(vaddr + i * 1024, 4096);
+                       err = -EINVAL;
+               }
+       }
+
+       i915_gem_object_unpin_map(dst);
+       i915_gem_object_unpin_map(src);
+
+err_out:
+       i915_gem_object_put(dst);
+err_free_src:
+       i915_gem_object_put(src);
+
+       return err;
+}
+
+static int clear(struct intel_migrate *migrate,
+                int (*fn)(struct intel_migrate *migrate,
+                          struct i915_gem_ww_ctx *ww,
+                          struct drm_i915_gem_object *obj,
+                          u32 value,
+                          struct i915_request **out),
+                u32 sz, struct rnd_state *prng)
+{
+       struct drm_i915_private *i915 = migrate->context->engine->i915;
+       struct drm_i915_gem_object *obj;
+       struct i915_request *rq;
+       struct i915_gem_ww_ctx ww;
+       u32 *vaddr;
+       int err = 0;
+       int i;
+
+       obj = create_lmem_or_internal(i915, sz);
+       if (IS_ERR(obj))
+               return 0;
+
+       for_i915_gem_ww(&ww, err, true) {
+               err = i915_gem_object_lock(obj, &ww);
+               if (err)
+                       continue;
+
+               vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+               if (IS_ERR(vaddr)) {
+                       err = PTR_ERR(vaddr);
+                       continue;
+               }
+
+               for (i = 0; i < sz / sizeof(u32); i++)
+                       vaddr[i] = ~i;
+               i915_gem_object_flush_map(obj);
+
+               err = fn(migrate, &ww, obj, sz, &rq);
+               if (!err)
+                       continue;
+
+               if (err != -EDEADLK && err != -EINTR && err != -ERESTARTSYS)
+                       pr_err("%ps failed, size: %u\n", fn, sz);
+               if (rq) {
+                       i915_request_wait(rq, 0, HZ);
+                       i915_request_put(rq);
+               }
+               i915_gem_object_unpin_map(obj);
+       }
+       if (err)
+               goto err_out;
+
+       if (rq) {
+               if (i915_request_wait(rq, 0, HZ) < 0) {
+                       pr_err("%ps timed out, size: %u\n", fn, sz);
+                       err = -ETIME;
+               }
+               i915_request_put(rq);
+       }
+
+       for (i = 0; !err && i < sz / PAGE_SIZE; i++) {
+               int x = i * 1024 + i915_prandom_u32_max_state(1024, prng);
+
+               if (vaddr[x] != sz) {
+                       pr_err("%ps failed, size: %u, offset: %zu\n",
+                              fn, sz, x * sizeof(u32));
+                       igt_hexdump(vaddr + i * 1024, 4096);
+                       err = -EINVAL;
+               }
+       }
+
+       i915_gem_object_unpin_map(obj);
+err_out:
+       i915_gem_object_put(obj);
+
+       return err;
+}
+
+static int __migrate_copy(struct intel_migrate *migrate,
+                         struct i915_gem_ww_ctx *ww,
+                         struct drm_i915_gem_object *src,
+                         struct drm_i915_gem_object *dst,
+                         struct i915_request **out)
+{
+       return intel_migrate_copy(migrate, ww, NULL,
+                                 src->mm.pages->sgl, src->cache_level,
+                                 i915_gem_object_is_lmem(src),
+                                 dst->mm.pages->sgl, dst->cache_level,
+                                 i915_gem_object_is_lmem(dst),
+                                 out);
+}
+
+static int __global_copy(struct intel_migrate *migrate,
+                        struct i915_gem_ww_ctx *ww,
+                        struct drm_i915_gem_object *src,
+                        struct drm_i915_gem_object *dst,
+                        struct i915_request **out)
+{
+       return intel_context_migrate_copy(migrate->context, NULL,
+                                         src->mm.pages->sgl, src->cache_level,
+                                         i915_gem_object_is_lmem(src),
+                                         dst->mm.pages->sgl, dst->cache_level,
+                                         i915_gem_object_is_lmem(dst),
+                                         out);
+}
+
+static int
+migrate_copy(struct intel_migrate *migrate, u32 sz, struct rnd_state *prng)
+{
+       return copy(migrate, __migrate_copy, sz, prng);
+}
+
+static int
+global_copy(struct intel_migrate *migrate, u32 sz, struct rnd_state *prng)
+{
+       return copy(migrate, __global_copy, sz, prng);
+}
+
+static int __migrate_clear(struct intel_migrate *migrate,
+                          struct i915_gem_ww_ctx *ww,
+                          struct drm_i915_gem_object *obj,
+                          u32 value,
+                          struct i915_request **out)
+{
+       return intel_migrate_clear(migrate, ww, NULL,
+                                  obj->mm.pages->sgl,
+                                  obj->cache_level,
+                                  i915_gem_object_is_lmem(obj),
+                                  value, out);
+}
+
+static int __global_clear(struct intel_migrate *migrate,
+                         struct i915_gem_ww_ctx *ww,
+                         struct drm_i915_gem_object *obj,
+                         u32 value,
+                         struct i915_request **out)
+{
+       return intel_context_migrate_clear(migrate->context, NULL,
+                                          obj->mm.pages->sgl,
+                                          obj->cache_level,
+                                          i915_gem_object_is_lmem(obj),
+                                          value, out);
+}
+
+static int
+migrate_clear(struct intel_migrate *migrate, u32 sz, struct rnd_state *prng)
+{
+       return clear(migrate, __migrate_clear, sz, prng);
+}
+
+static int
+global_clear(struct intel_migrate *migrate, u32 sz, struct rnd_state *prng)
+{
+       return clear(migrate, __global_clear, sz, prng);
+}
+
+static int live_migrate_copy(void *arg)
+{
+       struct intel_migrate *migrate = arg;
+       struct drm_i915_private *i915 = migrate->context->engine->i915;
+       I915_RND_STATE(prng);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+               int err;
+
+               err = migrate_copy(migrate, sizes[i], &prng);
+               if (err == 0)
+                       err = global_copy(migrate, sizes[i], &prng);
+               i915_gem_drain_freed_objects(i915);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int live_migrate_clear(void *arg)
+{
+       struct intel_migrate *migrate = arg;
+       struct drm_i915_private *i915 = migrate->context->engine->i915;
+       I915_RND_STATE(prng);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+               int err;
+
+               err = migrate_clear(migrate, sizes[i], &prng);
+               if (err == 0)
+                       err = global_clear(migrate, sizes[i], &prng);
+
+               i915_gem_drain_freed_objects(i915);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+struct threaded_migrate {
+       struct intel_migrate *migrate;
+       struct task_struct *tsk;
+       struct rnd_state prng;
+};
+
+static int threaded_migrate(struct intel_migrate *migrate,
+                           int (*fn)(void *arg),
+                           unsigned int flags)
+{
+       const unsigned int n_cpus = num_online_cpus() + 1;
+       struct threaded_migrate *thread;
+       I915_RND_STATE(prng);
+       unsigned int i;
+       int err = 0;
+
+       thread = kcalloc(n_cpus, sizeof(*thread), GFP_KERNEL);
+       if (!thread)
+               return 0;
+
+       for (i = 0; i < n_cpus; ++i) {
+               struct task_struct *tsk;
+
+               thread[i].migrate = migrate;
+               thread[i].prng =
+                       I915_RND_STATE_INITIALIZER(prandom_u32_state(&prng));
+
+               tsk = kthread_run(fn, &thread[i], "igt-%d", i);
+               if (IS_ERR(tsk)) {
+                       err = PTR_ERR(tsk);
+                       break;
+               }
+
+               get_task_struct(tsk);
+               thread[i].tsk = tsk;
+       }
+
+       msleep(10); /* start all threads before we kthread_stop() */
+
+       for (i = 0; i < n_cpus; ++i) {
+               struct task_struct *tsk = thread[i].tsk;
+               int status;
+
+               if (IS_ERR_OR_NULL(tsk))
+                       continue;
+
+               status = kthread_stop(tsk);
+               if (status && !err)
+                       err = status;
+
+               put_task_struct(tsk);
+       }
+
+       kfree(thread);
+       return err;
+}
+
+static int __thread_migrate_copy(void *arg)
+{
+       struct threaded_migrate *tm = arg;
+
+       return migrate_copy(tm->migrate, 2 * CHUNK_SZ, &tm->prng);
+}
+
+static int thread_migrate_copy(void *arg)
+{
+       return threaded_migrate(arg, __thread_migrate_copy, 0);
+}
+
+static int __thread_global_copy(void *arg)
+{
+       struct threaded_migrate *tm = arg;
+
+       return global_copy(tm->migrate, 2 * CHUNK_SZ, &tm->prng);
+}
+
+static int thread_global_copy(void *arg)
+{
+       return threaded_migrate(arg, __thread_global_copy, 0);
+}
+
+static int __thread_migrate_clear(void *arg)
+{
+       struct threaded_migrate *tm = arg;
+
+       return migrate_clear(tm->migrate, 2 * CHUNK_SZ, &tm->prng);
+}
+
+static int __thread_global_clear(void *arg)
+{
+       struct threaded_migrate *tm = arg;
+
+       return global_clear(tm->migrate, 2 * CHUNK_SZ, &tm->prng);
+}
+
+static int thread_migrate_clear(void *arg)
+{
+       return threaded_migrate(arg, __thread_migrate_clear, 0);
+}
+
+static int thread_global_clear(void *arg)
+{
+       return threaded_migrate(arg, __thread_global_clear, 0);
+}
+
+int intel_migrate_live_selftests(struct drm_i915_private *i915)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(live_migrate_copy),
+               SUBTEST(live_migrate_clear),
+               SUBTEST(thread_migrate_copy),
+               SUBTEST(thread_migrate_clear),
+               SUBTEST(thread_global_copy),
+               SUBTEST(thread_global_clear),
+       };
+       struct intel_gt *gt = &i915->gt;
+
+       if (!gt->migrate.context)
+               return 0;
+
+       return i915_subtests(tests, &gt->migrate);
+}
+
+static struct drm_i915_gem_object *
+create_init_lmem_internal(struct intel_gt *gt, size_t sz, bool try_lmem)
+{
+       struct drm_i915_gem_object *obj = NULL;
+       int err;
+
+       if (try_lmem)
+               obj = i915_gem_object_create_lmem(gt->i915, sz, 0);
+
+       if (IS_ERR_OR_NULL(obj)) {
+               obj = i915_gem_object_create_internal(gt->i915, sz);
+               if (IS_ERR(obj))
+                       return obj;
+       }
+
+       i915_gem_object_trylock(obj);
+       err = i915_gem_object_pin_pages(obj);
+       if (err) {
+               i915_gem_object_unlock(obj);
+               i915_gem_object_put(obj);
+               return ERR_PTR(err);
+       }
+
+       return obj;
+}
+
+static int wrap_ktime_compare(const void *A, const void *B)
+{
+       const ktime_t *a = A, *b = B;
+
+       return ktime_compare(*a, *b);
+}
+
+static int __perf_clear_blt(struct intel_context *ce,
+                           struct scatterlist *sg,
+                           enum i915_cache_level cache_level,
+                           bool is_lmem,
+                           size_t sz)
+{
+       ktime_t t[5];
+       int pass;
+       int err = 0;
+
+       for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+               struct i915_request *rq;
+               ktime_t t0, t1;
+
+               t0 = ktime_get();
+
+               err = intel_context_migrate_clear(ce, NULL, sg, cache_level,
+                                                 is_lmem, 0, &rq);
+               if (rq) {
+                       if (i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT) < 0)
+                               err = -EIO;
+                       i915_request_put(rq);
+               }
+               if (err)
+                       break;
+
+               t1 = ktime_get();
+               t[pass] = ktime_sub(t1, t0);
+       }
+       if (err)
+               return err;
+
+       sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+       pr_info("%s: %zd KiB fill: %lld MiB/s\n",
+               ce->engine->name, sz >> 10,
+               div64_u64(mul_u32_u32(4 * sz,
+                                     1000 * 1000 * 1000),
+                         t[1] + 2 * t[2] + t[3]) >> 20);
+       return 0;
+}
+
+static int perf_clear_blt(void *arg)
+{
+       struct intel_gt *gt = arg;
+       static const unsigned long sizes[] = {
+               SZ_4K,
+               SZ_64K,
+               SZ_2M,
+               SZ_64M
+       };
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+               struct drm_i915_gem_object *dst;
+               int err;
+
+               dst = create_init_lmem_internal(gt, sizes[i], true);
+               if (IS_ERR(dst))
+                       return PTR_ERR(dst);
+
+               err = __perf_clear_blt(gt->migrate.context,
+                                      dst->mm.pages->sgl,
+                                      I915_CACHE_NONE,
+                                      i915_gem_object_is_lmem(dst),
+                                      sizes[i]);
+
+               i915_gem_object_unlock(dst);
+               i915_gem_object_put(dst);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+static int __perf_copy_blt(struct intel_context *ce,
+                          struct scatterlist *src,
+                          enum i915_cache_level src_cache_level,
+                          bool src_is_lmem,
+                          struct scatterlist *dst,
+                          enum i915_cache_level dst_cache_level,
+                          bool dst_is_lmem,
+                          size_t sz)
+{
+       ktime_t t[5];
+       int pass;
+       int err = 0;
+
+       for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+               struct i915_request *rq;
+               ktime_t t0, t1;
+
+               t0 = ktime_get();
+
+               err = intel_context_migrate_copy(ce, NULL,
+                                                src, src_cache_level,
+                                                src_is_lmem,
+                                                dst, dst_cache_level,
+                                                dst_is_lmem,
+                                                &rq);
+               if (rq) {
+                       if (i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT) < 0)
+                               err = -EIO;
+                       i915_request_put(rq);
+               }
+               if (err)
+                       break;
+
+               t1 = ktime_get();
+               t[pass] = ktime_sub(t1, t0);
+       }
+       if (err)
+               return err;
+
+       sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+       pr_info("%s: %zd KiB copy: %lld MiB/s\n",
+               ce->engine->name, sz >> 10,
+               div64_u64(mul_u32_u32(4 * sz,
+                                     1000 * 1000 * 1000),
+                         t[1] + 2 * t[2] + t[3]) >> 20);
+       return 0;
+}
+
+static int perf_copy_blt(void *arg)
+{
+       struct intel_gt *gt = arg;
+       static const unsigned long sizes[] = {
+               SZ_4K,
+               SZ_64K,
+               SZ_2M,
+               SZ_64M
+       };
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(sizes); i++) {
+               struct drm_i915_gem_object *src, *dst;
+               int err;
+
+               src = create_init_lmem_internal(gt, sizes[i], true);
+               if (IS_ERR(src))
+                       return PTR_ERR(src);
+
+               dst = create_init_lmem_internal(gt, sizes[i], false);
+               if (IS_ERR(dst)) {
+                       err = PTR_ERR(dst);
+                       goto err_src;
+               }
+
+               err = __perf_copy_blt(gt->migrate.context,
+                                     src->mm.pages->sgl,
+                                     I915_CACHE_NONE,
+                                     i915_gem_object_is_lmem(src),
+                                     dst->mm.pages->sgl,
+                                     I915_CACHE_NONE,
+                                     i915_gem_object_is_lmem(dst),
+                                     sizes[i]);
+
+               i915_gem_object_unlock(dst);
+               i915_gem_object_put(dst);
+err_src:
+               i915_gem_object_unlock(src);
+               i915_gem_object_put(src);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
+int intel_migrate_perf_selftests(struct drm_i915_private *i915)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(perf_clear_blt),
+               SUBTEST(perf_copy_blt),
+       };
+       struct intel_gt *gt = &i915->gt;
+
+       if (intel_gt_is_wedged(gt))
+               return 0;
+
+       if (!gt->migrate.context)
+               return 0;
+
+       return intel_gt_live_subtests(tests, gt);
+}
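
The perf subtests in the new selftest_migrate.c report throughput from five timed passes: the timings are sorted, the fastest and slowest are dropped, and the median counts twice. Restated as a standalone helper that simply mirrors the expression used in __perf_clear_blt()/__perf_copy_blt() (illustrative only):

#include <linux/ktime.h>
#include <linux/math64.h>

/* t[] must already be sorted ascending, as the subtests do with sort() */
static u64 reported_rate_mib_s(size_t sz, const ktime_t t[5])
{
        /*
         * Bytes moved across four weighted passes divided by their total
         * time in ns, scaled to bytes/s and then shifted down to MiB/s.
         */
        return div64_u64(mul_u32_u32(4 * sz, 1000 * 1000 * 1000),
                         t[1] + 2 * t[2] + t[3]) >> 20;
}

For the 2M clear case, for example, this is 4 x 2 MiB of traffic over a four-pass time built from the middle three samples (median counted twice), so a single outlier pass cannot skew the reported MiB/s.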
index b9bb0e6..13d25bf 100644 (file)
@@ -10,6 +10,7 @@
 #include "gem/selftests/mock_context.h"
 #include "selftests/igt_reset.h"
 #include "selftests/igt_spinner.h"
+#include "selftests/intel_scheduler_helpers.h"
 
 struct live_mocs {
        struct drm_i915_mocs_table table;
@@ -28,7 +29,7 @@ static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
                return ce;
 
        /* We build large requests to read the registers from the ring */
-       ce->ring = __intel_context_ring_size(SZ_16K);
+       ce->ring_size = SZ_16K;
 
        return ce;
 }
@@ -318,7 +319,8 @@ static int live_mocs_clean(void *arg)
 }
 
 static int active_engine_reset(struct intel_context *ce,
-                              const char *reason)
+                              const char *reason,
+                              bool using_guc)
 {
        struct igt_spinner spin;
        struct i915_request *rq;
@@ -335,9 +337,13 @@ static int active_engine_reset(struct intel_context *ce,
        }
 
        err = request_add_spin(rq, &spin);
-       if (err == 0)
+       if (err == 0 && !using_guc)
                err = intel_engine_reset(ce->engine, reason);
 
+       /* Ensure the reset happens and kills the engine */
+       if (err == 0)
+               err = intel_selftest_wait_for_rq(rq);
+
        igt_spinner_end(&spin);
        igt_spinner_fini(&spin);
 
@@ -345,21 +351,23 @@ static int active_engine_reset(struct intel_context *ce,
 }
 
 static int __live_mocs_reset(struct live_mocs *mocs,
-                            struct intel_context *ce)
+                            struct intel_context *ce, bool using_guc)
 {
        struct intel_gt *gt = ce->engine->gt;
        int err;
 
        if (intel_has_reset_engine(gt)) {
-               err = intel_engine_reset(ce->engine, "mocs");
-               if (err)
-                       return err;
-
-               err = check_mocs_engine(mocs, ce);
-               if (err)
-                       return err;
+               if (!using_guc) {
+                       err = intel_engine_reset(ce->engine, "mocs");
+                       if (err)
+                               return err;
+
+                       err = check_mocs_engine(mocs, ce);
+                       if (err)
+                               return err;
+               }
 
-               err = active_engine_reset(ce, "mocs");
+               err = active_engine_reset(ce, "mocs", using_guc);
                if (err)
                        return err;
 
@@ -395,19 +403,33 @@ static int live_mocs_reset(void *arg)
 
        igt_global_reset_lock(gt);
        for_each_engine(engine, gt, id) {
+               bool using_guc = intel_engine_uses_guc(engine);
+               struct intel_selftest_saved_policy saved;
                struct intel_context *ce;
+               int err2;
+
+               err = intel_selftest_modify_policy(engine, &saved,
+                                                  SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
+               if (err)
+                       break;
 
                ce = mocs_context_create(engine);
                if (IS_ERR(ce)) {
                        err = PTR_ERR(ce);
-                       break;
+                       goto restore;
                }
 
                intel_engine_pm_get(engine);
-               err = __live_mocs_reset(&mocs, ce);
-               intel_engine_pm_put(engine);
 
+               err = __live_mocs_reset(&mocs, ce, using_guc);
+
+               intel_engine_pm_put(engine);
                intel_context_put(ce);
+
+restore:
+               err2 = intel_selftest_restore_policy(engine, &saved);
+               if (err == 0)
+                       err = err2;
                if (err)
                        break;
        }
index 8784257..7a50c9f 100644 (file)
@@ -321,7 +321,7 @@ static int igt_atomic_engine_reset(void *arg)
                goto out_unlock;
 
        for_each_engine(engine, gt, id) {
-               struct tasklet_struct *t = &engine->execlists.tasklet;
+               struct tasklet_struct *t = &engine->sched_engine->tasklet;
 
                if (t->func)
                        tasklet_disable(t);
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
new file mode 100644 (file)
index 0000000..9334bad
--- /dev/null
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#define NUM_STEPS 5
+#define H2G_DELAY 50000
+#define delay_for_h2g() usleep_range(H2G_DELAY, H2G_DELAY + 10000)
+#define FREQUENCY_REQ_UNIT     DIV_ROUND_CLOSEST(GT_FREQUENCY_MULTIPLIER, \
+                                                 GEN9_FREQ_SCALER)
+
+static int slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 freq)
+{
+       int ret;
+
+       ret = intel_guc_slpc_set_min_freq(slpc, freq);
+       if (ret)
+               pr_err("Could not set min frequency to [%u]\n", freq);
+       else /* Delay to ensure h2g completes */
+               delay_for_h2g();
+
+       return ret;
+}
+
+static int slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 freq)
+{
+       int ret;
+
+       ret = intel_guc_slpc_set_max_freq(slpc, freq);
+       if (ret)
+               pr_err("Could not set maximum frequency [%u]\n",
+                      freq);
+       else /* Delay to ensure h2g completes */
+               delay_for_h2g();
+
+       return ret;
+}
+
+static int live_slpc_clamp_min(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct intel_gt *gt = &i915->gt;
+       struct intel_guc_slpc *slpc = &gt->uc.guc.slpc;
+       struct intel_rps *rps = &gt->rps;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       struct igt_spinner spin;
+       u32 slpc_min_freq, slpc_max_freq;
+       int err = 0;
+
+       if (!intel_uc_uses_guc_slpc(&gt->uc))
+               return 0;
+
+       if (igt_spinner_init(&spin, gt))
+               return -ENOMEM;
+
+       if (intel_guc_slpc_get_max_freq(slpc, &slpc_max_freq)) {
+               pr_err("Could not get SLPC max freq\n");
+               return -EIO;
+       }
+
+       if (intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq)) {
+               pr_err("Could not get SLPC min freq\n");
+               return -EIO;
+       }
+
+       if (slpc_min_freq == slpc_max_freq) {
+               pr_err("Min/Max are fused to the same value\n");
+               return -EINVAL;
+       }
+
+       intel_gt_pm_wait_for_idle(gt);
+       intel_gt_pm_get(gt);
+       for_each_engine(engine, gt, id) {
+               struct i915_request *rq;
+               u32 step, min_freq, req_freq;
+               u32 act_freq, max_act_freq;
+
+               if (!intel_engine_can_store_dword(engine))
+                       continue;
+
+               /* Go from min to max in 5 steps */
+               step = (slpc_max_freq - slpc_min_freq) / NUM_STEPS;
+               max_act_freq = slpc_min_freq;
+               for (min_freq = slpc_min_freq; min_freq < slpc_max_freq;
+                                       min_freq += step) {
+                       err = slpc_set_min_freq(slpc, min_freq);
+                       if (err)
+                               break;
+
+                       st_engine_heartbeat_disable(engine);
+
+                       rq = igt_spinner_create_request(&spin,
+                                                       engine->kernel_context,
+                                                       MI_NOOP);
+                       if (IS_ERR(rq)) {
+                               err = PTR_ERR(rq);
+                               st_engine_heartbeat_enable(engine);
+                               break;
+                       }
+
+                       i915_request_add(rq);
+
+                       if (!igt_wait_for_spinner(&spin, rq)) {
+                               pr_err("%s: Spinner did not start\n",
+                                      engine->name);
+                               igt_spinner_end(&spin);
+                               st_engine_heartbeat_enable(engine);
+                               intel_gt_set_wedged(engine->gt);
+                               err = -EIO;
+                               break;
+                       }
+
+                       /* Wait for GuC to detect busyness and raise
+                        * requested frequency if necessary.
+                        */
+                       delay_for_h2g();
+
+                       req_freq = intel_rps_read_punit_req_frequency(rps);
+
+                       /* GuC requests freq in multiples of 50/3 MHz */
+                       if (req_freq < (min_freq - FREQUENCY_REQ_UNIT)) {
+                               pr_err("SWReq is %d, should be at least %d\n", req_freq,
+                                      min_freq - FREQUENCY_REQ_UNIT);
+                               igt_spinner_end(&spin);
+                               st_engine_heartbeat_enable(engine);
+                               err = -EINVAL;
+                               break;
+                       }
+
+                       act_freq =  intel_rps_read_actual_frequency(rps);
+                       if (act_freq > max_act_freq)
+                               max_act_freq = act_freq;
+
+                       igt_spinner_end(&spin);
+                       st_engine_heartbeat_enable(engine);
+               }
+
+               pr_info("Max actual frequency for %s was %d\n",
+                       engine->name, max_act_freq);
+
+               /* Actual frequency should rise above min */
+               if (max_act_freq == slpc_min_freq) {
+                       pr_err("Actual freq did not rise above min\n");
+                       err = -EINVAL;
+               }
+
+               if (err)
+                       break;
+       }
+
+       /* Restore min/max frequencies */
+       slpc_set_max_freq(slpc, slpc_max_freq);
+       slpc_set_min_freq(slpc, slpc_min_freq);
+
+       if (igt_flush_test(gt->i915))
+               err = -EIO;
+
+       intel_gt_pm_put(gt);
+       igt_spinner_fini(&spin);
+       intel_gt_pm_wait_for_idle(gt);
+
+       return err;
+}
+
+static int live_slpc_clamp_max(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct intel_gt *gt = &i915->gt;
+       struct intel_guc_slpc *slpc;
+       struct intel_rps *rps;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       struct igt_spinner spin;
+       int err = 0;
+       u32 slpc_min_freq, slpc_max_freq;
+
+       slpc = &gt->uc.guc.slpc;
+       rps = &gt->rps;
+
+       if (!intel_uc_uses_guc_slpc(&gt->uc))
+               return 0;
+
+       if (igt_spinner_init(&spin, gt))
+               return -ENOMEM;
+
+       if (intel_guc_slpc_get_max_freq(slpc, &slpc_max_freq)) {
+               pr_err("Could not get SLPC max freq\n");
+               return -EIO;
+       }
+
+       if (intel_guc_slpc_get_min_freq(slpc, &slpc_min_freq)) {
+               pr_err("Could not get SLPC min freq\n");
+               return -EIO;
+       }
+
+       if (slpc_min_freq == slpc_max_freq) {
+               pr_err("Min/Max are fused to the same value\n");
+               return -EINVAL;
+       }
+
+       intel_gt_pm_wait_for_idle(gt);
+       intel_gt_pm_get(gt);
+       for_each_engine(engine, gt, id) {
+               struct i915_request *rq;
+               u32 max_freq, req_freq;
+               u32 act_freq, max_act_freq;
+               u32 step;
+
+               if (!intel_engine_can_store_dword(engine))
+                       continue;
+
+               /* Go from max to min in 5 steps */
+               step = (slpc_max_freq - slpc_min_freq) / NUM_STEPS;
+               max_act_freq = slpc_min_freq;
+               for (max_freq = slpc_max_freq; max_freq > slpc_min_freq;
+                                       max_freq -= step) {
+                       err = slpc_set_max_freq(slpc, max_freq);
+                       if (err)
+                               break;
+
+                       st_engine_heartbeat_disable(engine);
+
+                       rq = igt_spinner_create_request(&spin,
+                                                       engine->kernel_context,
+                                                       MI_NOOP);
+                       if (IS_ERR(rq)) {
+                               st_engine_heartbeat_enable(engine);
+                               err = PTR_ERR(rq);
+                               break;
+                       }
+
+                       i915_request_add(rq);
+
+                       if (!igt_wait_for_spinner(&spin, rq)) {
+                               pr_err("%s: SLPC spinner did not start\n",
+                                      engine->name);
+                               igt_spinner_end(&spin);
+                               st_engine_heartbeat_enable(engine);
+                               intel_gt_set_wedged(engine->gt);
+                               err = -EIO;
+                               break;
+                       }
+
+                       delay_for_h2g();
+
+                       /* Verify that SWREQ was indeed set to the specified value */
+                       req_freq = intel_rps_read_punit_req_frequency(rps);
+
+                       /* GuC requests freq in multiples of 50/3 MHz */
+                       if (req_freq > (max_freq + FREQUENCY_REQ_UNIT)) {
+                               pr_err("SWReq is %d, should be at most %d\n", req_freq,
+                                      max_freq + FREQUENCY_REQ_UNIT);
+                               igt_spinner_end(&spin);
+                               st_engine_heartbeat_enable(engine);
+                               err = -EINVAL;
+                               break;
+                       }
+
+                       act_freq =  intel_rps_read_actual_frequency(rps);
+                       if (act_freq > max_act_freq)
+                               max_act_freq = act_freq;
+
+                       st_engine_heartbeat_enable(engine);
+                       igt_spinner_end(&spin);
+
+                       if (err)
+                               break;
+               }
+
+               pr_info("Max actual frequency for %s was %d\n",
+                       engine->name, max_act_freq);
+
+               /* Actual frequency should rise above min */
+               if (max_act_freq == slpc_min_freq) {
+                       pr_err("Actual freq did not rise above min\n");
+                       err = -EINVAL;
+               }
+
+               if (igt_flush_test(gt->i915)) {
+                       err = -EIO;
+                       break;
+               }
+
+               if (err)
+                       break;
+       }
+
+       /* Restore min/max freq */
+       slpc_set_max_freq(slpc, slpc_max_freq);
+       slpc_set_min_freq(slpc, slpc_min_freq);
+
+       intel_gt_pm_put(gt);
+       igt_spinner_fini(&spin);
+       intel_gt_pm_wait_for_idle(gt);
+
+       return err;
+}
+
+int intel_slpc_live_selftests(struct drm_i915_private *i915)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(live_slpc_clamp_max),
+               SUBTEST(live_slpc_clamp_min),
+       };
+
+       if (intel_gt_is_wedged(&i915->gt))
+               return 0;
+
+       return i915_live_subtests(tests, i915);
+}
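
One detail worth spelling out in the new SLPC tests: the SWReq checks allow the PUnit-reported request to sit one GuC frequency granule outside the clamp. Assuming GT_FREQUENCY_MULTIPLIER is 50 and GEN9_FREQ_SCALER is 3, which is what the in-code "50/3 MHz" comment implies, the tolerance works out as follows (worked arithmetic only, not part of the patch):

/*
 * FREQUENCY_REQ_UNIT = DIV_ROUND_CLOSEST(GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER)
 *                    = DIV_ROUND_CLOSEST(50, 3)
 *                    = 17 (MHz)
 *
 * live_slpc_clamp_min() therefore accepts req_freq >= min_freq - 17 and
 * live_slpc_clamp_max() accepts req_freq <= max_freq + 17, i.e. one GuC
 * request granule of slack on either side of the clamp.
 */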
index 64da0c9..d0b6a3a 100644 (file)
@@ -874,7 +874,7 @@ static int create_watcher(struct hwsp_watcher *w,
        if (IS_ERR(ce))
                return PTR_ERR(ce);
 
-       ce->ring = __intel_context_ring_size(ringsz);
+       ce->ring_size = ringsz;
        w->rq = intel_context_create_request(ce);
        intel_context_put(ce);
        if (IS_ERR(w->rq))
index c30754d..e623ac4 100644 (file)
@@ -12,6 +12,7 @@
 #include "selftests/igt_flush_test.h"
 #include "selftests/igt_reset.h"
 #include "selftests/igt_spinner.h"
+#include "selftests/intel_scheduler_helpers.h"
 #include "selftests/mock_drm.h"
 
 #include "gem/selftests/igt_gem_utils.h"
@@ -261,28 +262,34 @@ static int do_engine_reset(struct intel_engine_cs *engine)
        return intel_engine_reset(engine, "live_workarounds");
 }
 
+static int do_guc_reset(struct intel_engine_cs *engine)
+{
+       /* Currently a no-op as the reset is handled by GuC */
+       return 0;
+}
+
 static int
 switch_to_scratch_context(struct intel_engine_cs *engine,
-                         struct igt_spinner *spin)
+                         struct igt_spinner *spin,
+                         struct i915_request **rq)
 {
        struct intel_context *ce;
-       struct i915_request *rq;
        int err = 0;
 
        ce = intel_context_create(engine);
        if (IS_ERR(ce))
                return PTR_ERR(ce);
 
-       rq = igt_spinner_create_request(spin, ce, MI_NOOP);
+       *rq = igt_spinner_create_request(spin, ce, MI_NOOP);
        intel_context_put(ce);
 
-       if (IS_ERR(rq)) {
+       if (IS_ERR(*rq)) {
                spin = NULL;
-               err = PTR_ERR(rq);
+               err = PTR_ERR(*rq);
                goto err;
        }
 
-       err = request_add_spin(rq, spin);
+       err = request_add_spin(*rq, spin);
 err:
        if (err && spin)
                igt_spinner_end(spin);
@@ -296,6 +303,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 {
        struct intel_context *ce, *tmp;
        struct igt_spinner spin;
+       struct i915_request *rq;
        intel_wakeref_t wakeref;
        int err;
 
@@ -316,13 +324,24 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
                goto out_spin;
        }
 
-       err = switch_to_scratch_context(engine, &spin);
+       err = switch_to_scratch_context(engine, &spin, &rq);
        if (err)
                goto out_spin;
 
+       /* Ensure the spinner hasn't aborted */
+       if (i915_request_completed(rq)) {
+               pr_err("%s spinner failed to start\n", name);
+               err = -ETIMEDOUT;
+               goto out_spin;
+       }
+
        with_intel_runtime_pm(engine->uncore->rpm, wakeref)
                err = reset(engine);
 
+       /* Ensure the reset happens and kills the engine */
+       if (err == 0)
+               err = intel_selftest_wait_for_rq(rq);
+
        igt_spinner_end(&spin);
 
        if (err) {
@@ -787,9 +806,28 @@ static int live_reset_whitelist(void *arg)
                        continue;
 
                if (intel_has_reset_engine(gt)) {
-                       err = check_whitelist_across_reset(engine,
-                                                          do_engine_reset,
-                                                          "engine");
+                       if (intel_engine_uses_guc(engine)) {
+                               struct intel_selftest_saved_policy saved;
+                               int err2;
+
+                               err = intel_selftest_modify_policy(engine, &saved,
+                                                                  SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
+                               if (err)
+                                       goto out;
+
+                               err = check_whitelist_across_reset(engine,
+                                                                  do_guc_reset,
+                                                                  "guc");
+
+                               err2 = intel_selftest_restore_policy(engine, &saved);
+                               if (err == 0)
+                                       err = err2;
+                       } else {
+                               err = check_whitelist_across_reset(engine,
+                                                                  do_engine_reset,
+                                                                  "engine");
+                       }
+
                        if (err)
                                goto out;
                }
@@ -1147,7 +1185,7 @@ verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists,
        enum intel_engine_id id;
        bool ok = true;
 
-       ok &= wa_list_verify(gt->uncore, &lists->gt_wa_list, str);
+       ok &= wa_list_verify(gt, &lists->gt_wa_list, str);
 
        for_each_engine(engine, gt, id) {
                struct intel_context *ce;
@@ -1175,31 +1213,36 @@ live_gpu_reset_workarounds(void *arg)
 {
        struct intel_gt *gt = arg;
        intel_wakeref_t wakeref;
-       struct wa_lists lists;
+       struct wa_lists *lists;
        bool ok;
 
        if (!intel_has_gpu_reset(gt))
                return 0;
 
+       lists = kzalloc(sizeof(*lists), GFP_KERNEL);
+       if (!lists)
+               return -ENOMEM;
+
        pr_info("Verifying after GPU reset...\n");
 
        igt_global_reset_lock(gt);
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);
 
-       reference_lists_init(gt, &lists);
+       reference_lists_init(gt, lists);
 
-       ok = verify_wa_lists(gt, &lists, "before reset");
+       ok = verify_wa_lists(gt, lists, "before reset");
        if (!ok)
                goto out;
 
        intel_gt_reset(gt, ALL_ENGINES, "live_workarounds");
 
-       ok = verify_wa_lists(gt, &lists, "after reset");
+       ok = verify_wa_lists(gt, lists, "after reset");
 
 out:
-       reference_lists_fini(gt, &lists);
+       reference_lists_fini(gt, lists);
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
        igt_global_reset_unlock(gt);
+       kfree(lists);
 
        return ok ? 0 : -ESRCH;
 }
@@ -1214,43 +1257,57 @@ live_engine_reset_workarounds(void *arg)
        struct igt_spinner spin;
        struct i915_request *rq;
        intel_wakeref_t wakeref;
-       struct wa_lists lists;
+       struct wa_lists *lists;
        int ret = 0;
 
        if (!intel_has_reset_engine(gt))
                return 0;
 
+       lists = kzalloc(sizeof(*lists), GFP_KERNEL);
+       if (!lists)
+               return -ENOMEM;
+
        igt_global_reset_lock(gt);
        wakeref = intel_runtime_pm_get(gt->uncore->rpm);
 
-       reference_lists_init(gt, &lists);
+       reference_lists_init(gt, lists);
 
        for_each_engine(engine, gt, id) {
+               struct intel_selftest_saved_policy saved;
+               bool using_guc = intel_engine_uses_guc(engine);
                bool ok;
+               int ret2;
 
                pr_info("Verifying after %s reset...\n", engine->name);
+               ret = intel_selftest_modify_policy(engine, &saved,
+                                                  SELFTEST_SCHEDULER_MODIFY_FAST_RESET);
+               if (ret)
+                       break;
+
                ce = intel_context_create(engine);
                if (IS_ERR(ce)) {
                        ret = PTR_ERR(ce);
-                       break;
+                       goto restore;
                }
 
-               ok = verify_wa_lists(gt, &lists, "before reset");
-               if (!ok) {
-                       ret = -ESRCH;
-                       goto err;
-               }
+               if (!using_guc) {
+                       ok = verify_wa_lists(gt, lists, "before reset");
+                       if (!ok) {
+                               ret = -ESRCH;
+                               goto err;
+                       }
 
-               ret = intel_engine_reset(engine, "live_workarounds:idle");
-               if (ret) {
-                       pr_err("%s: Reset failed while idle\n", engine->name);
-                       goto err;
-               }
+                       ret = intel_engine_reset(engine, "live_workarounds:idle");
+                       if (ret) {
+                               pr_err("%s: Reset failed while idle\n", engine->name);
+                               goto err;
+                       }
 
-               ok = verify_wa_lists(gt, &lists, "after idle reset");
-               if (!ok) {
-                       ret = -ESRCH;
-                       goto err;
+                       ok = verify_wa_lists(gt, lists, "after idle reset");
+                       if (!ok) {
+                               ret = -ESRCH;
+                               goto err;
+                       }
                }
 
                ret = igt_spinner_init(&spin, engine->gt);
@@ -1271,32 +1328,49 @@ live_engine_reset_workarounds(void *arg)
                        goto err;
                }
 
-               ret = intel_engine_reset(engine, "live_workarounds:active");
-               if (ret) {
-                       pr_err("%s: Reset failed on an active spinner\n",
-                              engine->name);
-                       igt_spinner_fini(&spin);
-                       goto err;
+               /* Ensure the spinner hasn't aborted */
+               if (i915_request_completed(rq)) {
+                       ret = -ETIMEDOUT;
+                       goto skip;
                }
 
+               if (!using_guc) {
+                       ret = intel_engine_reset(engine, "live_workarounds:active");
+                       if (ret) {
+                               pr_err("%s: Reset failed on an active spinner\n",
+                                      engine->name);
+                               igt_spinner_fini(&spin);
+                               goto err;
+                       }
+               }
+
+               /* Ensure the reset happens and kills the engine */
+               if (ret == 0)
+                       ret = intel_selftest_wait_for_rq(rq);
+
+skip:
                igt_spinner_end(&spin);
                igt_spinner_fini(&spin);
 
-               ok = verify_wa_lists(gt, &lists, "after busy reset");
-               if (!ok) {
+               ok = verify_wa_lists(gt, lists, "after busy reset");
+               if (!ok)
                        ret = -ESRCH;
-                       goto err;
-               }
 
 err:
                intel_context_put(ce);
+
+restore:
+               ret2 = intel_selftest_restore_policy(engine, &saved);
+               if (ret == 0)
+                       ret = ret2;
                if (ret)
                        break;
        }
 
-       reference_lists_fini(gt, &lists);
+       reference_lists_fini(gt, lists);
        intel_runtime_pm_put(gt->uncore->rpm, wakeref);
        igt_global_reset_unlock(gt);
+       kfree(lists);
 
        igt_flush_test(gt->i915);
 
index 90efef8..8ff5822 100644 (file)
@@ -6,6 +6,113 @@
 #ifndef _ABI_GUC_ACTIONS_ABI_H
 #define _ABI_GUC_ACTIONS_ABI_H
 
+/**
+ * DOC: HOST2GUC_REGISTER_CTB
+ *
+ * This message is used as part of the `CTB based communication`_ setup.
+ *
+ * This message must be sent as `MMIO HXG Message`_.
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |    31 | ORIGIN = GUC_HXG_ORIGIN_HOST_                                |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_                                 |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 27:16 | DATA0 = MBZ                                                  |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  15:0 | ACTION = _`GUC_ACTION_HOST2GUC_REGISTER_CTB` = 0x4505        |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 1 | 31:12 | RESERVED = MBZ                                               |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  11:8 | **TYPE** - type for the `CT Buffer`_                         |
+ *  |   |       |                                                              |
+ *  |   |       |   - _`GUC_CTB_TYPE_HOST2GUC` = 0                             |
+ *  |   |       |   - _`GUC_CTB_TYPE_GUC2HOST` = 1                             |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |   7:0 | **SIZE** - size of the `CT Buffer`_ in 4K units minus 1      |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 2 |  31:0 | **DESC_ADDR** - GGTT address of the `CTB Descriptor`_        |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 3 |  31:0 | **BUFF_ADDR** - GGTT address of the `CT Buffer`_             |
+ *  +---+-------+--------------------------------------------------------------+
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |    31 | ORIGIN = GUC_HXG_ORIGIN_GUC_                                 |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_                        |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  27:0 | DATA0 = MBZ                                                  |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_HOST2GUC_REGISTER_CTB               0x4505
+
+#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_LEN          (GUC_HXG_REQUEST_MSG_MIN_LEN + 3u)
+#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_0_MBZ                GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_MBZ                (0xfffff << 12)
+#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_TYPE       (0xf << 8)
+#define   GUC_CTB_TYPE_HOST2GUC                                0u
+#define   GUC_CTB_TYPE_GUC2HOST                                1u
+#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_SIZE       (0xff << 0)
+#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR  GUC_HXG_REQUEST_MSG_n_DATAn
+#define HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR  GUC_HXG_REQUEST_MSG_n_DATAn
+
+#define HOST2GUC_REGISTER_CTB_RESPONSE_MSG_LEN         GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define HOST2GUC_REGISTER_CTB_RESPONSE_MSG_0_MBZ       GUC_HXG_RESPONSE_MSG_0_DATA0
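
/*
 * Illustrative only: how a REGISTER_CTB request could be packed from the
 * masks above together with the HXG helpers defined in guc_messages_abi.h
 * (FIELD_PREP comes from <linux/bitfield.h>; the function name is
 * hypothetical and the real driver code may differ in detail).
 * msg must hold HOST2GUC_REGISTER_CTB_REQUEST_MSG_LEN (4) dwords.
 */
static void pack_register_ctb(u32 *msg, u32 type, u32 size_in_4k_minus_1,
			      u32 desc_addr, u32 buff_addr)
{
	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			    GUC_ACTION_HOST2GUC_REGISTER_CTB);
	msg[1] = FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_TYPE, type) |
		 FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_SIZE,
			    size_in_4k_minus_1);
	msg[2] = FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR,
			    desc_addr);
	msg[3] = FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR,
			    buff_addr);
}
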
+
+/**
+ * DOC: HOST2GUC_DEREGISTER_CTB
+ *
+ * This message is used as part of the `CTB based communication`_ teardown.
+ *
+ * This message must be sent as `MMIO HXG Message`_.
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |    31 | ORIGIN = GUC_HXG_ORIGIN_HOST_                                |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_                                 |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 27:16 | DATA0 = MBZ                                                  |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  15:0 | ACTION = _`GUC_ACTION_HOST2GUC_DEREGISTER_CTB` = 0x4506      |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 1 | 31:12 | RESERVED = MBZ                                               |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  11:8 | **TYPE** - type of the `CT Buffer`_                          |
+ *  |   |       |                                                              |
+ *  |   |       | see `GUC_ACTION_HOST2GUC_REGISTER_CTB`_                      |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |   7:0 | RESERVED = MBZ                                               |
+ *  +---+-------+--------------------------------------------------------------+
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |    31 | ORIGIN = GUC_HXG_ORIGIN_GUC_                                 |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_                        |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  27:0 | DATA0 = MBZ                                                  |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+#define GUC_ACTION_HOST2GUC_DEREGISTER_CTB             0x4506
+
+#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_LEN                (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u)
+#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_0_MBZ      GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_MBZ      (0xfffff << 12)
+#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_TYPE     (0xf << 8)
+#define HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_MBZ2     (0xff << 0)
+
+#define HOST2GUC_DEREGISTER_CTB_RESPONSE_MSG_LEN       GUC_HXG_RESPONSE_MSG_MIN_LEN
+#define HOST2GUC_DEREGISTER_CTB_RESPONSE_MSG_0_MBZ     GUC_HXG_RESPONSE_MSG_0_DATA0
+
+/* legacy definitions */
+
 enum intel_guc_action {
        INTEL_GUC_ACTION_DEFAULT = 0x0,
        INTEL_GUC_ACTION_REQUEST_PREEMPTION = 0x2,
@@ -17,13 +124,33 @@ enum intel_guc_action {
        INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302,
        INTEL_GUC_ACTION_ENTER_S_STATE = 0x501,
        INTEL_GUC_ACTION_EXIT_S_STATE = 0x502,
-       INTEL_GUC_ACTION_SLPC_REQUEST = 0x3003,
+       INTEL_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE = 0x506,
+       INTEL_GUC_ACTION_SCHED_CONTEXT = 0x1000,
+       INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET = 0x1001,
+       INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE = 0x1002,
+       INTEL_GUC_ACTION_SCHED_ENGINE_MODE_SET = 0x1003,
+       INTEL_GUC_ACTION_SCHED_ENGINE_MODE_DONE = 0x1004,
+       INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY = 0x1005,
+       INTEL_GUC_ACTION_SET_CONTEXT_EXECUTION_QUANTUM = 0x1006,
+       INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT = 0x1007,
+       INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION = 0x1008,
+       INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION = 0x1009,
+       INTEL_GUC_ACTION_SETUP_PC_GUCRC = 0x3004,
        INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000,
+       INTEL_GUC_ACTION_REGISTER_CONTEXT = 0x4502,
+       INTEL_GUC_ACTION_DEREGISTER_CONTEXT = 0x4503,
        INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
        INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
+       INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
+       INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
        INTEL_GUC_ACTION_LIMIT
 };
 
+enum intel_guc_rc_options {
+       INTEL_GUCRC_HOST_CONTROL,
+       INTEL_GUCRC_FIRMWARE_CONTROL,
+};
+
 enum intel_guc_preempt_options {
        INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q = 0x4,
        INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q = 0x8,
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_slpc_abi.h
new file mode 100644 (file)
index 0000000..7a8d4bf
--- /dev/null
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _GUC_ACTIONS_SLPC_ABI_H_
+#define _GUC_ACTIONS_SLPC_ABI_H_
+
+#include <linux/types.h>
+#include "i915_reg.h"
+
+/**
+ * DOC: SLPC SHARED DATA STRUCTURE
+ *
+ *  +----+------+--------------------------------------------------------------+
+ *  | CL | Bytes| Description                                                  |
+ *  +====+======+==============================================================+
+ *  | 1  | 0-3  | SHARED DATA SIZE                                             |
+ *  |    +------+--------------------------------------------------------------+
+ *  |    | 4-7  | GLOBAL STATE                                                 |
+ *  |    +------+--------------------------------------------------------------+
+ *  |    | 8-11 | DISPLAY DATA ADDRESS                                         |
+ *  |    +------+--------------------------------------------------------------+
+ *  |    | 12:63| PADDING                                                      |
+ *  +----+------+--------------------------------------------------------------+
+ *  |    | 0:63 | PADDING(PLATFORM INFO)                                       |
+ *  +----+------+--------------------------------------------------------------+
+ *  | 3  | 0-3  | TASK STATE DATA                                              |
+ *  +    +------+--------------------------------------------------------------+
+ *  |    | 4:63 | PADDING                                                      |
+ *  +----+------+--------------------------------------------------------------+
+ *  |4-21|0:1087| OVERRIDE PARAMS AND BIT FIELDS                               |
+ *  +----+------+--------------------------------------------------------------+
+ *  |    |      | PADDING + EXTRA RESERVED PAGE                                |
+ *  +----+------+--------------------------------------------------------------+
+ */
+
+/*
+ * SLPC exposes certain parameters for global configuration by the host.
+ * These are referred to as override parameters, because in most cases
+ * the host will not need to modify the default values used by SLPC.
+ * SLPC remembers the default values, which allows the host to easily restore
+ * them by simply unsetting the override. The host can set or unset override
+ * parameters during SLPC (re-)initialization using the SLPC Reset event.
+ * The host can also set or unset override parameters on the fly using the
+ * Parameter Set and Parameter Unset events.
+ */
+
+#define SLPC_MAX_OVERRIDE_PARAMETERS           256
+#define SLPC_OVERRIDE_BITFIELD_SIZE \
+               (SLPC_MAX_OVERRIDE_PARAMETERS / 32)
+
+#define SLPC_PAGE_SIZE_BYTES                   4096
+#define SLPC_CACHELINE_SIZE_BYTES              64
+#define SLPC_SHARED_DATA_SIZE_BYTE_HEADER      SLPC_CACHELINE_SIZE_BYTES
+#define SLPC_SHARED_DATA_SIZE_BYTE_PLATFORM_INFO       SLPC_CACHELINE_SIZE_BYTES
+#define SLPC_SHARED_DATA_SIZE_BYTE_TASK_STATE  SLPC_CACHELINE_SIZE_BYTES
+#define SLPC_SHARED_DATA_MODE_DEFN_TABLE_SIZE  SLPC_PAGE_SIZE_BYTES
+#define SLPC_SHARED_DATA_SIZE_BYTE_MAX         (2 * SLPC_PAGE_SIZE_BYTES)
+
+/*
+ * Cacheline size aligned (Total size needed for
+ * SLPC_MAX_OVERRIDE_PARAMETERS=256 is 1088 bytes)
+ */
+#define SLPC_OVERRIDE_PARAMS_TOTAL_BYTES       (((((SLPC_MAX_OVERRIDE_PARAMETERS * 4) \
+                                               + ((SLPC_MAX_OVERRIDE_PARAMETERS / 32) * 4)) \
+               + (SLPC_CACHELINE_SIZE_BYTES - 1)) / SLPC_CACHELINE_SIZE_BYTES) * \
+                                       SLPC_CACHELINE_SIZE_BYTES)
+
+#define SLPC_SHARED_DATA_SIZE_BYTE_OTHER       (SLPC_SHARED_DATA_SIZE_BYTE_MAX - \
+                                       (SLPC_SHARED_DATA_SIZE_BYTE_HEADER \
+                                       + SLPC_SHARED_DATA_SIZE_BYTE_PLATFORM_INFO \
+                                       + SLPC_SHARED_DATA_SIZE_BYTE_TASK_STATE \
+                                       + SLPC_OVERRIDE_PARAMS_TOTAL_BYTES \
+                                       + SLPC_SHARED_DATA_MODE_DEFN_TABLE_SIZE))
+
+enum slpc_task_enable {
+       SLPC_PARAM_TASK_DEFAULT = 0,
+       SLPC_PARAM_TASK_ENABLED,
+       SLPC_PARAM_TASK_DISABLED,
+       SLPC_PARAM_TASK_UNKNOWN
+};
+
+enum slpc_global_state {
+       SLPC_GLOBAL_STATE_NOT_RUNNING = 0,
+       SLPC_GLOBAL_STATE_INITIALIZING = 1,
+       SLPC_GLOBAL_STATE_RESETTING = 2,
+       SLPC_GLOBAL_STATE_RUNNING = 3,
+       SLPC_GLOBAL_STATE_SHUTTING_DOWN = 4,
+       SLPC_GLOBAL_STATE_ERROR = 5
+};
+
+enum slpc_param_id {
+       SLPC_PARAM_TASK_ENABLE_GTPERF = 0,
+       SLPC_PARAM_TASK_DISABLE_GTPERF = 1,
+       SLPC_PARAM_TASK_ENABLE_BALANCER = 2,
+       SLPC_PARAM_TASK_DISABLE_BALANCER = 3,
+       SLPC_PARAM_TASK_ENABLE_DCC = 4,
+       SLPC_PARAM_TASK_DISABLE_DCC = 5,
+       SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ = 6,
+       SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ = 7,
+       SLPC_PARAM_GLOBAL_MIN_GT_SLICE_FREQ_MHZ = 8,
+       SLPC_PARAM_GLOBAL_MAX_GT_SLICE_FREQ_MHZ = 9,
+       SLPC_PARAM_GTPERF_THRESHOLD_MAX_FPS = 10,
+       SLPC_PARAM_GLOBAL_DISABLE_GT_FREQ_MANAGEMENT = 11,
+       SLPC_PARAM_GTPERF_ENABLE_FRAMERATE_STALLING = 12,
+       SLPC_PARAM_GLOBAL_DISABLE_RC6_MODE_CHANGE = 13,
+       SLPC_PARAM_GLOBAL_OC_UNSLICE_FREQ_MHZ = 14,
+       SLPC_PARAM_GLOBAL_OC_SLICE_FREQ_MHZ = 15,
+       SLPC_PARAM_GLOBAL_ENABLE_IA_GT_BALANCING = 16,
+       SLPC_PARAM_GLOBAL_ENABLE_ADAPTIVE_BURST_TURBO = 17,
+       SLPC_PARAM_GLOBAL_ENABLE_EVAL_MODE = 18,
+       SLPC_PARAM_GLOBAL_ENABLE_BALANCER_IN_NON_GAMING_MODE = 19,
+       SLPC_PARAM_GLOBAL_RT_MODE_TURBO_FREQ_DELTA_MHZ = 20,
+       SLPC_PARAM_PWRGATE_RC_MODE = 21,
+       SLPC_PARAM_EDR_MODE_COMPUTE_TIMEOUT_MS = 22,
+       SLPC_PARAM_EDR_QOS_FREQ_MHZ = 23,
+       SLPC_PARAM_MEDIA_FF_RATIO_MODE = 24,
+       SLPC_PARAM_ENABLE_IA_FREQ_LIMITING = 25,
+       SLPC_PARAM_STRATEGIES = 26,
+       SLPC_PARAM_POWER_PROFILE = 27,
+       SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY = 28,
+       SLPC_MAX_PARAM = 32,
+};
+
+enum slpc_event_id {
+       SLPC_EVENT_RESET = 0,
+       SLPC_EVENT_SHUTDOWN = 1,
+       SLPC_EVENT_PLATFORM_INFO_CHANGE = 2,
+       SLPC_EVENT_DISPLAY_MODE_CHANGE = 3,
+       SLPC_EVENT_FLIP_COMPLETE = 4,
+       SLPC_EVENT_QUERY_TASK_STATE = 5,
+       SLPC_EVENT_PARAMETER_SET = 6,
+       SLPC_EVENT_PARAMETER_UNSET = 7,
+};
+
+struct slpc_task_state_data {
+       union {
+               u32 task_status_padding;
+               struct {
+                       u32 status;
+#define SLPC_GTPERF_TASK_ENABLED       REG_BIT(0)
+#define SLPC_DCC_TASK_ENABLED          REG_BIT(11)
+#define SLPC_IN_DCC                    REG_BIT(12)
+#define SLPC_BALANCER_ENABLED          REG_BIT(15)
+#define SLPC_IBC_TASK_ENABLED          REG_BIT(16)
+#define SLPC_BALANCER_IA_LMT_ENABLED   REG_BIT(17)
+#define SLPC_BALANCER_IA_LMT_ACTIVE    REG_BIT(18)
+               };
+       };
+       union {
+               u32 freq_padding;
+               struct {
+#define SLPC_MAX_UNSLICE_FREQ_MASK     REG_GENMASK(7, 0)
+#define SLPC_MIN_UNSLICE_FREQ_MASK     REG_GENMASK(15, 8)
+#define SLPC_MAX_SLICE_FREQ_MASK       REG_GENMASK(23, 16)
+#define SLPC_MIN_SLICE_FREQ_MASK       REG_GENMASK(31, 24)
+                       u32 freq;
+               };
+       };
+} __packed;
+
+struct slpc_shared_data_header {
+       /* Total size in bytes of this shared buffer. */
+       u32 size;
+       u32 global_state;
+       u32 display_data_addr;
+} __packed;
+
+struct slpc_override_params {
+       u32 bits[SLPC_OVERRIDE_BITFIELD_SIZE];
+       u32 values[SLPC_MAX_OVERRIDE_PARAMETERS];
+} __packed;
+
+struct slpc_shared_data {
+       struct slpc_shared_data_header header;
+       u8 shared_data_header_pad[SLPC_SHARED_DATA_SIZE_BYTE_HEADER -
+                               sizeof(struct slpc_shared_data_header)];
+
+       u8 platform_info_pad[SLPC_SHARED_DATA_SIZE_BYTE_PLATFORM_INFO];
+
+       struct slpc_task_state_data task_state_data;
+       u8 task_state_data_pad[SLPC_SHARED_DATA_SIZE_BYTE_TASK_STATE -
+                               sizeof(struct slpc_task_state_data)];
+
+       struct slpc_override_params override_params;
+       u8 override_params_pad[SLPC_OVERRIDE_PARAMS_TOTAL_BYTES -
+                               sizeof(struct slpc_override_params)];
+
+       u8 shared_data_pad[SLPC_SHARED_DATA_SIZE_BYTE_OTHER];
+
+       /* PAGE 2 (4096 bytes), mode based parameter will be removed soon */
+       u8 reserved_mode_definition[4096];
+} __packed;
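
/*
 * Layout check (illustrative, assuming <linux/build_bug.h>): 64 (header) +
 * 64 (platform info) + 64 (task state) + 1088 (override params) +
 * 2816 (other) + 4096 (mode definition table) = 8192 bytes, i.e. exactly
 * SLPC_SHARED_DATA_SIZE_BYTE_MAX, so the padded struct should satisfy:
 */
static_assert(sizeof(struct slpc_shared_data) == SLPC_SHARED_DATA_SIZE_BYTE_MAX);
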
+
+/**
+ * DOC: SLPC H2G MESSAGE FORMAT
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |    31 | ORIGIN = GUC_HXG_ORIGIN_HOST_                                |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_                                 |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 27:16 | DATA0 = MBZ                                                  |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  15:0 | ACTION = _`GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST` = 0x3003     |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 1 |  31:8 | **EVENT_ID**                                                 |
+ *  +   +-------+--------------------------------------------------------------+
+ *  |   |   7:0 | **EVENT_ARGC** - number of data arguments                    |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 2 |  31:0 | **EVENT_DATA1**                                              |
+ *  +---+-------+--------------------------------------------------------------+
+ *  |...|  31:0 | ...                                                          |
+ *  +---+-------+--------------------------------------------------------------+
+ *  |2+n|  31:0 | **EVENT_DATAn**                                              |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST            0x3003
+
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_MIN_LEN \
+                               (GUC_HXG_REQUEST_MSG_MIN_LEN + 1u)
+#define HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS          9
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_MAX_LEN \
+               (HOST2GUC_PC_SLPC_REQUEST_MSG_MIN_LEN + \
+                       HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_0_MBZ             GUC_HXG_REQUEST_MSG_0_DATA0
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID                (0xff << 8)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC      (0xff << 0)
+#define HOST2GUC_PC_SLPC_REQUEST_MSG_N_EVENT_DATA_N    GUC_HXG_REQUEST_MSG_n_DATAn
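
/*
 * Illustrative sketch of packing an SLPC request with the masks above
 * (FIELD_PREP from <linux/bitfield.h>; pack_slpc_request() is hypothetical).
 * Returns the total message length in dwords.
 */
static u32 pack_slpc_request(u32 *msg, u32 event_id, const u32 *args, u32 argc)
{
	u32 i;

	msg[0] = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		 FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		 FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION,
			    GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST);
	msg[1] = FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, event_id) |
		 FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, argc);

	/* argc must not exceed HOST2GUC_PC_SLPC_EVENT_MAX_INPUT_ARGS */
	for (i = 0; i < argc; i++)
		msg[2 + i] = args[i];

	return HOST2GUC_PC_SLPC_REQUEST_MSG_MIN_LEN + argc;
}
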
+
+#endif
index d38935f..99e1fad 100644 (file)
@@ -7,6 +7,111 @@
 #define _ABI_GUC_COMMUNICATION_CTB_ABI_H
 
 #include <linux/types.h>
+#include <linux/build_bug.h>
+
+#include "guc_messages_abi.h"
+
+/**
+ * DOC: CT Buffer
+ *
+ * Circular buffer used to send `CTB Message`_
+ */
+
+/**
+ * DOC: CTB Descriptor
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |  31:0 | **HEAD** - offset (in dwords) to the last dword that was     |
+ *  |   |       | read from the `CT Buffer`_.                                  |
+ *  |   |       | It can only be updated by the receiver.                      |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 1 |  31:0 | **TAIL** - offset (in dwords) to the last dword that was     |
+ *  |   |       | written to the `CT Buffer`_.                                 |
+ *  |   |       | It can only be updated by the sender.                        |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 2 |  31:0 | **STATUS** - status of the CTB                               |
+ *  |   |       |                                                              |
+ *  |   |       |   - _`GUC_CTB_STATUS_NO_ERROR` = 0 (normal operation)        |
+ *  |   |       |   - _`GUC_CTB_STATUS_OVERFLOW` = 1 (head/tail too large)     |
+ *  |   |       |   - _`GUC_CTB_STATUS_UNDERFLOW` = 2 (truncated message)      |
+ *  |   |       |   - _`GUC_CTB_STATUS_MISMATCH` = 4 (head/tail modified)      |
+ *  +---+-------+--------------------------------------------------------------+
+ *  |...|       | RESERVED = MBZ                                               |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 15|  31:0 | RESERVED = MBZ                                               |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+
+struct guc_ct_buffer_desc {
+       u32 head;
+       u32 tail;
+       u32 status;
+#define GUC_CTB_STATUS_NO_ERROR                                0
+#define GUC_CTB_STATUS_OVERFLOW                                (1 << 0)
+#define GUC_CTB_STATUS_UNDERFLOW                       (1 << 1)
+#define GUC_CTB_STATUS_MISMATCH                                (1 << 2)
+       u32 reserved[13];
+} __packed;
+static_assert(sizeof(struct guc_ct_buffer_desc) == 64);
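
/*
 * Illustrative helper only: free space in the circular `CT Buffer`_, in
 * dwords, derived from the descriptor above.  One slot stays unused so that
 * head == tail always means "empty"; size is the buffer size in dwords.
 * The driver proper may track head/tail/space in its own bookkeeping rather
 * than re-reading the descriptor for every computation.
 */
static u32 guc_ct_buffer_space(const struct guc_ct_buffer_desc *desc, u32 size)
{
	u32 head = READ_ONCE(desc->head);
	u32 tail = READ_ONCE(desc->tail);
	u32 used = tail >= head ? tail - head : size - head + tail;

	return size - used - 1;
}
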
+
+/**
+ * DOC: CTB Message
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 | 31:16 | **FENCE** - message identifier                               |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 15:12 | **FORMAT** - format of the CTB message                       |
+ *  |   |       |  - _`GUC_CTB_FORMAT_HXG` = 0 - see `CTB HXG Message`_        |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  11:8 | **RESERVED**                                                 |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |   7:0 | **NUM_DWORDS** - length of the CTB message (w/o header)      |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 1 |  31:0 | optional (depends on FORMAT)                                 |
+ *  +---+-------+                                                              |
+ *  |...|       |                                                              |
+ *  +---+-------+                                                              |
+ *  | n |  31:0 |                                                              |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_CTB_HDR_LEN                                1u
+#define GUC_CTB_MSG_MIN_LEN                    GUC_CTB_HDR_LEN
+#define GUC_CTB_MSG_MAX_LEN                    256u
+#define GUC_CTB_MSG_0_FENCE                    (0xffff << 16)
+#define GUC_CTB_MSG_0_FORMAT                   (0xf << 12)
+#define   GUC_CTB_FORMAT_HXG                   0u
+#define GUC_CTB_MSG_0_RESERVED                 (0xf << 8)
+#define GUC_CTB_MSG_0_NUM_DWORDS               (0xff << 0)
+
+/**
+ * DOC: CTB HXG Message
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 | 31:16 | FENCE                                                        |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 15:12 | FORMAT = GUC_CTB_FORMAT_HXG_                                 |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  11:8 | RESERVED = MBZ                                               |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |   7:0 | NUM_DWORDS = length (in dwords) of the embedded HXG message  |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 1 |  31:0 |  +--------------------------------------------------------+  |
+ *  +---+-------+  |                                                        |  |
+ *  |...|       |  |  Embedded `HXG Message`_                               |  |
+ *  +---+-------+  |                                                        |  |
+ *  | n |  31:0 |  +--------------------------------------------------------+  |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_CTB_HXG_MSG_MIN_LEN                (GUC_CTB_MSG_MIN_LEN + GUC_HXG_MSG_MIN_LEN)
+#define GUC_CTB_HXG_MSG_MAX_LEN                GUC_CTB_MSG_MAX_LEN
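
/*
 * Sketch of wrapping an hxg_len-dword HXG message in a CTB message using
 * the fields above (fence allocation and the write into the ring buffer
 * itself are out of scope; memcpy is from <linux/string.h>):
 */
static void pack_ctb_hxg(u32 *out, u32 fence, const u32 *hxg, u32 hxg_len)
{
	out[0] = FIELD_PREP(GUC_CTB_MSG_0_FENCE, fence) |
		 FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
		 FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, hxg_len);
	memcpy(out + GUC_CTB_HDR_LEN, hxg, hxg_len * sizeof(u32));
}
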
 
 /**
  * DOC: CTB based communication
  * - **flags**, holds various bits to control message handling
  */
 
-/*
- * Describes single command transport buffer.
- * Used by both guc-master and clients.
- */
-struct guc_ct_buffer_desc {
-       u32 addr;               /* gfx address */
-       u64 host_private;       /* host private data */
-       u32 size;               /* size in bytes */
-       u32 head;               /* offset updated by GuC*/
-       u32 tail;               /* offset updated by owner */
-       u32 is_in_error;        /* error indicator */
-       u32 reserved1;
-       u32 reserved2;
-       u32 owner;              /* id of the channel owner */
-       u32 owner_sub_id;       /* owner-defined field for extra tracking */
-       u32 reserved[5];
-} __packed;
-
-/* Type of command transport buffer */
-#define INTEL_GUC_CT_BUFFER_TYPE_SEND  0x0u
-#define INTEL_GUC_CT_BUFFER_TYPE_RECV  0x1u
-
 /*
  * Definition of the command transport message header (DW0)
  *
index be066a6..bbf1ddb 100644 (file)
@@ -7,46 +7,43 @@
 #define _ABI_GUC_COMMUNICATION_MMIO_ABI_H
 
 /**
- * DOC: MMIO based communication
+ * DOC: GuC MMIO based communication
  *
- * The MMIO based communication between Host and GuC uses software scratch
- * registers, where first register holds data treated as message header,
- * and other registers are used to hold message payload.
+ * The MMIO based communication between Host and GuC relies on special
+ * hardware registers whose format can be defined by the software
+ * (so-called scratch registers).
  *
- * For Gen9+, GuC uses software scratch registers 0xC180-0xC1B8,
- * but no H2G command takes more than 8 parameters and the GuC FW
- * itself uses an 8-element array to store the H2G message.
+ * Each MMIO based message, whether Host to GuC (H2G) or GuC to Host (G2H),
+ * is written directly into those scratch registers, so its maximum length
+ * depends on the number of available scratch registers.
  *
- *      +-----------+---------+---------+---------+
- *      |  MMIO[0]  | MMIO[1] |   ...   | MMIO[n] |
- *      +-----------+---------+---------+---------+
- *      | header    |      optional payload       |
- *      +======+====+=========+=========+=========+
- *      | 31:28|type|         |         |         |
- *      +------+----+         |         |         |
- *      | 27:16|data|         |         |         |
- *      +------+----+         |         |         |
- *      |  15:0|code|         |         |         |
- *      +------+----+---------+---------+---------+
+ * For Gen9+, there are 16 software scratch registers 0xC180-0xC1B8,
+ * but no H2G command takes more than 4 parameters and the GuC firmware
+ * itself uses a 4-element array to store the H2G message.
  *
- * The message header consists of:
+ * For Gen11+, there are 4 additional registers 0x190240-0x19024C which,
+ * despite the lower count, are preferred over the legacy ones.
  *
- * - **type**, indicates message type
- * - **code**, indicates message code, is specific for **type**
- * - **data**, indicates message data, optional, depends on **code**
- *
- * The following message **types** are supported:
- *
- * - **REQUEST**, indicates Host-to-GuC request, requested GuC action code
- *   must be priovided in **code** field. Optional action specific parameters
- *   can be provided in remaining payload registers or **data** field.
- *
- * - **RESPONSE**, indicates GuC-to-Host response from earlier GuC request,
- *   action response status will be provided in **code** field. Optional
- *   response data can be returned in remaining payload registers or **data**
- *   field.
+ * The MMIO based communication is mainly used during driver initialization
+ * phase to setup the `CTB based communication`_ that will be used afterwards.
  */
 
-#define GUC_MAX_MMIO_MSG_LEN           8
+#define GUC_MAX_MMIO_MSG_LEN           4
+
+/**
+ * DOC: MMIO HXG Message
+ *
+ * Format of the MMIO messages follows definitions of `HXG Message`_.
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |  31:0 |  +--------------------------------------------------------+  |
+ *  +---+-------+  |                                                        |  |
+ *  |...|       |  |  Embedded `HXG Message`_                               |  |
+ *  +---+-------+  |                                                        |  |
+ *  | n |  31:0 |  +--------------------------------------------------------+  |
+ *  +---+-------+--------------------------------------------------------------+
+ */
 
 #endif /* _ABI_GUC_COMMUNICATION_MMIO_ABI_H */
index 775e21f..29ac823 100644 (file)
@@ -6,6 +6,219 @@
 #ifndef _ABI_GUC_MESSAGES_ABI_H
 #define _ABI_GUC_MESSAGES_ABI_H
 
+/**
+ * DOC: HXG Message
+ *
+ * All messages exchanged with GuC are defined using 32 bit dwords.
+ * First dword is treated as a message header. Remaining dwords are optional.
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  |   |       |                                                              |
+ *  | 0 |    31 | **ORIGIN** - originator of the message                       |
+ *  |   |       |   - _`GUC_HXG_ORIGIN_HOST` = 0                               |
+ *  |   |       |   - _`GUC_HXG_ORIGIN_GUC` = 1                                |
+ *  |   |       |                                                              |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | **TYPE** - message type                                      |
+ *  |   |       |   - _`GUC_HXG_TYPE_REQUEST` = 0                              |
+ *  |   |       |   - _`GUC_HXG_TYPE_EVENT` = 1                                |
+ *  |   |       |   - _`GUC_HXG_TYPE_NO_RESPONSE_BUSY` = 3                     |
+ *  |   |       |   - _`GUC_HXG_TYPE_NO_RESPONSE_RETRY` = 5                    |
+ *  |   |       |   - _`GUC_HXG_TYPE_RESPONSE_FAILURE` = 6                     |
+ *  |   |       |   - _`GUC_HXG_TYPE_RESPONSE_SUCCESS` = 7                     |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  27:0 | **AUX** - auxiliary data (depends on TYPE)                   |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 1 |  31:0 |                                                              |
+ *  +---+-------+                                                              |
+ *  |...|       | **PAYLOAD** - optional payload (depends on TYPE)             |
+ *  +---+-------+                                                              |
+ *  | n |  31:0 |                                                              |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_MSG_MIN_LEN                    1u
+#define GUC_HXG_MSG_0_ORIGIN                   (0x1 << 31)
+#define   GUC_HXG_ORIGIN_HOST                  0u
+#define   GUC_HXG_ORIGIN_GUC                   1u
+#define GUC_HXG_MSG_0_TYPE                     (0x7 << 28)
+#define   GUC_HXG_TYPE_REQUEST                 0u
+#define   GUC_HXG_TYPE_EVENT                   1u
+#define   GUC_HXG_TYPE_NO_RESPONSE_BUSY                3u
+#define   GUC_HXG_TYPE_NO_RESPONSE_RETRY       5u
+#define   GUC_HXG_TYPE_RESPONSE_FAILURE                6u
+#define   GUC_HXG_TYPE_RESPONSE_SUCCESS                7u
+#define GUC_HXG_MSG_0_AUX                      (0xfffffff << 0)
+#define GUC_HXG_MSG_n_PAYLOAD                  (0xffffffff << 0)
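
/*
 * Decoding an HXG header with the masks above is a matter of FIELD_GET()
 * (from <linux/bitfield.h>); this mirrors the checks intel_guc_send_mmio()
 * performs on the first reply register later in this patch set:
 */
static bool hxg_is_response_success(u32 header)
{
	return FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) == GUC_HXG_ORIGIN_GUC &&
	       FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_RESPONSE_SUCCESS;
}
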
+
+/**
+ * DOC: HXG Request
+ *
+ * The `HXG Request`_ message should be used to initiate synchronous activity
+ * for which confirmation or return data is expected.
+ *
+ * The recipient of this message shall use `HXG Response`_, `HXG Failure`_
+ * or `HXG Retry`_ message as a definite reply, and may use `HXG Busy`_
+ * message as an intermediate reply.
+ *
+ * Format of @DATA0 and all @DATAn fields depends on the @ACTION code.
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |    31 | ORIGIN                                                       |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | TYPE = GUC_HXG_TYPE_REQUEST_                                 |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 27:16 | **DATA0** - request data (depends on ACTION)                 |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  15:0 | **ACTION** - requested action code                           |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 1 |  31:0 |                                                              |
+ *  +---+-------+                                                              |
+ *  |...|       | **DATAn** - optional data (depends on ACTION)                |
+ *  +---+-------+                                                              |
+ *  | n |  31:0 |                                                              |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_REQUEST_MSG_MIN_LEN            GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_REQUEST_MSG_0_DATA0            (0xfff << 16)
+#define GUC_HXG_REQUEST_MSG_0_ACTION           (0xffff << 0)
+#define GUC_HXG_REQUEST_MSG_n_DATAn            GUC_HXG_MSG_n_PAYLOAD
+
+/**
+ * DOC: HXG Event
+ *
+ * The `HXG Event`_ message should be used to initiate asynchronous activity
+ * that does not involve immediate confirmation or data.
+ *
+ * Format of @DATA0 and all @DATAn fields depends on the @ACTION code.
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |    31 | ORIGIN                                                       |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | TYPE = GUC_HXG_TYPE_EVENT_                                   |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 27:16 | **DATA0** - event data (depends on ACTION)                   |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  15:0 | **ACTION** - event action code                               |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 1 |  31:0 |                                                              |
+ *  +---+-------+                                                              |
+ *  |...|       | **DATAn** - optional event  data (depends on ACTION)         |
+ *  +---+-------+                                                              |
+ *  | n |  31:0 |                                                              |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_EVENT_MSG_MIN_LEN              GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_EVENT_MSG_0_DATA0              (0xfff << 16)
+#define GUC_HXG_EVENT_MSG_0_ACTION             (0xffff << 0)
+#define GUC_HXG_EVENT_MSG_n_DATAn              GUC_HXG_MSG_n_PAYLOAD
+
+/**
+ * DOC: HXG Busy
+ *
+ * The `HXG Busy`_ message may be used to acknowledge reception of the `HXG Request`_
+ * message if the recipient expects that its processing will take longer than
+ * the default timeout.
+ *
+ * The @COUNTER field may be used as a progress indicator.
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |    31 | ORIGIN                                                       |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | TYPE = GUC_HXG_TYPE_NO_RESPONSE_BUSY_                        |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  27:0 | **COUNTER** - progress indicator                             |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_BUSY_MSG_LEN                   GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_BUSY_MSG_0_COUNTER             GUC_HXG_MSG_0_AUX
+
+/**
+ * DOC: HXG Retry
+ *
+ * The `HXG Retry`_ message should be used by recipient to indicate that the
+ * `HXG Request`_ message was dropped and should be resent.
+ *
+ * The @REASON field may be used to provide additional information.
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |    31 | ORIGIN                                                       |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | TYPE = GUC_HXG_TYPE_NO_RESPONSE_RETRY_                       |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  27:0 | **REASON** - reason for retry                                |
+ *  |   |       |  - _`GUC_HXG_RETRY_REASON_UNSPECIFIED` = 0                   |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_RETRY_MSG_LEN                  GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_RETRY_MSG_0_REASON             GUC_HXG_MSG_0_AUX
+#define   GUC_HXG_RETRY_REASON_UNSPECIFIED     0u
+
+/**
+ * DOC: HXG Failure
+ *
+ * The `HXG Failure`_ message shall be used as a reply to the `HXG Request`_
+ * message that could not be processed due to an error.
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |    31 | ORIGIN                                                       |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_FAILURE_                        |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 27:16 | **HINT** - additional error hint                             |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  15:0 | **ERROR** - error/result code                                |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_FAILURE_MSG_LEN                        GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_FAILURE_MSG_0_HINT             (0xfff << 16)
+#define GUC_HXG_FAILURE_MSG_0_ERROR            (0xffff << 0)
+
+/**
+ * DOC: HXG Response
+ *
+ * The `HXG Response`_ message shall be used as a reply to the `HXG Request`_
+ * message that was successfully processed without an error.
+ *
+ *  +---+-------+--------------------------------------------------------------+
+ *  |   | Bits  | Description                                                  |
+ *  +===+=======+==============================================================+
+ *  | 0 |    31 | ORIGIN                                                       |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   | 30:28 | TYPE = GUC_HXG_TYPE_RESPONSE_SUCCESS_                        |
+ *  |   +-------+--------------------------------------------------------------+
+ *  |   |  27:0 | **DATA0** - data (depends on ACTION from `HXG Request`_)     |
+ *  +---+-------+--------------------------------------------------------------+
+ *  | 1 |  31:0 |                                                              |
+ *  +---+-------+                                                              |
+ *  |...|       | **DATAn** - data (depends on ACTION from `HXG Request`_)     |
+ *  +---+-------+                                                              |
+ *  | n |  31:0 |                                                              |
+ *  +---+-------+--------------------------------------------------------------+
+ */
+
+#define GUC_HXG_RESPONSE_MSG_MIN_LEN           GUC_HXG_MSG_MIN_LEN
+#define GUC_HXG_RESPONSE_MSG_0_DATA0           GUC_HXG_MSG_0_AUX
+#define GUC_HXG_RESPONSE_MSG_n_DATAn           GUC_HXG_MSG_n_PAYLOAD
+
+/* deprecated */
 #define INTEL_GUC_MSG_TYPE_SHIFT       28
 #define INTEL_GUC_MSG_TYPE_MASK                (0xF << INTEL_GUC_MSG_TYPE_SHIFT)
 #define INTEL_GUC_MSG_DATA_SHIFT       16
index f147cb3..fbfcae7 100644 (file)
@@ -7,6 +7,7 @@
 #include "gt/intel_gt_irq.h"
 #include "gt/intel_gt_pm_irq.h"
 #include "intel_guc.h"
+#include "intel_guc_slpc.h"
 #include "intel_guc_ads.h"
 #include "intel_guc_submission.h"
 #include "i915_drv.h"
@@ -157,6 +158,8 @@ void intel_guc_init_early(struct intel_guc *guc)
        intel_guc_ct_init_early(&guc->ct);
        intel_guc_log_init_early(&guc->log);
        intel_guc_submission_init_early(guc);
+       intel_guc_slpc_init_early(&guc->slpc);
+       intel_guc_rc_init_early(guc);
 
        mutex_init(&guc->send_mutex);
        spin_lock_init(&guc->irq_lock);
@@ -180,6 +183,11 @@ void intel_guc_init_early(struct intel_guc *guc)
        }
 }
 
+void intel_guc_init_late(struct intel_guc *guc)
+{
+       intel_guc_ads_init_late(guc);
+}
+
 static u32 guc_ctl_debug_flags(struct intel_guc *guc)
 {
        u32 level = intel_guc_log_get_level(&guc->log);
@@ -201,6 +209,9 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
        if (!intel_guc_submission_is_used(guc))
                flags |= GUC_CTL_DISABLE_SCHEDULER;
 
+       if (intel_guc_slpc_is_used(guc))
+               flags |= GUC_CTL_ENABLE_SLPC;
+
        return flags;
 }
 
@@ -219,24 +230,19 @@ static u32 guc_ctl_log_params_flags(struct intel_guc *guc)
 
        BUILD_BUG_ON(!CRASH_BUFFER_SIZE);
        BUILD_BUG_ON(!IS_ALIGNED(CRASH_BUFFER_SIZE, UNIT));
-       BUILD_BUG_ON(!DPC_BUFFER_SIZE);
-       BUILD_BUG_ON(!IS_ALIGNED(DPC_BUFFER_SIZE, UNIT));
-       BUILD_BUG_ON(!ISR_BUFFER_SIZE);
-       BUILD_BUG_ON(!IS_ALIGNED(ISR_BUFFER_SIZE, UNIT));
+       BUILD_BUG_ON(!DEBUG_BUFFER_SIZE);
+       BUILD_BUG_ON(!IS_ALIGNED(DEBUG_BUFFER_SIZE, UNIT));
 
        BUILD_BUG_ON((CRASH_BUFFER_SIZE / UNIT - 1) >
                        (GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT));
-       BUILD_BUG_ON((DPC_BUFFER_SIZE / UNIT - 1) >
-                       (GUC_LOG_DPC_MASK >> GUC_LOG_DPC_SHIFT));
-       BUILD_BUG_ON((ISR_BUFFER_SIZE / UNIT - 1) >
-                       (GUC_LOG_ISR_MASK >> GUC_LOG_ISR_SHIFT));
+       BUILD_BUG_ON((DEBUG_BUFFER_SIZE / UNIT - 1) >
+                       (GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT));
 
        flags = GUC_LOG_VALID |
                GUC_LOG_NOTIFY_ON_HALF_FULL |
                FLAG |
                ((CRASH_BUFFER_SIZE / UNIT - 1) << GUC_LOG_CRASH_SHIFT) |
-               ((DPC_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DPC_SHIFT) |
-               ((ISR_BUFFER_SIZE / UNIT - 1) << GUC_LOG_ISR_SHIFT) |
+               ((DEBUG_BUFFER_SIZE / UNIT - 1) << GUC_LOG_DEBUG_SHIFT) |
                (offset << GUC_LOG_BUF_ADDR_SHIFT);
 
        #undef UNIT
@@ -331,6 +337,12 @@ int intel_guc_init(struct intel_guc *guc)
                        goto err_ct;
        }
 
+       if (intel_guc_slpc_is_used(guc)) {
+               ret = intel_guc_slpc_init(&guc->slpc);
+               if (ret)
+                       goto err_submission;
+       }
+
        /* now that everything is perma-pinned, initialize the parameters */
        guc_init_params(guc);
 
@@ -341,6 +353,8 @@ int intel_guc_init(struct intel_guc *guc)
 
        return 0;
 
+err_submission:
+       intel_guc_submission_fini(guc);
 err_ct:
        intel_guc_ct_fini(&guc->ct);
 err_ads:
@@ -363,6 +377,9 @@ void intel_guc_fini(struct intel_guc *guc)
 
        i915_ggtt_disable_guc(gt->ggtt);
 
+       if (intel_guc_slpc_is_used(guc))
+               intel_guc_slpc_fini(&guc->slpc);
+
        if (intel_guc_submission_is_used(guc))
                intel_guc_submission_fini(guc);
 
@@ -376,29 +393,27 @@ void intel_guc_fini(struct intel_guc *guc)
 /*
  * This function implements the MMIO based host to GuC interface.
  */
-int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
+int intel_guc_send_mmio(struct intel_guc *guc, const u32 *request, u32 len,
                        u32 *response_buf, u32 response_buf_size)
 {
+       struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
        struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
-       u32 status;
+       u32 header;
        int i;
        int ret;
 
        GEM_BUG_ON(!len);
        GEM_BUG_ON(len > guc->send_regs.count);
 
-       /* We expect only action code */
-       GEM_BUG_ON(*action & ~INTEL_GUC_MSG_CODE_MASK);
-
-       /* If CT is available, we expect to use MMIO only during init/fini */
-       GEM_BUG_ON(*action != INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER &&
-                  *action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
+       GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, request[0]) != GUC_HXG_ORIGIN_HOST);
+       GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, request[0]) != GUC_HXG_TYPE_REQUEST);
 
        mutex_lock(&guc->send_mutex);
        intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
 
+retry:
        for (i = 0; i < len; i++)
-               intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);
+               intel_uncore_write(uncore, guc_send_reg(guc, i), request[i]);
 
        intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));
 
@@ -410,30 +425,74 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
         */
        ret = __intel_wait_for_register_fw(uncore,
                                           guc_send_reg(guc, 0),
-                                          INTEL_GUC_MSG_TYPE_MASK,
-                                          INTEL_GUC_MSG_TYPE_RESPONSE <<
-                                          INTEL_GUC_MSG_TYPE_SHIFT,
-                                          10, 10, &status);
-       /* If GuC explicitly returned an error, convert it to -EIO */
-       if (!ret && !INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(status))
-               ret = -EIO;
+                                          GUC_HXG_MSG_0_ORIGIN,
+                                          FIELD_PREP(GUC_HXG_MSG_0_ORIGIN,
+                                                     GUC_HXG_ORIGIN_GUC),
+                                          10, 10, &header);
+       if (unlikely(ret)) {
+timeout:
+               drm_err(&i915->drm, "mmio request %#x: no reply %x\n",
+                       request[0], header);
+               goto out;
+       }
 
-       if (ret) {
-               DRM_ERROR("MMIO: GuC action %#x failed with error %d %#x\n",
-                         action[0], ret, status);
+       if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_BUSY) {
+#define done ({ header = intel_uncore_read(uncore, guc_send_reg(guc, 0)); \
+               FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != GUC_HXG_ORIGIN_GUC || \
+               FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_NO_RESPONSE_BUSY; })
+
+               ret = wait_for(done, 1000);
+               if (unlikely(ret))
+                       goto timeout;
+               if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) !=
+                                      GUC_HXG_ORIGIN_GUC))
+                       goto proto;
+#undef done
+       }
+
+       if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_NO_RESPONSE_RETRY) {
+               u32 reason = FIELD_GET(GUC_HXG_RETRY_MSG_0_REASON, header);
+
+               drm_dbg(&i915->drm, "mmio request %#x: retrying, reason %u\n",
+                       request[0], reason);
+               goto retry;
+       }
+
+       if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) == GUC_HXG_TYPE_RESPONSE_FAILURE) {
+               u32 hint = FIELD_GET(GUC_HXG_FAILURE_MSG_0_HINT, header);
+               u32 error = FIELD_GET(GUC_HXG_FAILURE_MSG_0_ERROR, header);
+
+               drm_err(&i915->drm, "mmio request %#x: failure %x/%u\n",
+                       request[0], error, hint);
+               ret = -ENXIO;
+               goto out;
+       }
+
+       if (FIELD_GET(GUC_HXG_MSG_0_TYPE, header) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
+proto:
+               drm_err(&i915->drm, "mmio request %#x: unexpected reply %#x\n",
+                       request[0], header);
+               ret = -EPROTO;
                goto out;
        }
 
        if (response_buf) {
-               int count = min(response_buf_size, guc->send_regs.count - 1);
+               int count = min(response_buf_size, guc->send_regs.count);
+
+               GEM_BUG_ON(!count);
 
-               for (i = 0; i < count; i++)
+               response_buf[0] = header;
+
+               for (i = 1; i < count; i++)
                        response_buf[i] = intel_uncore_read(uncore,
-                                                           guc_send_reg(guc, i + 1));
-       }
+                                                           guc_send_reg(guc, i));
 
-       /* Use data from the GuC response as our return value */
-       ret = INTEL_GUC_MSG_TO_DATA(status);
+               /* Use number of copied dwords as our return value */
+               ret = count;
+       } else {
+               /* Use data from the GuC response as our return value */
+               ret = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, header);
+       }
 
 out:
        intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
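A minimal sketch of the HXG header handling the hunk above relies on, using only the GUC_HXG_* masks and the FIELD_PREP()/FIELD_GET() helpers already visible in the new code; `action`, `reply` and `data0` are hypothetical locals, not part of the patch:

	/* compose dword 0 of a host-originated HXG request */
	u32 hdr = FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
		  FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
		  FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, action);

	/* decode the reply: origin must be GuC, type picks the branch taken above */
	if (FIELD_GET(GUC_HXG_MSG_0_ORIGIN, reply) == GUC_HXG_ORIGIN_GUC &&
	    FIELD_GET(GUC_HXG_MSG_0_TYPE, reply) == GUC_HXG_TYPE_RESPONSE_SUCCESS)
		data0 = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, reply);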
@@ -487,65 +546,35 @@ int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset)
  */
 int intel_guc_suspend(struct intel_guc *guc)
 {
-       struct intel_uncore *uncore = guc_to_gt(guc)->uncore;
        int ret;
-       u32 status;
        u32 action[] = {
-               INTEL_GUC_ACTION_ENTER_S_STATE,
-               GUC_POWER_D1, /* any value greater than GUC_POWER_D0 */
+               INTEL_GUC_ACTION_RESET_CLIENT,
        };
 
-       /*
-        * If GuC communication is enabled but submission is not supported,
-        * we do not need to suspend the GuC.
-        */
-       if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
+       if (!intel_guc_is_ready(guc))
                return 0;
 
-       /*
-        * The ENTER_S_STATE action queues the save/restore operation in GuC FW
-        * and then returns, so waiting on the H2G is not enough to guarantee
-        * GuC is done. When all the processing is done, GuC writes
-        * INTEL_GUC_SLEEP_STATE_SUCCESS to scratch register 14, so we can poll
-        * on that. Note that GuC does not ensure that the value in the register
-        * is different from INTEL_GUC_SLEEP_STATE_SUCCESS while the action is
-        * in progress so we need to take care of that ourselves as well.
-        */
-
-       intel_uncore_write(uncore, SOFT_SCRATCH(14),
-                          INTEL_GUC_SLEEP_STATE_INVALID_MASK);
-
-       ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
-       if (ret)
-               return ret;
-
-       ret = __intel_wait_for_register(uncore, SOFT_SCRATCH(14),
-                                       INTEL_GUC_SLEEP_STATE_INVALID_MASK,
-                                       0, 0, 10, &status);
-       if (ret)
-               return ret;
-
-       if (status != INTEL_GUC_SLEEP_STATE_SUCCESS) {
-               DRM_ERROR("GuC failed to change sleep state. "
-                         "action=0x%x, err=%u\n",
-                         action[0], status);
-               return -EIO;
+       if (intel_guc_submission_is_used(guc)) {
+               /*
+                * This H2G MMIO command tears down the GuC in two steps. First it will
+                * generate a G2H CTB for every active context indicating a reset. In
+                * practice the i915 shouldn't ever get a G2H as suspend should only be
+                * called when the GPU is idle. Next, it tears down the CTBs and this
+                * H2G MMIO command completes.
+                *
+                * Don't abort on a failure code from the GuC. Keep going and do the
+                * clean up in sanitize() and re-initialisation on resume, and
+                * hopefully the error here won't be problematic.
+                */
+               ret = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+               if (ret)
+                       DRM_ERROR("GuC suspend: RESET_CLIENT action failed with error %d!\n", ret);
        }
 
-       return 0;
-}
-
-/**
- * intel_guc_reset_engine() - ask GuC to reset an engine
- * @guc:       intel_guc structure
- * @engine:    engine to be reset
- */
-int intel_guc_reset_engine(struct intel_guc *guc,
-                          struct intel_engine_cs *engine)
-{
-       /* XXX: to be implemented with submission interface rework */
+       /* Signal that the GuC isn't running. */
+       intel_guc_sanitize(guc);
 
-       return -ENODEV;
+       return 0;
 }
 
 /**
@@ -554,7 +583,12 @@ int intel_guc_reset_engine(struct intel_guc *guc,
  */
 int intel_guc_resume(struct intel_guc *guc)
 {
-       /* XXX: to be implemented with submission interface rework */
+       /*
+        * NB: This function can still be called even if GuC submission is
+        * disabled, e.g. if GuC is enabled for HuC authentication only. Thus,
+        * if any code is later added here, it must support doing nothing
+        * if submission is disabled (as per intel_guc_suspend).
+        */
        return 0;
 }
 
index 4abc59f..2e27fe5 100644 (file)
@@ -6,12 +6,16 @@
 #ifndef _INTEL_GUC_H_
 #define _INTEL_GUC_H_
 
+#include <linux/xarray.h>
+#include <linux/delay.h>
+
 #include "intel_uncore.h"
 #include "intel_guc_fw.h"
 #include "intel_guc_fwif.h"
 #include "intel_guc_ct.h"
 #include "intel_guc_log.h"
 #include "intel_guc_reg.h"
+#include "intel_guc_slpc_types.h"
 #include "intel_uc_fw.h"
 #include "i915_utils.h"
 #include "i915_vma.h"
@@ -27,24 +31,47 @@ struct intel_guc {
        struct intel_uc_fw fw;
        struct intel_guc_log log;
        struct intel_guc_ct ct;
+       struct intel_guc_slpc slpc;
+
+       /* Global engine used to submit requests to GuC */
+       struct i915_sched_engine *sched_engine;
+       struct i915_request *stalled_request;
 
        /* intel_guc_recv interrupt related state */
        spinlock_t irq_lock;
        unsigned int msg_enabled_mask;
 
+       atomic_t outstanding_submission_g2h;
+
        struct {
                void (*reset)(struct intel_guc *guc);
                void (*enable)(struct intel_guc *guc);
                void (*disable)(struct intel_guc *guc);
        } interrupts;
 
+       /*
+        * contexts_lock protects the pool of free guc ids and a linked list of
+        * guc ids available to be stolen
+        */
+       spinlock_t contexts_lock;
+       struct ida guc_ids;
+       struct list_head guc_id_list;
+
+       bool submission_supported;
        bool submission_selected;
+       bool rc_supported;
+       bool rc_selected;
 
        struct i915_vma *ads_vma;
        struct __guc_ads_blob *ads_blob;
+       u32 ads_regset_size;
+       u32 ads_golden_ctxt_size;
 
-       struct i915_vma *stage_desc_pool;
-       void *stage_desc_pool_vaddr;
+       struct i915_vma *lrc_desc_pool;
+       void *lrc_desc_pool_vaddr;
+
+       /* guc_id to intel_context lookup */
+       struct xarray context_lookup;
 
        /* Control params for fw initialization */
        u32 params[GUC_CTL_MAX_DWORDS];
@@ -74,7 +101,15 @@ static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
 static
 inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
 {
-       return intel_guc_ct_send(&guc->ct, action, len, NULL, 0);
+       return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0);
+}
+
+static
+inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action, u32 len,
+                            u32 g2h_len_dw)
+{
+       return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
+                                MAKE_SEND_FLAGS(g2h_len_dw));
 }
 
 static inline int
@@ -82,7 +117,43 @@ intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
                           u32 *response_buf, u32 response_buf_size)
 {
        return intel_guc_ct_send(&guc->ct, action, len,
-                                response_buf, response_buf_size);
+                                response_buf, response_buf_size, 0);
+}
+
+static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
+                                          const u32 *action,
+                                          u32 len,
+                                          u32 g2h_len_dw,
+                                          bool loop)
+{
+       int err;
+       unsigned int sleep_period_ms = 1;
+       bool not_atomic = !in_atomic() && !irqs_disabled();
+
+       /*
+        * FIXME: Have caller pass in if we are in an atomic context to avoid
+        * using in_atomic(). It is likely safe here as we check for disabled
+        * irqs, which basically all the spin locks in the i915 do, but
+        * regardless this should be cleaned up.
+        */
+
+       /* No sleeping with spin locks, just busy loop */
+       might_sleep_if(loop && not_atomic);
+
+retry:
+       err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
+       if (unlikely(err == -EBUSY && loop)) {
+               if (likely(not_atomic)) {
+                       if (msleep_interruptible(sleep_period_ms))
+                               return -EINTR;
+                       sleep_period_ms = sleep_period_ms << 1;
+               } else {
+                       cpu_relax();
+               }
+               goto retry;
+       }
+
+       return err;
 }
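A hedged usage sketch for the new busy-loop helper: callers that must get an H2G through and can tolerate blocking pass loop=true, as guc_action_policies_update() does later in this patch; action[] is whatever H2G the caller is sending and g2h_len_dw the expected reply size in dwords (0 if none):

	/* retries on -EBUSY: sleeps with exponential backoff when not atomic,
	 * otherwise spins with cpu_relax() */
	err = intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action),
				       0 /* g2h_len_dw */, true /* loop */);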
 
 static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
@@ -118,6 +189,7 @@ static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
 }
 
 void intel_guc_init_early(struct intel_guc *guc);
+void intel_guc_init_late(struct intel_guc *guc);
 void intel_guc_init_send_regs(struct intel_guc *guc);
 void intel_guc_write_params(struct intel_guc *guc);
 int intel_guc_init(struct intel_guc *guc);
@@ -160,9 +232,25 @@ static inline bool intel_guc_is_ready(struct intel_guc *guc)
        return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
 }
 
+static inline void intel_guc_reset_interrupts(struct intel_guc *guc)
+{
+       guc->interrupts.reset(guc);
+}
+
+static inline void intel_guc_enable_interrupts(struct intel_guc *guc)
+{
+       guc->interrupts.enable(guc);
+}
+
+static inline void intel_guc_disable_interrupts(struct intel_guc *guc)
+{
+       guc->interrupts.disable(guc);
+}
+
 static inline int intel_guc_sanitize(struct intel_guc *guc)
 {
        intel_uc_fw_sanitize(&guc->fw);
+       intel_guc_disable_interrupts(guc);
        intel_guc_ct_sanitize(&guc->ct);
        guc->mmio_msg = 0;
 
@@ -183,8 +271,27 @@ static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
        spin_unlock_irq(&guc->irq_lock);
 }
 
-int intel_guc_reset_engine(struct intel_guc *guc,
-                          struct intel_engine_cs *engine);
+int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);
+
+int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
+                                         const u32 *msg, u32 len);
+int intel_guc_sched_done_process_msg(struct intel_guc *guc,
+                                    const u32 *msg, u32 len);
+int intel_guc_context_reset_process_msg(struct intel_guc *guc,
+                                       const u32 *msg, u32 len);
+int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
+                                        const u32 *msg, u32 len);
+
+void intel_guc_find_hung_context(struct intel_engine_cs *engine);
+
+int intel_guc_global_policies_update(struct intel_guc *guc);
+
+void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);
+
+void intel_guc_submission_reset_prepare(struct intel_guc *guc);
+void intel_guc_submission_reset(struct intel_guc *guc, bool stalled);
+void intel_guc_submission_reset_finish(struct intel_guc *guc);
+void intel_guc_submission_cancel_requests(struct intel_guc *guc);
 
 void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
 
index 9abfbc6..6926919 100644 (file)
@@ -3,8 +3,11 @@
  * Copyright © 2014-2019 Intel Corporation
  */
 
+#include <linux/bsearch.h>
+
 #include "gt/intel_gt.h"
 #include "gt/intel_lrc.h"
+#include "gt/shmem_utils.h"
 #include "intel_guc_ads.h"
 #include "intel_guc_fwif.h"
 #include "intel_uc.h"
  *      | guc_policies                          |
  *      +---------------------------------------+
  *      | guc_gt_system_info                    |
- *      +---------------------------------------+
- *      | guc_clients_info                      |
- *      +---------------------------------------+
- *      | guc_ct_pool_entry[size]               |
+ *      +---------------------------------------+ <== static
+ *      | guc_mmio_reg[countA] (engine 0.0)     |
+ *      | guc_mmio_reg[countB] (engine 0.1)     |
+ *      | guc_mmio_reg[countC] (engine 1.0)     |
+ *      |   ...                                 |
+ *      +---------------------------------------+ <== dynamic
+ *      | padding                               |
+ *      +---------------------------------------+ <== 4K aligned
+ *      | golden contexts                       |
  *      +---------------------------------------+
  *      | padding                               |
  *      +---------------------------------------+ <== 4K aligned
@@ -39,18 +47,49 @@ struct __guc_ads_blob {
        struct guc_ads ads;
        struct guc_policies policies;
        struct guc_gt_system_info system_info;
-       struct guc_clients_info clients_info;
-       struct guc_ct_pool_entry ct_pool[GUC_CT_POOL_SIZE];
+       /* From here on, location is dynamic! Refer to above diagram. */
+       struct guc_mmio_reg regset[0];
 } __packed;
 
+static u32 guc_ads_regset_size(struct intel_guc *guc)
+{
+       GEM_BUG_ON(!guc->ads_regset_size);
+       return guc->ads_regset_size;
+}
+
+static u32 guc_ads_golden_ctxt_size(struct intel_guc *guc)
+{
+       return PAGE_ALIGN(guc->ads_golden_ctxt_size);
+}
+
 static u32 guc_ads_private_data_size(struct intel_guc *guc)
 {
        return PAGE_ALIGN(guc->fw.private_data_size);
 }
 
+static u32 guc_ads_regset_offset(struct intel_guc *guc)
+{
+       return offsetof(struct __guc_ads_blob, regset);
+}
+
+static u32 guc_ads_golden_ctxt_offset(struct intel_guc *guc)
+{
+       u32 offset;
+
+       offset = guc_ads_regset_offset(guc) +
+                guc_ads_regset_size(guc);
+
+       return PAGE_ALIGN(offset);
+}
+
 static u32 guc_ads_private_data_offset(struct intel_guc *guc)
 {
-       return PAGE_ALIGN(sizeof(struct __guc_ads_blob));
+       u32 offset;
+
+       offset = guc_ads_golden_ctxt_offset(guc) +
+                guc_ads_golden_ctxt_size(guc);
+
+       return PAGE_ALIGN(offset);
 }
 
 static u32 guc_ads_blob_size(struct intel_guc *guc)
@@ -59,36 +98,66 @@ static u32 guc_ads_blob_size(struct intel_guc *guc)
               guc_ads_private_data_size(guc);
 }
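To make the dynamic ADS layout concrete, a worked example with hypothetical sizes (the real values come from guc_mmio_reg_state_query() and guc_prep_golden_context() at runtime):

	/*
	 * Assume: offsetof(struct __guc_ads_blob, regset) = 0x800,
	 *         ads_regset_size = 0x600, ads_golden_ctxt_size = 0x15000,
	 *         fw private_data_size = 0x1000 (all values illustrative).
	 *
	 * guc_ads_regset_offset()       = 0x800
	 * guc_ads_golden_ctxt_offset()  = PAGE_ALIGN(0x800 + 0x600)    = 0x1000
	 * guc_ads_private_data_offset() = PAGE_ALIGN(0x1000 + 0x15000) = 0x16000
	 * guc_ads_blob_size()           = 0x16000 + PAGE_ALIGN(0x1000) = 0x17000
	 */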
 
-static void guc_policy_init(struct guc_policy *policy)
+static void guc_policies_init(struct intel_guc *guc, struct guc_policies *policies)
 {
-       policy->execution_quantum = POLICY_DEFAULT_EXECUTION_QUANTUM_US;
-       policy->preemption_time = POLICY_DEFAULT_PREEMPTION_TIME_US;
-       policy->fault_time = POLICY_DEFAULT_FAULT_TIME_US;
-       policy->policy_flags = 0;
+       struct intel_gt *gt = guc_to_gt(guc);
+       struct drm_i915_private *i915 = gt->i915;
+
+       policies->dpc_promote_time = GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US;
+       policies->max_num_work_items = GLOBAL_POLICY_MAX_NUM_WI;
+
+       policies->global_flags = 0;
+       if (i915->params.reset < 2)
+               policies->global_flags |= GLOBAL_POLICY_DISABLE_ENGINE_RESET;
+
+       policies->is_valid = 1;
 }
 
-static void guc_policies_init(struct guc_policies *policies)
+void intel_guc_ads_print_policy_info(struct intel_guc *guc,
+                                    struct drm_printer *dp)
 {
-       struct guc_policy *policy;
-       u32 p, i;
+       struct __guc_ads_blob *blob = guc->ads_blob;
 
-       policies->dpc_promote_time = POLICY_DEFAULT_DPC_PROMOTE_TIME_US;
-       policies->max_num_work_items = POLICY_MAX_NUM_WI;
+       if (unlikely(!blob))
+               return;
 
-       for (p = 0; p < GUC_CLIENT_PRIORITY_NUM; p++) {
-               for (i = 0; i < GUC_MAX_ENGINE_CLASSES; i++) {
-                       policy = &policies->policy[p][i];
+       drm_printf(dp, "Global scheduling policies:\n");
+       drm_printf(dp, "  DPC promote time   = %u\n", blob->policies.dpc_promote_time);
+       drm_printf(dp, "  Max num work items = %u\n", blob->policies.max_num_work_items);
+       drm_printf(dp, "  Flags              = %u\n", blob->policies.global_flags);
+}
 
-                       guc_policy_init(policy);
-               }
-       }
+static int guc_action_policies_update(struct intel_guc *guc, u32 policy_offset)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_GLOBAL_SCHED_POLICY_CHANGE,
+               policy_offset
+       };
 
-       policies->is_valid = 1;
+       return intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
 }
 
-static void guc_ct_pool_entries_init(struct guc_ct_pool_entry *pool, u32 num)
+int intel_guc_global_policies_update(struct intel_guc *guc)
 {
-       memset(pool, 0, num * sizeof(*pool));
+       struct __guc_ads_blob *blob = guc->ads_blob;
+       struct intel_gt *gt = guc_to_gt(guc);
+       intel_wakeref_t wakeref;
+       int ret;
+
+       if (!blob)
+               return -EOPNOTSUPP;
+
+       GEM_BUG_ON(!blob->ads.scheduler_policies);
+
+       guc_policies_init(guc, &blob->policies);
+
+       if (!intel_guc_is_ready(guc))
+               return 0;
+
+       with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
+               ret = guc_action_policies_update(guc, blob->ads.scheduler_policies);
+
+       return ret;
 }
 
 static void guc_mapping_table_init(struct intel_gt *gt,
@@ -113,53 +182,324 @@ static void guc_mapping_table_init(struct intel_gt *gt,
 }
 
 /*
- * The first 80 dwords of the register state context, containing the
- * execlists and ppgtt registers.
+ * The save/restore register list must be pre-calculated to a temporary
+ * buffer of driver defined size before it can be generated in place
+ * inside the ADS.
  */
-#define LR_HW_CONTEXT_SIZE     (80 * sizeof(u32))
+#define MAX_MMIO_REGS  128     /* Arbitrary size, increase as needed */
+struct temp_regset {
+       struct guc_mmio_reg *registers;
+       u32 used;
+       u32 size;
+};
 
-static void __guc_ads_init(struct intel_guc *guc)
+static int guc_mmio_reg_cmp(const void *a, const void *b)
+{
+       const struct guc_mmio_reg *ra = a;
+       const struct guc_mmio_reg *rb = b;
+
+       return (int)ra->offset - (int)rb->offset;
+}
+
+static void guc_mmio_reg_add(struct temp_regset *regset,
+                            u32 offset, u32 flags)
+{
+       u32 count = regset->used;
+       struct guc_mmio_reg reg = {
+               .offset = offset,
+               .flags = flags,
+       };
+       struct guc_mmio_reg *slot;
+
+       GEM_BUG_ON(count >= regset->size);
+
+       /*
+        * The mmio list is built using separate lists within the driver.
+        * It's possible that at some point we may attempt to add the same
+        * register more than once. Do not consider this an error; silently
+        * move on if the register is already in the list.
+        */
+       if (bsearch(&reg, regset->registers, count,
+                   sizeof(reg), guc_mmio_reg_cmp))
+               return;
+
+       slot = &regset->registers[count];
+       regset->used++;
+       *slot = reg;
+
+       while (slot-- > regset->registers) {
+               GEM_BUG_ON(slot[0].offset == slot[1].offset);
+               if (slot[1].offset > slot[0].offset)
+                       break;
+
+               swap(slot[1], slot[0]);
+       }
+}
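The array is kept sorted by offset so the bsearch() duplicate check above stays valid; a hedged trace with made-up offsets:

	/*
	 * registers[] = { 0x2000, 0x2080, 0x20C0 }, then add offset 0x2028:
	 *  - bsearch() finds no duplicate, so the entry is appended at the end
	 *  - the while loop swaps it down past 0x20C0 and 0x2080
	 *  - result: { 0x2000, 0x2028, 0x2080, 0x20C0 }, still sorted by offset
	 */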
+
+#define GUC_MMIO_REG_ADD(regset, reg, masked) \
+       guc_mmio_reg_add(regset, \
+                        i915_mmio_reg_offset((reg)), \
+                        (masked) ? GUC_REGSET_MASKED : 0)
+
+static void guc_mmio_regset_init(struct temp_regset *regset,
+                                struct intel_engine_cs *engine)
+{
+       const u32 base = engine->mmio_base;
+       struct i915_wa_list *wal = &engine->wa_list;
+       struct i915_wa *wa;
+       unsigned int i;
+
+       regset->used = 0;
+
+       GUC_MMIO_REG_ADD(regset, RING_MODE_GEN7(base), true);
+       GUC_MMIO_REG_ADD(regset, RING_HWS_PGA(base), false);
+       GUC_MMIO_REG_ADD(regset, RING_IMR(base), false);
+
+       for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
+               GUC_MMIO_REG_ADD(regset, wa->reg, wa->masked_reg);
+
+       /* Be extra paranoid and include all whitelist registers. */
+       for (i = 0; i < RING_MAX_NONPRIV_SLOTS; i++)
+               GUC_MMIO_REG_ADD(regset,
+                                RING_FORCE_TO_NONPRIV(base, i),
+                                false);
+
+       /* add in local MOCS registers */
+       for (i = 0; i < GEN9_LNCFCMOCS_REG_COUNT; i++)
+               GUC_MMIO_REG_ADD(regset, GEN9_LNCFCMOCS(i), false);
+}
+
+static int guc_mmio_reg_state_query(struct intel_guc *guc)
 {
        struct intel_gt *gt = guc_to_gt(guc);
-       struct drm_i915_private *i915 = gt->i915;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       struct temp_regset temp_set;
+       u32 total;
+
+       /*
+        * Need to actually build the list in order to filter out
+        * duplicates and other such data dependent constructions.
+        */
+       temp_set.size = MAX_MMIO_REGS;
+       temp_set.registers = kmalloc_array(temp_set.size,
+                                          sizeof(*temp_set.registers),
+                                          GFP_KERNEL);
+       if (!temp_set.registers)
+               return -ENOMEM;
+
+       total = 0;
+       for_each_engine(engine, gt, id) {
+               guc_mmio_regset_init(&temp_set, engine);
+               total += temp_set.used;
+       }
+
+       kfree(temp_set.registers);
+
+       return total * sizeof(struct guc_mmio_reg);
+}
+
+static void guc_mmio_reg_state_init(struct intel_guc *guc,
+                                   struct __guc_ads_blob *blob)
+{
+       struct intel_gt *gt = guc_to_gt(guc);
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+       struct temp_regset temp_set;
+       struct guc_mmio_reg_set *ads_reg_set;
+       u32 addr_ggtt, offset;
+       u8 guc_class;
+
+       offset = guc_ads_regset_offset(guc);
+       addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+       temp_set.registers = (struct guc_mmio_reg *)(((u8 *)blob) + offset);
+       temp_set.size = guc->ads_regset_size / sizeof(temp_set.registers[0]);
+
+       for_each_engine(engine, gt, id) {
+               /* Class index is checked in class converter */
+               GEM_BUG_ON(engine->instance >= GUC_MAX_INSTANCES_PER_CLASS);
+
+               guc_class = engine_class_to_guc_class(engine->class);
+               ads_reg_set = &blob->ads.reg_state_list[guc_class][engine->instance];
+
+               guc_mmio_regset_init(&temp_set, engine);
+               if (!temp_set.used) {
+                       ads_reg_set->address = 0;
+                       ads_reg_set->count = 0;
+                       continue;
+               }
+
+               ads_reg_set->address = addr_ggtt;
+               ads_reg_set->count = temp_set.used;
+
+               temp_set.size -= temp_set.used;
+               temp_set.registers += temp_set.used;
+               addr_ggtt += temp_set.used * sizeof(struct guc_mmio_reg);
+       }
+
+       GEM_BUG_ON(temp_set.size);
+}
+
+static void fill_engine_enable_masks(struct intel_gt *gt,
+                                    struct guc_gt_system_info *info)
+{
+       info->engine_enabled_masks[GUC_RENDER_CLASS] = 1;
+       info->engine_enabled_masks[GUC_BLITTER_CLASS] = 1;
+       info->engine_enabled_masks[GUC_VIDEO_CLASS] = VDBOX_MASK(gt);
+       info->engine_enabled_masks[GUC_VIDEOENHANCE_CLASS] = VEBOX_MASK(gt);
+}
+
+static int guc_prep_golden_context(struct intel_guc *guc,
+                                  struct __guc_ads_blob *blob)
+{
+       struct intel_gt *gt = guc_to_gt(guc);
+       u32 addr_ggtt, offset;
+       u32 total_size = 0, alloc_size, real_size;
+       u8 engine_class, guc_class;
+       struct guc_gt_system_info *info, local_info;
+
+       /*
+        * Reserve the memory for the golden contexts and point GuC at it but
+        * leave it empty for now. The context data will be filled in later
+        * once there is something available to put there.
+        *
+        * Note that the HWSP and ring context are not included.
+        *
+        * Note also that the storage must be pinned in the GGTT, so that the
+        * address won't change after GuC has been told where to find it. The
+        * GuC will also validate that the LRC base + size fall within the
+        * allowed GGTT range.
+        */
+       if (blob) {
+               offset = guc_ads_golden_ctxt_offset(guc);
+               addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+               info = &blob->system_info;
+       } else {
+               memset(&local_info, 0, sizeof(local_info));
+               info = &local_info;
+               fill_engine_enable_masks(gt, info);
+       }
+
+       for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
+               if (engine_class == OTHER_CLASS)
+                       continue;
+
+               guc_class = engine_class_to_guc_class(engine_class);
+
+               if (!info->engine_enabled_masks[guc_class])
+                       continue;
+
+               real_size = intel_engine_context_size(gt, engine_class);
+               alloc_size = PAGE_ALIGN(real_size);
+               total_size += alloc_size;
+
+               if (!blob)
+                       continue;
+
+               blob->ads.eng_state_size[guc_class] = real_size;
+               blob->ads.golden_context_lrca[guc_class] = addr_ggtt;
+               addr_ggtt += alloc_size;
+       }
+
+       if (!blob)
+               return total_size;
+
+       GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
+       return total_size;
+}
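The blob == NULL branch exists because the function is called twice; a sketch of the calling pattern (both call sites appear later in this patch):

	/* pass 1, from intel_guc_ads_create(): no blob mapped yet, just size it */
	guc->ads_golden_ctxt_size = guc_prep_golden_context(guc, NULL);

	/* pass 2, from __guc_ads_init(): blob mapped, fill in the per-class
	 * eng_state_size[] and golden_context_lrca[] entries */
	guc_prep_golden_context(guc, guc->ads_blob);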
+
+static struct intel_engine_cs *find_engine_state(struct intel_gt *gt, u8 engine_class)
+{
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       for_each_engine(engine, gt, id) {
+               if (engine->class != engine_class)
+                       continue;
+
+               if (!engine->default_state)
+                       continue;
+
+               return engine;
+       }
+
+       return NULL;
+}
+
+static void guc_init_golden_context(struct intel_guc *guc)
+{
        struct __guc_ads_blob *blob = guc->ads_blob;
-       const u32 skipped_size = LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE;
-       u32 base;
+       struct intel_engine_cs *engine;
+       struct intel_gt *gt = guc_to_gt(guc);
+       u32 addr_ggtt, offset;
+       u32 total_size = 0, alloc_size, real_size;
        u8 engine_class, guc_class;
+       u8 *ptr;
 
-       /* GuC scheduling policies */
-       guc_policies_init(&blob->policies);
+       /* Skip execlist and PPGTT registers + HWSP */
+       const u32 lr_hw_context_size = 80 * sizeof(u32);
+       const u32 skip_size = LRC_PPHWSP_SZ * PAGE_SIZE +
+               lr_hw_context_size;
+
+       if (!intel_uc_uses_guc_submission(&gt->uc))
+               return;
+
+       GEM_BUG_ON(!blob);
 
        /*
-        * GuC expects a per-engine-class context image and size
-        * (minus hwsp and ring context). The context image will be
-        * used to reinitialize engines after a reset. It must exist
-        * and be pinned in the GGTT, so that the address won't change after
-        * we have told GuC where to find it. The context size will be used
-        * to validate that the LRC base + size fall within allowed GGTT.
+        * Go back and fill in the golden context data now that it is
+        * available.
         */
+       offset = guc_ads_golden_ctxt_offset(guc);
+       addr_ggtt = intel_guc_ggtt_offset(guc, guc->ads_vma) + offset;
+       ptr = ((u8 *)blob) + offset;
+
        for (engine_class = 0; engine_class <= MAX_ENGINE_CLASS; ++engine_class) {
                if (engine_class == OTHER_CLASS)
                        continue;
 
                guc_class = engine_class_to_guc_class(engine_class);
 
-               /*
-                * TODO: Set context pointer to default state to allow
-                * GuC to re-init guilty contexts after internal reset.
-                */
-               blob->ads.golden_context_lrca[guc_class] = 0;
-               blob->ads.eng_state_size[guc_class] =
-                       intel_engine_context_size(guc_to_gt(guc),
-                                                 engine_class) -
-                       skipped_size;
+               if (!blob->system_info.engine_enabled_masks[guc_class])
+                       continue;
+
+               real_size = intel_engine_context_size(gt, engine_class);
+               alloc_size = PAGE_ALIGN(real_size);
+               total_size += alloc_size;
+
+               engine = find_engine_state(gt, engine_class);
+               if (!engine) {
+                       drm_err(&gt->i915->drm, "No engine state recorded for class %d!\n",
+                               engine_class);
+                       blob->ads.eng_state_size[guc_class] = 0;
+                       blob->ads.golden_context_lrca[guc_class] = 0;
+                       continue;
+               }
+
+               GEM_BUG_ON(blob->ads.eng_state_size[guc_class] != real_size);
+               GEM_BUG_ON(blob->ads.golden_context_lrca[guc_class] != addr_ggtt);
+               addr_ggtt += alloc_size;
+
+               shmem_read(engine->default_state, skip_size, ptr + skip_size,
+                          real_size - skip_size);
+               ptr += alloc_size;
        }
 
+       GEM_BUG_ON(guc->ads_golden_ctxt_size != total_size);
+}
+
+static void __guc_ads_init(struct intel_guc *guc)
+{
+       struct intel_gt *gt = guc_to_gt(guc);
+       struct drm_i915_private *i915 = gt->i915;
+       struct __guc_ads_blob *blob = guc->ads_blob;
+       u32 base;
+
+       /* GuC scheduling policies */
+       guc_policies_init(guc, &blob->policies);
+
        /* System info */
-       blob->system_info.engine_enabled_masks[GUC_RENDER_CLASS] = 1;
-       blob->system_info.engine_enabled_masks[GUC_BLITTER_CLASS] = 1;
-       blob->system_info.engine_enabled_masks[GUC_VIDEO_CLASS] = VDBOX_MASK(gt);
-       blob->system_info.engine_enabled_masks[GUC_VIDEOENHANCE_CLASS] = VEBOX_MASK(gt);
+       fill_engine_enable_masks(gt, &blob->system_info);
 
        blob->system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_SLICE_ENABLED] =
                hweight8(gt->info.sseu.slice_mask);
@@ -174,21 +514,19 @@ static void __guc_ads_init(struct intel_guc *guc)
                         GEN12_DOORBELLS_PER_SQIDI) + 1;
        }
 
+       /* Golden contexts for re-initialising after a watchdog reset */
+       guc_prep_golden_context(guc, blob);
+
        guc_mapping_table_init(guc_to_gt(guc), &blob->system_info);
 
        base = intel_guc_ggtt_offset(guc, guc->ads_vma);
 
-       /* Clients info  */
-       guc_ct_pool_entries_init(blob->ct_pool, ARRAY_SIZE(blob->ct_pool));
-
-       blob->clients_info.clients_num = 1;
-       blob->clients_info.ct_pool_addr = base + ptr_offset(blob, ct_pool);
-       blob->clients_info.ct_pool_count = ARRAY_SIZE(blob->ct_pool);
-
        /* ADS */
        blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
        blob->ads.gt_system_info = base + ptr_offset(blob, system_info);
-       blob->ads.clients_info = base + ptr_offset(blob, clients_info);
+
+       /* MMIO save/restore list */
+       guc_mmio_reg_state_init(guc, blob);
 
        /* Private Data */
        blob->ads.private_data = base + guc_ads_private_data_offset(guc);
@@ -210,6 +548,19 @@ int intel_guc_ads_create(struct intel_guc *guc)
 
        GEM_BUG_ON(guc->ads_vma);
 
+       /* Need to calculate the reg state size dynamically: */
+       ret = guc_mmio_reg_state_query(guc);
+       if (ret < 0)
+               return ret;
+       guc->ads_regset_size = ret;
+
+       /* Likewise the golden contexts: */
+       ret = guc_prep_golden_context(guc, NULL);
+       if (ret < 0)
+               return ret;
+       guc->ads_golden_ctxt_size = ret;
+
+       /* Now the total size can be determined: */
        size = guc_ads_blob_size(guc);
 
        ret = intel_guc_allocate_and_map_vma(guc, size, &guc->ads_vma,
@@ -222,6 +573,18 @@ int intel_guc_ads_create(struct intel_guc *guc)
        return 0;
 }
 
+void intel_guc_ads_init_late(struct intel_guc *guc)
+{
+       /*
+        * The golden context setup requires the saved engine state from
+        * __engines_record_defaults(). However, that requires engines to be
+        * operational which means the ADS must already have been configured.
+        * Fortunately, the golden context state is not needed until a hang
+        * occurs, so it can be filled in during this late init phase.
+        */
+       guc_init_golden_context(guc);
+}
+
 void intel_guc_ads_destroy(struct intel_guc *guc)
 {
        i915_vma_unpin_and_release(&guc->ads_vma, I915_VMA_RELEASE_MAP);
index b00d3ae..3d85051 100644 (file)
@@ -7,9 +7,13 @@
 #define _INTEL_GUC_ADS_H_
 
 struct intel_guc;
+struct drm_printer;
 
 int intel_guc_ads_create(struct intel_guc *guc);
 void intel_guc_ads_destroy(struct intel_guc *guc);
+void intel_guc_ads_init_late(struct intel_guc *guc);
 void intel_guc_ads_reset(struct intel_guc *guc);
+void intel_guc_ads_print_policy_info(struct intel_guc *guc,
+                                    struct drm_printer *p);
 
 #endif
index 8f7b148..22b4733 100644 (file)
@@ -3,6 +3,11 @@
  * Copyright © 2016-2019 Intel Corporation
  */
 
+#include <linux/circ_buf.h>
+#include <linux/ktime.h>
+#include <linux/time64.h>
+#include <linux/timekeeping.h>
+
 #include "i915_drv.h"
 #include "intel_guc_ct.h"
 #include "gt/intel_gt.h"
@@ -58,11 +63,17 @@ static inline struct drm_device *ct_to_drm(struct intel_guc_ct *ct)
  *      +--------+-----------------------------------------------+------+
  *
  * Size of each `CT Buffer`_ must be multiple of 4K.
- * As we don't expect too many messages, for now use minimum sizes.
+ * We don't expect too many messages in flight at any time, unless we are
+ * using GuC submission. In that case each request requires a minimum of
+ * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
+ * is enough space to avoid backpressure on the driver. We increase the size
+ * of the receive buffer (relative to the send) to ensure a G2H response
+ * CTB has a landing spot.
  */
 #define CTB_DESC_SIZE          ALIGN(sizeof(struct guc_ct_buffer_desc), SZ_2K)
 #define CTB_H2G_BUFFER_SIZE    (SZ_4K)
-#define CTB_G2H_BUFFER_SIZE    (SZ_4K)
+#define CTB_G2H_BUFFER_SIZE    (4 * CTB_H2G_BUFFER_SIZE)
+#define G2H_ROOM_BUFFER_SIZE   (CTB_G2H_BUFFER_SIZE / 4)
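Spelled out with the usual SZ_4K value, the new sizing (as consumed by guc_ct_buffer_init(), which stores sizes in dwords) works out to:

	/*
	 * CTB_H2G_BUFFER_SIZE  = 4K           -> send ctb->size = 1024 dwords
	 * CTB_G2H_BUFFER_SIZE  = 4 * 4K = 16K -> recv ctb->size = 4096 dwords
	 * G2H_ROOM_BUFFER_SIZE = 16K / 4 = 4K -> recv ctb->resv_space = 1024 dwords,
	 *                        held back for unsolicited G2H traffic
	 */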
 
 struct ct_request {
        struct list_head link;
@@ -98,66 +109,84 @@ void intel_guc_ct_init_early(struct intel_guc_ct *ct)
        INIT_LIST_HEAD(&ct->requests.incoming);
        INIT_WORK(&ct->requests.worker, ct_incoming_request_worker_func);
        tasklet_setup(&ct->receive_tasklet, ct_receive_tasklet_func);
+       init_waitqueue_head(&ct->wq);
 }
 
 static inline const char *guc_ct_buffer_type_to_str(u32 type)
 {
        switch (type) {
-       case INTEL_GUC_CT_BUFFER_TYPE_SEND:
+       case GUC_CTB_TYPE_HOST2GUC:
                return "SEND";
-       case INTEL_GUC_CT_BUFFER_TYPE_RECV:
+       case GUC_CTB_TYPE_GUC2HOST:
                return "RECV";
        default:
                return "<invalid>";
        }
 }
 
-static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
-                                   u32 cmds_addr, u32 size)
+static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc)
 {
        memset(desc, 0, sizeof(*desc));
-       desc->addr = cmds_addr;
-       desc->size = size;
-       desc->owner = CTB_OWNER_HOST;
 }
 
-static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb, u32 cmds_addr)
+static void guc_ct_buffer_reset(struct intel_guc_ct_buffer *ctb)
 {
-       guc_ct_buffer_desc_init(ctb->desc, cmds_addr, ctb->size);
+       u32 space;
+
+       ctb->broken = false;
+       ctb->tail = 0;
+       ctb->head = 0;
+       space = CIRC_SPACE(ctb->tail, ctb->head, ctb->size) - ctb->resv_space;
+       atomic_set(&ctb->space, space);
+
+       guc_ct_buffer_desc_init(ctb->desc);
 }
 
 static void guc_ct_buffer_init(struct intel_guc_ct_buffer *ctb,
                               struct guc_ct_buffer_desc *desc,
-                              u32 *cmds, u32 size)
+                              u32 *cmds, u32 size_in_bytes, u32 resv_space)
 {
-       GEM_BUG_ON(size % 4);
+       GEM_BUG_ON(size_in_bytes % 4);
 
        ctb->desc = desc;
        ctb->cmds = cmds;
-       ctb->size = size;
+       ctb->size = size_in_bytes / 4;
+       ctb->resv_space = resv_space / 4;
 
-       guc_ct_buffer_reset(ctb, 0);
+       guc_ct_buffer_reset(ctb);
 }
 
-static int guc_action_register_ct_buffer(struct intel_guc *guc,
-                                        u32 desc_addr,
-                                        u32 type)
+static int guc_action_register_ct_buffer(struct intel_guc *guc, u32 type,
+                                        u32 desc_addr, u32 buff_addr, u32 size)
 {
-       u32 action[] = {
-               INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
-               desc_addr,
-               sizeof(struct guc_ct_buffer_desc),
-               type
+       u32 request[HOST2GUC_REGISTER_CTB_REQUEST_MSG_LEN] = {
+               FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+               FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+               FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_REGISTER_CTB),
+               FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_SIZE, size / SZ_4K - 1) |
+               FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_1_TYPE, type),
+               FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR, desc_addr),
+               FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR, buff_addr),
        };
 
-       /* Can't use generic send(), CT registration must go over MMIO */
-       return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+       GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
+       GEM_BUG_ON(size % SZ_4K);
+
+       /* CT registration must go over MMIO */
+       return intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
 }
 
-static int ct_register_buffer(struct intel_guc_ct *ct, u32 desc_addr, u32 type)
+static int ct_register_buffer(struct intel_guc_ct *ct, u32 type,
+                             u32 desc_addr, u32 buff_addr, u32 size)
 {
-       int err = guc_action_register_ct_buffer(ct_to_guc(ct), desc_addr, type);
+       int err;
+
+       err = i915_inject_probe_error(guc_to_gt(ct_to_guc(ct))->i915, -ENXIO);
+       if (unlikely(err))
+               return err;
 
+       err = guc_action_register_ct_buffer(ct_to_guc(ct), type,
+                                           desc_addr, buff_addr, size);
        if (unlikely(err))
                CT_ERROR(ct, "Failed to register %s buffer (err=%d)\n",
                         guc_ct_buffer_type_to_str(type), err);
@@ -166,14 +195,17 @@ static int ct_register_buffer(struct intel_guc_ct *ct, u32 desc_addr, u32 type)
 
 static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
 {
-       u32 action[] = {
-               INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
-               CTB_OWNER_HOST,
-               type
+       u32 request[HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_LEN] = {
+               FIELD_PREP(GUC_HXG_MSG_0_ORIGIN, GUC_HXG_ORIGIN_HOST) |
+               FIELD_PREP(GUC_HXG_MSG_0_TYPE, GUC_HXG_TYPE_REQUEST) |
+               FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_DEREGISTER_CTB),
+               FIELD_PREP(HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_TYPE, type),
        };
 
-       /* Can't use generic send(), CT deregistration must go over MMIO */
-       return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+       GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
+
+       /* CT deregistration must go over MMIO */
+       return intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
 }
 
 static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
@@ -200,10 +232,15 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
        struct guc_ct_buffer_desc *desc;
        u32 blob_size;
        u32 cmds_size;
+       u32 resv_space;
        void *blob;
        u32 *cmds;
        int err;
 
+       err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO);
+       if (err)
+               return err;
+
        GEM_BUG_ON(ct->vma);
 
        blob_size = 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE + CTB_G2H_BUFFER_SIZE;
@@ -220,19 +257,23 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
        desc = blob;
        cmds = blob + 2 * CTB_DESC_SIZE;
        cmds_size = CTB_H2G_BUFFER_SIZE;
-       CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "send",
-                ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size);
+       resv_space = 0;
+       CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "send",
+                ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
+                resv_space);
 
-       guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size);
+       guc_ct_buffer_init(&ct->ctbs.send, desc, cmds, cmds_size, resv_space);
 
        /* store pointers to desc and cmds for recv ctb */
        desc = blob + CTB_DESC_SIZE;
        cmds = blob + 2 * CTB_DESC_SIZE + CTB_H2G_BUFFER_SIZE;
        cmds_size = CTB_G2H_BUFFER_SIZE;
-       CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u\n", "recv",
-                ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size);
+       resv_space = G2H_ROOM_BUFFER_SIZE;
+       CT_DEBUG(ct, "%s desc %#tx cmds %#tx size %u/%u\n", "recv",
+                ptrdiff(desc, blob), ptrdiff(cmds, blob), cmds_size,
+                resv_space);
 
-       guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size);
+       guc_ct_buffer_init(&ct->ctbs.recv, desc, cmds, cmds_size, resv_space);
 
        return 0;
 }
@@ -261,7 +302,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
 int intel_guc_ct_enable(struct intel_guc_ct *ct)
 {
        struct intel_guc *guc = ct_to_guc(ct);
-       u32 base, cmds;
+       u32 base, desc, cmds;
        void *blob;
        int err;
 
@@ -277,32 +318,36 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
        GEM_BUG_ON(blob != ct->ctbs.send.desc);
 
        /* (re)initialize descriptors */
-       cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
-       guc_ct_buffer_reset(&ct->ctbs.send, cmds);
-
-       cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
-       guc_ct_buffer_reset(&ct->ctbs.recv, cmds);
+       guc_ct_buffer_reset(&ct->ctbs.send);
+       guc_ct_buffer_reset(&ct->ctbs.recv);
 
        /*
         * Register both CT buffers starting with RECV buffer.
         * Descriptors are in first half of the blob.
         */
-       err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs.recv.desc, blob),
-                                INTEL_GUC_CT_BUFFER_TYPE_RECV);
+       desc = base + ptrdiff(ct->ctbs.recv.desc, blob);
+       cmds = base + ptrdiff(ct->ctbs.recv.cmds, blob);
+       err = ct_register_buffer(ct, GUC_CTB_TYPE_GUC2HOST,
+                                desc, cmds, ct->ctbs.recv.size * 4);
+
        if (unlikely(err))
                goto err_out;
 
-       err = ct_register_buffer(ct, base + ptrdiff(ct->ctbs.send.desc, blob),
-                                INTEL_GUC_CT_BUFFER_TYPE_SEND);
+       desc = base + ptrdiff(ct->ctbs.send.desc, blob);
+       cmds = base + ptrdiff(ct->ctbs.send.cmds, blob);
+       err = ct_register_buffer(ct, GUC_CTB_TYPE_HOST2GUC,
+                                desc, cmds, ct->ctbs.send.size * 4);
+
        if (unlikely(err))
                goto err_deregister;
 
        ct->enabled = true;
+       ct->stall_time = KTIME_MAX;
 
        return 0;
 
 err_deregister:
-       ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
+       ct_deregister_buffer(ct, GUC_CTB_TYPE_GUC2HOST);
 err_out:
        CT_PROBE_ERROR(ct, "Failed to enable CTB (%pe)\n", ERR_PTR(err));
        return err;
@@ -321,8 +366,8 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
        ct->enabled = false;
 
        if (intel_guc_is_fw_running(guc)) {
-               ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_SEND);
-               ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
+               ct_deregister_buffer(ct, GUC_CTB_TYPE_HOST2GUC);
+               ct_deregister_buffer(ct, GUC_CTB_TYPE_GUC2HOST);
        }
 }
 
@@ -354,81 +399,63 @@ static void write_barrier(struct intel_guc_ct *ct)
        }
 }
 
-/**
- * DOC: CTB Host to GuC request
- *
- * Format of the CTB Host to GuC request message is as follows::
- *
- *      +------------+---------+---------+---------+---------+
- *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
- *      +------------+---------+---------+---------+---------+
- *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
- *      +   HEADER   +---------+---------+---------+---------+
- *      |            |    0    |    1    |   ...   |    n    |
- *      +============+=========+=========+=========+=========+
- *      |  len >= 1  |  FENCE  |     request specific data   |
- *      +------+-----+---------+---------+---------+---------+
- *
- *                   ^-----------------len-------------------^
- */
-
 static int ct_write(struct intel_guc_ct *ct,
                    const u32 *action,
                    u32 len /* in dwords */,
-                   u32 fence)
+                   u32 fence, u32 flags)
 {
        struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
        struct guc_ct_buffer_desc *desc = ctb->desc;
-       u32 head = desc->head;
-       u32 tail = desc->tail;
+       u32 tail = ctb->tail;
        u32 size = ctb->size;
-       u32 used;
        u32 header;
+       u32 hxg;
+       u32 type;
        u32 *cmds = ctb->cmds;
        unsigned int i;
 
-       if (unlikely(desc->is_in_error))
-               return -EPIPE;
-
-       if (unlikely(!IS_ALIGNED(head | tail, 4) ||
-                    (tail | head) >= size))
+       if (unlikely(desc->status))
                goto corrupted;
 
-       /* later calculations will be done in dwords */
-       head /= 4;
-       tail /= 4;
-       size /= 4;
-
-       /*
-        * tail == head condition indicates empty. GuC FW does not support
-        * using up the entire buffer to get tail == head meaning full.
-        */
-       if (tail < head)
-               used = (size - head) + tail;
-       else
-               used = tail - head;
+       GEM_BUG_ON(tail > size);
 
-       /* make sure there is a space including extra dw for the fence */
-       if (unlikely(used + len + 1 >= size))
-               return -ENOSPC;
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+       if (unlikely(tail != READ_ONCE(desc->tail))) {
+               CT_ERROR(ct, "Tail was modified %u != %u\n",
+                        desc->tail, tail);
+               desc->status |= GUC_CTB_STATUS_MISMATCH;
+               goto corrupted;
+       }
+       if (unlikely(READ_ONCE(desc->head) >= size)) {
+               CT_ERROR(ct, "Invalid head offset %u >= %u\n",
+                        desc->head, size);
+               desc->status |= GUC_CTB_STATUS_OVERFLOW;
+               goto corrupted;
+       }
+#endif
 
        /*
-        * Write the message. The format is the following:
-        * DW0: header (including action code)
-        * DW1: fence
-        * DW2+: action data
+        * dw0: CT header (including fence)
+        * dw1: HXG header (including action code)
+        * dw2+: action data
         */
-       header = (len << GUC_CT_MSG_LEN_SHIFT) |
-                GUC_CT_MSG_SEND_STATUS |
-                (action[0] << GUC_CT_MSG_ACTION_SHIFT);
+       header = FIELD_PREP(GUC_CTB_MSG_0_FORMAT, GUC_CTB_FORMAT_HXG) |
+                FIELD_PREP(GUC_CTB_MSG_0_NUM_DWORDS, len) |
+                FIELD_PREP(GUC_CTB_MSG_0_FENCE, fence);
 
-       CT_DEBUG(ct, "writing %*ph %*ph %*ph\n",
-                4, &header, 4, &fence, 4 * (len - 1), &action[1]);
+       type = (flags & INTEL_GUC_CT_SEND_NB) ? GUC_HXG_TYPE_EVENT :
+               GUC_HXG_TYPE_REQUEST;
+       hxg = FIELD_PREP(GUC_HXG_MSG_0_TYPE, type) |
+               FIELD_PREP(GUC_HXG_EVENT_MSG_0_ACTION |
+                          GUC_HXG_EVENT_MSG_0_DATA0, action[0]);
+
+       CT_DEBUG(ct, "writing (tail %u) %*ph %*ph %*ph\n",
+                tail, 4, &header, 4, &hxg, 4 * (len - 1), &action[1]);
 
        cmds[tail] = header;
        tail = (tail + 1) % size;
 
-       cmds[tail] = fence;
+       cmds[tail] = hxg;
        tail = (tail + 1) % size;
 
        for (i = 1; i < len; i++) {
@@ -443,14 +470,20 @@ static int ct_write(struct intel_guc_ct *ct,
         */
        write_barrier(ct);
 
-       /* now update desc tail (back in bytes) */
-       desc->tail = tail * 4;
+       /* update local copies */
+       ctb->tail = tail;
+       GEM_BUG_ON(atomic_read(&ctb->space) < len + GUC_CTB_HDR_LEN);
+       atomic_sub(len + GUC_CTB_HDR_LEN, &ctb->space);
+
+       /* now update descriptor */
+       WRITE_ONCE(desc->tail, tail);
+
        return 0;
 
 corrupted:
-       CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
-                desc->addr, desc->head, desc->tail, desc->size);
-       desc->is_in_error = 1;
+       CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
+                desc->head, desc->tail, desc->status);
+       ctb->broken = true;
        return -EPIPE;
 }
 
@@ -459,7 +492,7 @@ corrupted:
  * @req:       pointer to pending request
  * @status:    placeholder for status
  *
- * For each sent request, Guc shall send bac CT response message.
+ * For each sent request, GuC shall send back CT response message.
  * Our message handler will update status of tracked request once
  * response message with given fence is received. Wait here and
  * check for valid response status value.
@@ -475,12 +508,18 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
        /*
         * Fast commands should complete in less than 10us, so sample quickly
         * up to that length of time, then switch to a slower sleep-wait loop.
-        * No GuC command should ever take longer than 10ms.
+        * No GuC command should ever take longer than 10ms, but many GuC
+        * commands can be in flight at a time, so use a 1s timeout on the
+        * slower sleep-wait loop.
         */
-#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
-       err = wait_for_us(done, 10);
+#define GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS 10
+#define GUC_CTB_RESPONSE_TIMEOUT_LONG_MS 1000
+#define done \
+       (FIELD_GET(GUC_HXG_MSG_0_ORIGIN, READ_ONCE(req->status)) == \
+        GUC_HXG_ORIGIN_GUC)
+       err = wait_for_us(done, GUC_CTB_RESPONSE_TIMEOUT_SHORT_MS);
        if (err)
-               err = wait_for(done, 10);
+               err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
 #undef done
 
        if (unlikely(err))
@@ -490,6 +529,131 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
        return err;
 }
 
+#define GUC_CTB_TIMEOUT_MS     1500
+static inline bool ct_deadlocked(struct intel_guc_ct *ct)
+{
+       long timeout = GUC_CTB_TIMEOUT_MS;
+       bool ret = ktime_ms_delta(ktime_get(), ct->stall_time) > timeout;
+
+       if (unlikely(ret)) {
+               struct guc_ct_buffer_desc *send = ct->ctbs.send.desc;
+               struct guc_ct_buffer_desc *recv = ct->ctbs.recv.desc;
+
+               CT_ERROR(ct, "Communication stalled for %lld ms, desc status=%#x,%#x\n",
+                        ktime_ms_delta(ktime_get(), ct->stall_time),
+                        send->status, recv->status);
+               ct->ctbs.send.broken = true;
+       }
+
+       return ret;
+}
+
+static inline bool g2h_has_room(struct intel_guc_ct *ct, u32 g2h_len_dw)
+{
+       struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
+
+       /*
+        * We leave a certain amount of space in the G2H CTB buffer for
+        * unexpected G2H CTBs (e.g. logging, engine hang, etc...)
+        */
+       return !g2h_len_dw || atomic_read(&ctb->space) >= g2h_len_dw;
+}
+
+static inline void g2h_reserve_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
+{
+       lockdep_assert_held(&ct->ctbs.send.lock);
+
+       GEM_BUG_ON(!g2h_has_room(ct, g2h_len_dw));
+
+       if (g2h_len_dw)
+               atomic_sub(g2h_len_dw, &ct->ctbs.recv.space);
+}
+
+static inline void g2h_release_space(struct intel_guc_ct *ct, u32 g2h_len_dw)
+{
+       atomic_add(g2h_len_dw, &ct->ctbs.recv.space);
+}
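The reserve/release pair above behaves like a simple credit scheme; a hedged outline of the lifecycle as used by ct_send()/ct_send_nb() below (for non-blocking sends the release happens once the matching G2H is consumed, outside this hunk):

	/*
	 * sender, under ctbs.send.lock:
	 *   g2h_has_room(ct, g2h_len_dw)       - enough G2H credits left?
	 *   g2h_reserve_space(ct, g2h_len_dw)  - debit before the H2G goes out
	 *
	 * later, when the expected G2H has been handled (or, for blocking
	 * sends, when the response arrives in ct_send()):
	 *   g2h_release_space(ct, g2h_len_dw)  - credit returned
	 */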
+
+static inline bool h2g_has_room(struct intel_guc_ct *ct, u32 len_dw)
+{
+       struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
+       struct guc_ct_buffer_desc *desc = ctb->desc;
+       u32 head;
+       u32 space;
+
+       if (atomic_read(&ctb->space) >= len_dw)
+               return true;
+
+       head = READ_ONCE(desc->head);
+       if (unlikely(head > ctb->size)) {
+               CT_ERROR(ct, "Invalid head offset %u >= %u\n",
+                        head, ctb->size);
+               desc->status |= GUC_CTB_STATUS_OVERFLOW;
+               ctb->broken = true;
+               return false;
+       }
+
+       space = CIRC_SPACE(ctb->tail, head, ctb->size);
+       atomic_set(&ctb->space, space);
+
+       return space >= len_dw;
+}
+
+static int has_room_nb(struct intel_guc_ct *ct, u32 h2g_dw, u32 g2h_dw)
+{
+       lockdep_assert_held(&ct->ctbs.send.lock);
+
+       if (unlikely(!h2g_has_room(ct, h2g_dw) || !g2h_has_room(ct, g2h_dw))) {
+               if (ct->stall_time == KTIME_MAX)
+                       ct->stall_time = ktime_get();
+
+               if (unlikely(ct_deadlocked(ct)))
+                       return -EPIPE;
+               else
+                       return -EBUSY;
+       }
+
+       ct->stall_time = KTIME_MAX;
+       return 0;
+}
+
+#define G2H_LEN_DW(f) ({ \
+       typeof(f) f_ = (f); \
+       FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) ? \
+       FIELD_GET(INTEL_GUC_CT_SEND_G2H_DW_MASK, f_) + \
+       GUC_CTB_HXG_MSG_MIN_LEN : 0; \
+})
+static int ct_send_nb(struct intel_guc_ct *ct,
+                     const u32 *action,
+                     u32 len,
+                     u32 flags)
+{
+       struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
+       unsigned long spin_flags;
+       u32 g2h_len_dw = G2H_LEN_DW(flags);
+       u32 fence;
+       int ret;
+
+       spin_lock_irqsave(&ctb->lock, spin_flags);
+
+       ret = has_room_nb(ct, len + GUC_CTB_HDR_LEN, g2h_len_dw);
+       if (unlikely(ret))
+               goto out;
+
+       fence = ct_get_next_fence(ct);
+       ret = ct_write(ct, action, len, fence, flags);
+       if (unlikely(ret))
+               goto out;
+
+       g2h_reserve_space(ct, g2h_len_dw);
+       intel_guc_notify(ct_to_guc(ct));
+
+out:
+       spin_unlock_irqrestore(&ctb->lock, spin_flags);
+
+       return ret;
+}
+
 static int ct_send(struct intel_guc_ct *ct,
                   const u32 *action,
                   u32 len,
@@ -497,8 +661,10 @@ static int ct_send(struct intel_guc_ct *ct,
                   u32 response_buf_size,
                   u32 *status)
 {
+       struct intel_guc_ct_buffer *ctb = &ct->ctbs.send;
        struct ct_request request;
        unsigned long flags;
+       unsigned int sleep_period_ms = 1;
        u32 fence;
        int err;
 
@@ -506,8 +672,33 @@ static int ct_send(struct intel_guc_ct *ct,
        GEM_BUG_ON(!len);
        GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
        GEM_BUG_ON(!response_buf && response_buf_size);
+       might_sleep();
+
+       /*
+        * We use a lazy spin wait loop here as we believe that if the CT
+        * buffers are sized correctly the flow control condition should be
+        * rare. We reserve the maximum size in the G2H credits as we don't
+        * know how big the response is going to be.
+        */
+retry:
+       spin_lock_irqsave(&ctb->lock, flags);
+       if (unlikely(!h2g_has_room(ct, len + GUC_CTB_HDR_LEN) ||
+                    !g2h_has_room(ct, GUC_CTB_HXG_MSG_MAX_LEN))) {
+               if (ct->stall_time == KTIME_MAX)
+                       ct->stall_time = ktime_get();
+               spin_unlock_irqrestore(&ctb->lock, flags);
+
+               if (unlikely(ct_deadlocked(ct)))
+                       return -EPIPE;
+
+               if (msleep_interruptible(sleep_period_ms))
+                       return -EINTR;
+               sleep_period_ms = sleep_period_ms << 1;
+
+               goto retry;
+       }
 
-       spin_lock_irqsave(&ct->ctbs.send.lock, flags);
+       ct->stall_time = KTIME_MAX;
 
        fence = ct_get_next_fence(ct);
        request.fence = fence;
@@ -519,9 +710,10 @@ static int ct_send(struct intel_guc_ct *ct,
        list_add_tail(&request.link, &ct->requests.pending);
        spin_unlock(&ct->requests.lock);
 
-       err = ct_write(ct, action, len, fence);
+       err = ct_write(ct, action, len, fence, 0);
+       g2h_reserve_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
 
-       spin_unlock_irqrestore(&ct->ctbs.send.lock, flags);
+       spin_unlock_irqrestore(&ctb->lock, flags);
 
        if (unlikely(err))
                goto unlink;
@@ -529,24 +721,25 @@ static int ct_send(struct intel_guc_ct *ct,
        intel_guc_notify(ct_to_guc(ct));
 
        err = wait_for_ct_request_update(&request, status);
+       g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
        if (unlikely(err))
                goto unlink;
 
-       if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
+       if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
                err = -EIO;
                goto unlink;
        }
 
        if (response_buf) {
                /* There shall be no data in the status */
-               WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
+               WARN_ON(FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, request.status));
                /* Return actual response len */
                err = request.response_len;
        } else {
                /* There shall be no response payload */
                WARN_ON(request.response_len);
                /* Return data decoded from the status dword */
-               err = INTEL_GUC_MSG_TO_DATA(*status);
+               err = FIELD_GET(GUC_HXG_RESPONSE_MSG_0_DATA0, *status);
        }
 
 unlink:
@@ -561,16 +754,25 @@ unlink:
  * Command Transport (CT) buffer based GuC send function.
  */
 int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
-                     u32 *response_buf, u32 response_buf_size)
+                     u32 *response_buf, u32 response_buf_size, u32 flags)
 {
        u32 status = ~0; /* undefined */
        int ret;
 
        if (unlikely(!ct->enabled)) {
-               WARN(1, "Unexpected send: action=%#x\n", *action);
+               struct intel_guc *guc = ct_to_guc(ct);
+               struct intel_uc *uc = container_of(guc, struct intel_uc, guc);
+
+               WARN(!uc->reset_in_progress, "Unexpected send: action=%#x\n", *action);
                return -ENODEV;
        }
 
+       if (unlikely(ct->ctbs.send.broken))
+               return -EPIPE;
+
+       if (flags & INTEL_GUC_CT_SEND_NB)
+               return ct_send_nb(ct, action, len, flags);
+
        ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
        if (unlikely(ret < 0)) {
                CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n",
@@ -583,21 +785,6 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
        return ret;
 }
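
The blocking path in ct_send() above handles the no-room case with a doubling msleep() rather than busy-waiting, giving up via ct_deadlocked() if the stall drags on. Reduced to a standalone sketch (has_room() and the 64 ms cut-off are placeholders):

    #include <errno.h>
    #include <stdbool.h>
    #include <unistd.h>

    /* Placeholder: reports whether the ring currently has room. */
    extern bool has_room(void);

    static int wait_for_room(void)
    {
            unsigned int sleep_ms = 1;

            while (!has_room()) {
                    /* Give up if the stall lasts too long (stand-in for ct_deadlocked()). */
                    if (sleep_ms > 64)
                            return -EPIPE;

                    usleep(sleep_ms * 1000);
                    sleep_ms <<= 1;         /* back off: 1, 2, 4, ... ms */
            }
            return 0;
    }
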
 
-static inline unsigned int ct_header_get_len(u32 header)
-{
-       return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
-}
-
-static inline unsigned int ct_header_get_action(u32 header)
-{
-       return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
-}
-
-static inline bool ct_header_is_response(u32 header)
-{
-       return !!(header & GUC_CT_MSG_IS_RESPONSE);
-}
-
 static struct ct_incoming_msg *ct_alloc_msg(u32 num_dwords)
 {
        struct ct_incoming_msg *msg;
@@ -621,8 +808,8 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
 {
        struct intel_guc_ct_buffer *ctb = &ct->ctbs.recv;
        struct guc_ct_buffer_desc *desc = ctb->desc;
-       u32 head = desc->head;
-       u32 tail = desc->tail;
+       u32 head = ctb->head;
+       u32 tail = READ_ONCE(desc->tail);
        u32 size = ctb->size;
        u32 *cmds = ctb->cmds;
        s32 available;
@@ -630,17 +817,28 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
        unsigned int i;
        u32 header;
 
-       if (unlikely(desc->is_in_error))
+       if (unlikely(ctb->broken))
                return -EPIPE;
 
-       if (unlikely(!IS_ALIGNED(head | tail, 4) ||
-                    (tail | head) >= size))
+       if (unlikely(desc->status))
                goto corrupted;
 
-       /* later calculations will be done in dwords */
-       head /= 4;
-       tail /= 4;
-       size /= 4;
+       GEM_BUG_ON(head > size);
+
+#ifdef CONFIG_DRM_I915_DEBUG_GUC
+       if (unlikely(head != READ_ONCE(desc->head))) {
+               CT_ERROR(ct, "Head was modified %u != %u\n",
+                        desc->head, head);
+               desc->status |= GUC_CTB_STATUS_MISMATCH;
+               goto corrupted;
+       }
+#endif
+       if (unlikely(tail >= size)) {
+               CT_ERROR(ct, "Invalid tail offset %u >= %u)\n",
+                        tail, size);
+               desc->status |= GUC_CTB_STATUS_OVERFLOW;
+               goto corrupted;
+       }
 
        /* tail == head condition indicates empty */
        available = tail - head;
@@ -652,14 +850,14 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
        /* beware of buffer wrap case */
        if (unlikely(available < 0))
                available += size;
-       CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail);
+       CT_DEBUG(ct, "available %d (%u:%u:%u)\n", available, head, tail, size);
        GEM_BUG_ON(available < 0);
 
        header = cmds[head];
        head = (head + 1) % size;
 
        /* message len with header */
-       len = ct_header_get_len(header) + 1;
+       len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, header) + GUC_CTB_MSG_MIN_LEN;
        if (unlikely(len > (u32)available)) {
                CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
                         4, &header,
@@ -667,6 +865,7 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
                              size - head : available - 1), &cmds[head],
                         4 * (head + available - 1 > size ?
                              available - 1 - size + head : 0), &cmds[0]);
+               desc->status |= GUC_CTB_STATUS_UNDERFLOW;
                goto corrupted;
        }
 
@@ -689,65 +888,39 @@ static int ct_read(struct intel_guc_ct *ct, struct ct_incoming_msg **msg)
        }
        CT_DEBUG(ct, "received %*ph\n", 4 * len, (*msg)->msg);
 
-       desc->head = head * 4;
+       /* update local copies */
+       ctb->head = head;
+
+       /* now update descriptor */
+       WRITE_ONCE(desc->head, head);
+
        return available - len;
 
 corrupted:
-       CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
-                desc->addr, desc->head, desc->tail, desc->size);
-       desc->is_in_error = 1;
+       CT_ERROR(ct, "Corrupted descriptor head=%u tail=%u status=%#x\n",
+                desc->head, desc->tail, desc->status);
+       ctb->broken = true;
        return -EPIPE;
 }
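
The head/tail arithmetic in ct_read() is the standard circular-buffer pattern: tail == head means empty, and a negative difference means the tail has wrapped past the end of the ring. The same calculation as a standalone helper (names are illustrative):

    #include <assert.h>
    #include <stdint.h>

    /* Dwords available to read in a ring of 'size' dwords. */
    static int32_t ring_available(uint32_t head, uint32_t tail, uint32_t size)
    {
            int32_t available = (int32_t)tail - (int32_t)head;

            if (available < 0)              /* tail wrapped around */
                    available += size;

            assert(available >= 0 && (uint32_t)available < size);
            return available;
    }
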
 
-/**
- * DOC: CTB GuC to Host response
- *
- * Format of the CTB GuC to Host response message is as follows::
- *
- *      +------------+---------+---------+---------+---------+---------+
- *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
- *      +------------+---------+---------+---------+---------+---------+
- *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
- *      +   HEADER   +---------+---------+---------+---------+---------+
- *      |            |    0    |    1    |    2    |   ...   |    n    |
- *      +============+=========+=========+=========+=========+=========+
- *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
- *      +------+-----+---------+---------+---------+---------+---------+
- *
- *                   ^-----------------------len-----------------------^
- */
-
 static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *response)
 {
-       u32 header = response->msg[0];
-       u32 len = ct_header_get_len(header);
-       u32 fence;
-       u32 status;
-       u32 datalen;
+       u32 len = FIELD_GET(GUC_CTB_MSG_0_NUM_DWORDS, response->msg[0]);
+       u32 fence = FIELD_GET(GUC_CTB_MSG_0_FENCE, response->msg[0]);
+       const u32 *hxg = &response->msg[GUC_CTB_MSG_MIN_LEN];
+       const u32 *data = &hxg[GUC_HXG_MSG_MIN_LEN];
+       u32 datalen = len - GUC_HXG_MSG_MIN_LEN;
        struct ct_request *req;
        unsigned long flags;
        bool found = false;
        int err = 0;
 
-       GEM_BUG_ON(!ct_header_is_response(header));
-
-       /* Response payload shall at least include fence and status */
-       if (unlikely(len < 2)) {
-               CT_ERROR(ct, "Corrupted response (len %u)\n", len);
-               return -EPROTO;
-       }
-
-       fence = response->msg[1];
-       status = response->msg[2];
-       datalen = len - 2;
-
-       /* Format of the status follows RESPONSE message */
-       if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
-               CT_ERROR(ct, "Corrupted response (status %#x)\n", status);
-               return -EPROTO;
-       }
+       GEM_BUG_ON(len < GUC_HXG_MSG_MIN_LEN);
+       GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]) != GUC_HXG_ORIGIN_GUC);
+       GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_SUCCESS &&
+                  FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_RESPONSE_FAILURE);
 
-       CT_DEBUG(ct, "response fence %u status %#x\n", fence, status);
+       CT_DEBUG(ct, "response fence %u status %#x\n", fence, hxg[0]);
 
        spin_lock_irqsave(&ct->requests.lock, flags);
        list_for_each_entry(req, &ct->requests.pending, link) {
@@ -763,18 +936,22 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r
                        err = -EMSGSIZE;
                }
                if (datalen)
-                       memcpy(req->response_buf, response->msg + 3, 4 * datalen);
+                       memcpy(req->response_buf, data, 4 * datalen);
                req->response_len = datalen;
-               WRITE_ONCE(req->status, status);
+               WRITE_ONCE(req->status, hxg[0]);
                found = true;
                break;
        }
-       spin_unlock_irqrestore(&ct->requests.lock, flags);
-
        if (!found) {
                CT_ERROR(ct, "Unsolicited response (fence %u)\n", fence);
-               return -ENOKEY;
+               CT_ERROR(ct, "Could not find fence=%u, last_fence=%u\n", fence,
+                        ct->requests.last_fence);
+               list_for_each_entry(req, &ct->requests.pending, link)
+                       CT_ERROR(ct, "request %u awaits response\n",
+                                req->fence);
+               err = -ENOKEY;
        }
+       spin_unlock_irqrestore(&ct->requests.lock, flags);
 
        if (unlikely(err))
                return err;
@@ -786,14 +963,16 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r
 static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
 {
        struct intel_guc *guc = ct_to_guc(ct);
-       u32 header, action, len;
+       const u32 *hxg;
        const u32 *payload;
+       u32 hxg_len, action, len;
        int ret;
 
-       header = request->msg[0];
-       payload = &request->msg[1];
-       action = ct_header_get_action(header);
-       len = ct_header_get_len(header);
+       hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
+       hxg_len = request->size - GUC_CTB_MSG_MIN_LEN;
+       payload = &hxg[GUC_HXG_MSG_MIN_LEN];
+       action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
+       len = hxg_len - GUC_HXG_MSG_MIN_LEN;
 
        CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
 
@@ -801,6 +980,19 @@ static int ct_process_request(struct intel_guc_ct *ct, struct ct_incoming_msg *r
        case INTEL_GUC_ACTION_DEFAULT:
                ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
                break;
+       case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
+               ret = intel_guc_deregister_done_process_msg(guc, payload,
+                                                           len);
+               break;
+       case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
+               ret = intel_guc_sched_done_process_msg(guc, payload, len);
+               break;
+       case INTEL_GUC_ACTION_CONTEXT_RESET_NOTIFICATION:
+               ret = intel_guc_context_reset_process_msg(guc, payload, len);
+               break;
+       case INTEL_GUC_ACTION_ENGINE_FAILURE_NOTIFICATION:
+               ret = intel_guc_engine_failure_process_msg(guc, payload, len);
+               break;
        default:
                ret = -EOPNOTSUPP;
                break;
@@ -855,29 +1047,24 @@ static void ct_incoming_request_worker_func(struct work_struct *w)
                queue_work(system_unbound_wq, &ct->requests.worker);
 }
 
-/**
- * DOC: CTB GuC to Host request
- *
- * Format of the CTB GuC to Host request message is as follows::
- *
- *      +------------+---------+---------+---------+---------+---------+
- *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
- *      +------------+---------+---------+---------+---------+---------+
- *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
- *      +   HEADER   +---------+---------+---------+---------+---------+
- *      |            |    0    |    1    |    2    |   ...   |    n    |
- *      +============+=========+=========+=========+=========+=========+
- *      |     len    |            request specific data                |
- *      +------+-----+---------+---------+---------+---------+---------+
- *
- *                   ^-----------------------len-----------------------^
- */
-
-static int ct_handle_request(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
+static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
 {
+       const u32 *hxg = &request->msg[GUC_CTB_MSG_MIN_LEN];
+       u32 action = FIELD_GET(GUC_HXG_EVENT_MSG_0_ACTION, hxg[0]);
        unsigned long flags;
 
-       GEM_BUG_ON(ct_header_is_response(request->msg[0]));
+       GEM_BUG_ON(FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]) != GUC_HXG_TYPE_EVENT);
+
+       /*
+        * The G2H space must be returned here, in IRQ context, rather than in
+        * the workqueue below: the CTB processing done there can itself send
+        * CTBs, which would create a circular dependency (and a deadlock) if
+        * the space were only released from the worker.
+        */
+       switch (action) {
+       case INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_DONE:
+       case INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE:
+               g2h_release_space(ct, request->size);
+       }
 
        spin_lock_irqsave(&ct->requests.lock, flags);
        list_add_tail(&request->link, &ct->requests.incoming);
@@ -887,15 +1074,53 @@ static int ct_handle_request(struct intel_guc_ct *ct, struct ct_incoming_msg *re
        return 0;
 }
 
-static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
+static int ct_handle_hxg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
 {
-       u32 header = msg->msg[0];
+       u32 origin, type;
+       u32 *hxg;
        int err;
 
-       if (ct_header_is_response(header))
+       if (unlikely(msg->size < GUC_CTB_HXG_MSG_MIN_LEN))
+               return -EBADMSG;
+
+       hxg = &msg->msg[GUC_CTB_MSG_MIN_LEN];
+
+       origin = FIELD_GET(GUC_HXG_MSG_0_ORIGIN, hxg[0]);
+       if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) {
+               err = -EPROTO;
+               goto failed;
+       }
+
+       type = FIELD_GET(GUC_HXG_MSG_0_TYPE, hxg[0]);
+       switch (type) {
+       case GUC_HXG_TYPE_EVENT:
+               err = ct_handle_event(ct, msg);
+               break;
+       case GUC_HXG_TYPE_RESPONSE_SUCCESS:
+       case GUC_HXG_TYPE_RESPONSE_FAILURE:
                err = ct_handle_response(ct, msg);
+               break;
+       default:
+               err = -EOPNOTSUPP;
+       }
+
+       if (unlikely(err)) {
+failed:
+               CT_ERROR(ct, "Failed to handle HXG message (%pe) %*ph\n",
+                        ERR_PTR(err), 4 * GUC_HXG_MSG_MIN_LEN, hxg);
+       }
+       return err;
+}
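
ct_handle_hxg() dispatches purely on bitfields of the first HXG dword: the origin must be the GuC, and the type selects between an event (queued for the worker) and a response (matched against a pending fence). A sketch of that dispatch with hypothetical field positions and values (the real GUC_HXG_MSG_0_* definitions live in the ABI headers and may differ):

    #include <stdint.h>

    /* Hypothetical layout: origin in bit 31, type in bits 30:28. */
    #define HXG_ORIGIN_GUC_BIT   (1u << 31)
    #define HXG_TYPE_MASK        0x70000000u
    #define HXG_TYPE_SHIFT       28

    enum hxg_type { HXG_EVENT = 1, HXG_RESPONSE_FAILURE = 6, HXG_RESPONSE_SUCCESS = 7 };

    /* Returns 0 for events, 1 for responses, negative for anything else. */
    static int classify_hxg(uint32_t hxg0)
    {
            if (!(hxg0 & HXG_ORIGIN_GUC_BIT))
                    return -1;                      /* host-originated: reject */

            switch ((hxg0 & HXG_TYPE_MASK) >> HXG_TYPE_SHIFT) {
            case HXG_EVENT:
                    return 0;
            case HXG_RESPONSE_FAILURE:
            case HXG_RESPONSE_SUCCESS:
                    return 1;
            default:
                    return -1;                      /* unsupported type */
            }
    }
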
+
+static void ct_handle_msg(struct intel_guc_ct *ct, struct ct_incoming_msg *msg)
+{
+       u32 format = FIELD_GET(GUC_CTB_MSG_0_FORMAT, msg->msg[0]);
+       int err;
+
+       if (format == GUC_CTB_FORMAT_HXG)
+               err = ct_handle_hxg(ct, msg);
        else
-               err = ct_handle_request(ct, msg);
+               err = -EOPNOTSUPP;
 
        if (unlikely(err)) {
                CT_ERROR(ct, "Failed to process CT message (%pe) %*ph\n",
@@ -958,3 +1183,25 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
 
        ct_try_receive_message(ct);
 }
+
+void intel_guc_ct_print_info(struct intel_guc_ct *ct,
+                            struct drm_printer *p)
+{
+       drm_printf(p, "CT %s\n", enableddisabled(ct->enabled));
+
+       if (!ct->enabled)
+               return;
+
+       drm_printf(p, "H2G Space: %u\n",
+                  atomic_read(&ct->ctbs.send.space) * 4);
+       drm_printf(p, "Head: %u\n",
+                  ct->ctbs.send.desc->head);
+       drm_printf(p, "Tail: %u\n",
+                  ct->ctbs.send.desc->tail);
+       drm_printf(p, "G2H Space: %u\n",
+                  atomic_read(&ct->ctbs.recv.space) * 4);
+       drm_printf(p, "Head: %u\n",
+                  ct->ctbs.recv.desc->head);
+       drm_printf(p, "Tail: %u\n",
+                  ct->ctbs.recv.desc->tail);
+}
index cb222f2..f709a19 100644 (file)
@@ -9,11 +9,14 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
+#include <linux/ktime.h>
+#include <linux/wait.h>
 
 #include "intel_guc_fwif.h"
 
 struct i915_vma;
 struct intel_guc;
+struct drm_printer;
 
 /**
  * DOC: Command Transport (CT).
@@ -31,16 +34,25 @@ struct intel_guc;
  * @lock: protects access to the commands buffer and buffer descriptor
  * @desc: pointer to the buffer descriptor
  * @cmds: pointer to the commands buffer
- * @size: size of the commands buffer
+ * @size: size of the commands buffer in dwords
+ * @resv_space: reserved space in buffer in dwords
+ * @head: local shadow copy of head in dwords
+ * @tail: local shadow copy of tail in dwords
+ * @space: local shadow copy of space in dwords
+ * @broken: flag to indicate if descriptor data is broken
  */
 struct intel_guc_ct_buffer {
        spinlock_t lock;
        struct guc_ct_buffer_desc *desc;
        u32 *cmds;
        u32 size;
+       u32 resv_space;
+       u32 tail;
+       u32 head;
+       atomic_t space;
+       bool broken;
 };
 
-
 /** Top-level structure for Command Transport related data
  *
  * Includes a pair of CT buffers for bi-directional communication and tracking
@@ -58,8 +70,11 @@ struct intel_guc_ct {
 
        struct tasklet_struct receive_tasklet;
 
+       /** @wq: wait queue for g2h channel */
+       wait_queue_head_t wq;
+
        struct {
-               u32 last_fence; /* last fence used to send request */
+               u16 last_fence; /* last fence used to send request */
 
                spinlock_t lock; /* protects pending requests list */
                struct list_head pending; /* requests waiting for response */
@@ -67,6 +82,9 @@ struct intel_guc_ct {
                struct list_head incoming; /* incoming requests */
                struct work_struct worker; /* handler for incoming requests */
        } requests;
+
+       /** @stall_time: time at which the current CTB submission stall started */
+       ktime_t stall_time;
 };
 
 void intel_guc_ct_init_early(struct intel_guc_ct *ct);
@@ -85,8 +103,18 @@ static inline bool intel_guc_ct_enabled(struct intel_guc_ct *ct)
        return ct->enabled;
 }
 
+#define INTEL_GUC_CT_SEND_NB           BIT(31)
+#define INTEL_GUC_CT_SEND_G2H_DW_SHIFT 0
+#define INTEL_GUC_CT_SEND_G2H_DW_MASK  (0xff << INTEL_GUC_CT_SEND_G2H_DW_SHIFT)
+#define MAKE_SEND_FLAGS(len) ({ \
+       typeof(len) len_ = (len); \
+       GEM_BUG_ON(!FIELD_FIT(INTEL_GUC_CT_SEND_G2H_DW_MASK, len_)); \
+       (FIELD_PREP(INTEL_GUC_CT_SEND_G2H_DW_MASK, len_) | INTEL_GUC_CT_SEND_NB); \
+})
 int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
-                     u32 *response_buf, u32 response_buf_size);
+                     u32 *response_buf, u32 response_buf_size, u32 flags);
 void intel_guc_ct_event_handler(struct intel_guc_ct *ct);
 
+void intel_guc_ct_print_info(struct intel_guc_ct *ct, struct drm_printer *p);
+
 #endif /* _INTEL_GUC_CT_H_ */
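
With the flag bits and MAKE_SEND_FLAGS() above, a caller that expects a fixed-size G2H completion can issue a fire-and-forget request roughly as follows; this is a sketch against the declarations in this header, and the action opcode/payload are placeholders rather than a real GuC action:

    /* Assumes the declarations from intel_guc_ct.h above. */
    static int send_fire_and_forget(struct intel_guc_ct *ct, u32 ctx_id)
    {
            u32 action[] = {
                    0xDEAD0001,     /* placeholder H2G action opcode */
                    ctx_id,
            };
            /* Expect a 2-dword G2H completion; credits are reserved up front. */
            u32 flags = MAKE_SEND_FLAGS(2);

            return intel_guc_ct_send(ct, action, ARRAY_SIZE(action),
                                     NULL, 0, flags);
    }
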
index fe7cb7b..887c8c8 100644 (file)
@@ -9,6 +9,10 @@
 #include "intel_guc.h"
 #include "intel_guc_debugfs.h"
 #include "intel_guc_log_debugfs.h"
+#include "gt/uc/intel_guc_ct.h"
+#include "gt/uc/intel_guc_ads.h"
+#include "gt/uc/intel_guc_submission.h"
+#include "gt/uc/intel_guc_slpc.h"
 
 static int guc_info_show(struct seq_file *m, void *data)
 {
@@ -22,16 +26,57 @@ static int guc_info_show(struct seq_file *m, void *data)
        drm_puts(&p, "\n");
        intel_guc_log_info(&guc->log, &p);
 
-       /* Add more as required ... */
+       if (!intel_guc_submission_is_used(guc))
+               return 0;
+
+       intel_guc_ct_print_info(&guc->ct, &p);
+       intel_guc_submission_print_info(guc, &p);
+       intel_guc_ads_print_policy_info(guc, &p);
 
        return 0;
 }
 DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_info);
 
+static int guc_registered_contexts_show(struct seq_file *m, void *data)
+{
+       struct intel_guc *guc = m->private;
+       struct drm_printer p = drm_seq_file_printer(m);
+
+       if (!intel_guc_submission_is_used(guc))
+               return -ENODEV;
+
+       intel_guc_submission_print_context_info(guc, &p);
+
+       return 0;
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_registered_contexts);
+
+static int guc_slpc_info_show(struct seq_file *m, void *unused)
+{
+       struct intel_guc *guc = m->private;
+       struct intel_guc_slpc *slpc = &guc->slpc;
+       struct drm_printer p = drm_seq_file_printer(m);
+
+       if (!intel_guc_slpc_is_used(guc))
+               return -ENODEV;
+
+       return intel_guc_slpc_print_info(slpc, &p);
+}
+DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_slpc_info);
+
+static bool intel_eval_slpc_support(void *data)
+{
+       struct intel_guc *guc = (struct intel_guc *)data;
+
+       return intel_guc_slpc_is_used(guc);
+}
+
 void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root)
 {
        static const struct debugfs_gt_file files[] = {
                { "guc_info", &guc_info_fops, NULL },
+               { "guc_registered_contexts", &guc_registered_contexts_fops, NULL },
+               { "guc_slpc_info", &guc_slpc_info_fops, &intel_eval_slpc_support},
        };
 
        if (!intel_guc_is_supported(guc))
index e9a9d85..fa4be13 100644 (file)
 #include "gt/intel_engine_types.h"
 
 #include "abi/guc_actions_abi.h"
+#include "abi/guc_actions_slpc_abi.h"
 #include "abi/guc_errors_abi.h"
 #include "abi/guc_communication_mmio_abi.h"
 #include "abi/guc_communication_ctb_abi.h"
 #include "abi/guc_messages_abi.h"
 
+/* Payload length only i.e. don't include G2H header length */
+#define G2H_LEN_DW_SCHED_CONTEXT_MODE_SET      2
+#define G2H_LEN_DW_DEREGISTER_CONTEXT          1
+
+#define GUC_CONTEXT_DISABLE            0
+#define GUC_CONTEXT_ENABLE             1
+
 #define GUC_CLIENT_PRIORITY_KMD_HIGH   0
 #define GUC_CLIENT_PRIORITY_HIGH       1
 #define GUC_CLIENT_PRIORITY_KMD_NORMAL 2
 #define GUC_CLIENT_PRIORITY_NORMAL     3
 #define GUC_CLIENT_PRIORITY_NUM                4
 
-#define GUC_MAX_STAGE_DESCRIPTORS      1024
-#define        GUC_INVALID_STAGE_ID            GUC_MAX_STAGE_DESCRIPTORS
+#define GUC_MAX_LRC_DESCRIPTORS                65535
+#define        GUC_INVALID_LRC_ID              GUC_MAX_LRC_DESCRIPTORS
 
 #define GUC_RENDER_ENGINE              0
 #define GUC_VIDEO_ENGINE               1
 #define   GUC_LOG_ALLOC_IN_MEGABYTE    (1 << 3)
 #define   GUC_LOG_CRASH_SHIFT          4
 #define   GUC_LOG_CRASH_MASK           (0x3 << GUC_LOG_CRASH_SHIFT)
-#define   GUC_LOG_DPC_SHIFT            6
-#define   GUC_LOG_DPC_MASK             (0x7 << GUC_LOG_DPC_SHIFT)
-#define   GUC_LOG_ISR_SHIFT            9
-#define   GUC_LOG_ISR_MASK             (0x7 << GUC_LOG_ISR_SHIFT)
+#define   GUC_LOG_DEBUG_SHIFT          6
+#define   GUC_LOG_DEBUG_MASK           (0xF << GUC_LOG_DEBUG_SHIFT)
 #define   GUC_LOG_BUF_ADDR_SHIFT       12
 
 #define GUC_CTL_WA                     1
 #define GUC_CTL_FEATURE                        2
 #define   GUC_CTL_DISABLE_SCHEDULER    (1 << 14)
+#define   GUC_CTL_ENABLE_SLPC          BIT(2)
 
 #define GUC_CTL_DEBUG                  3
 #define   GUC_LOG_VERBOSITY_SHIFT      0
 #define GUC_ID_TO_ENGINE_INSTANCE(guc_id) \
        (((guc_id) & GUC_ENGINE_INSTANCE_MASK) >> GUC_ENGINE_INSTANCE_SHIFT)
 
+#define SLPC_EVENT(id, c) (\
+FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ID, id) | \
+FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, c) \
+)
+
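
SLPC_EVENT() just packs an event id and its argument count into the second dword of a HOST2GUC_PC_SLPC_REQUEST message. The idea, shown with hypothetical field positions (the real HOST2GUC_PC_SLPC_REQUEST_MSG_1_* masks are defined in the SLPC ABI header):

    #include <stdint.h>

    /* Hypothetical layout: event id in bits 15:8, argument count in bits 7:0. */
    #define EVENT_ID_SHIFT   8
    #define EVENT_ARGC_MASK  0xffu

    static uint32_t slpc_event(uint32_t id, uint32_t argc)
    {
            return (id << EVENT_ID_SHIFT) | (argc & EVENT_ARGC_MASK);
    }
    /* e.g. slpc_event(SLPC_EVENT_PARAMETER_SET, 2), as used later in this series */
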
 static inline u8 engine_class_to_guc_class(u8 class)
 {
        BUILD_BUG_ON(GUC_RENDER_CLASS != RENDER_CLASS);
@@ -177,66 +189,40 @@ struct guc_process_desc {
        u32 reserved[30];
 } __packed;
 
-/* engine id and context id is packed into guc_execlist_context.context_id*/
-#define GUC_ELC_CTXID_OFFSET           0
-#define GUC_ELC_ENGINE_OFFSET          29
+#define CONTEXT_REGISTRATION_FLAG_KMD  BIT(0)
 
-/* The execlist context including software and HW information */
-struct guc_execlist_context {
-       u32 context_desc;
-       u32 context_id;
-       u32 ring_status;
-       u32 ring_lrca;
-       u32 ring_begin;
-       u32 ring_end;
-       u32 ring_next_free_location;
-       u32 ring_current_tail_pointer_value;
-       u8 engine_state_submit_value;
-       u8 engine_state_wait_value;
-       u16 pagefault_count;
-       u16 engine_submit_queue_count;
-} __packed;
+#define CONTEXT_POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000
+#define CONTEXT_POLICY_DEFAULT_PREEMPTION_TIME_US 500000
+
+/* Preempt to idle on quantum expiry */
+#define CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE    BIT(0)
 
 /*
- * This structure describes a stage set arranged for a particular communication
- * between uKernel (GuC) and Driver (KMD). Technically, this is known as a
- * "GuC Context descriptor" in the specs, but we use the term "stage descriptor"
- * to avoid confusion with all the other things already named "context" in the
- * driver. A static pool of these descriptors are stored inside a GEM object
- * (stage_desc_pool) which is held for the entire lifetime of our interaction
- * with the GuC, being allocated before the GuC is loaded with its firmware.
+ * GuC Context registration descriptor.
+ * FIXME: This is only required to exist during context registration.
+ * The current 1:1 between guc_lrc_desc and LRCs for the lifetime of the LRC
+ * is not required.
  */
-struct guc_stage_desc {
-       u32 sched_common_area;
-       u32 stage_id;
-       u32 pas_id;
-       u8 engines_used;
-       u64 db_trigger_cpu;
-       u32 db_trigger_uk;
-       u64 db_trigger_phy;
-       u16 db_id;
-
-       struct guc_execlist_context lrc[GUC_MAX_ENGINES_NUM];
-
-       u8 attribute;
-
+struct guc_lrc_desc {
+       u32 hw_context_desc;
+       u32 slpm_perf_mode_hint;        /* SLPC v1 only */
+       u32 slpm_freq_hint;
+       u32 engine_submit_mask;         /* In logical space */
+       u8 engine_class;
+       u8 reserved0[3];
        u32 priority;
-
-       u32 wq_sampled_tail_offset;
-       u32 wq_total_submit_enqueues;
-
        u32 process_desc;
        u32 wq_addr;
        u32 wq_size;
-
-       u32 engine_presence;
-
-       u8 engine_suspended;
-
-       u8 reserved0[3];
-       u64 reserved1[1];
-
-       u64 desc_private;
+       u32 context_flags;              /* CONTEXT_REGISTRATION_* */
+       /* Time for one workload to execute (in microseconds) */
+       u32 execution_quantum;
+       /* Time to wait for a preemption request to complete before issuing a
+        * reset (in microseconds).
+        */
+       u32 preemption_timeout;
+       u32 policy_flags;               /* CONTEXT_POLICY_* */
+       u32 reserved1[19];
 } __packed;
 
 #define GUC_POWER_UNSPECIFIED  0
@@ -247,32 +233,14 @@ struct guc_stage_desc {
 
 /* Scheduling policy settings */
 
-/* Reset engine upon preempt failure */
-#define POLICY_RESET_ENGINE            (1<<0)
-/* Preempt to idle on quantum expiry */
-#define POLICY_PREEMPT_TO_IDLE         (1<<1)
+#define GLOBAL_POLICY_MAX_NUM_WI 15
 
-#define POLICY_MAX_NUM_WI 15
-#define POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000
-#define POLICY_DEFAULT_EXECUTION_QUANTUM_US 1000000
-#define POLICY_DEFAULT_PREEMPTION_TIME_US 500000
-#define POLICY_DEFAULT_FAULT_TIME_US 250000
+/* Don't reset an engine upon preemption failure */
+#define GLOBAL_POLICY_DISABLE_ENGINE_RESET                             BIT(0)
 
-struct guc_policy {
-       /* Time for one workload to execute. (in micro seconds) */
-       u32 execution_quantum;
-       /* Time to wait for a preemption request to completed before issuing a
-        * reset. (in micro seconds). */
-       u32 preemption_time;
-       /* How much time to allow to run after the first fault is observed.
-        * Then preempt afterwards. (in micro seconds) */
-       u32 fault_time;
-       u32 policy_flags;
-       u32 reserved[8];
-} __packed;
+#define GLOBAL_POLICY_DEFAULT_DPC_PROMOTE_TIME_US 500000
 
 struct guc_policies {
-       struct guc_policy policy[GUC_CLIENT_PRIORITY_NUM][GUC_MAX_ENGINE_CLASSES];
        u32 submission_queue_depth[GUC_MAX_ENGINE_CLASSES];
        /* In micro seconds. How much time to allow before DPC processing is
         * called back via interrupt (to prevent DPC queue drain starving).
@@ -286,6 +254,7 @@ struct guc_policies {
         * idle. */
        u32 max_num_work_items;
 
+       u32 global_flags;
        u32 reserved[4];
 } __packed;
 
@@ -311,29 +280,13 @@ struct guc_gt_system_info {
        u32 generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_MAX];
 } __packed;
 
-/* Clients info */
-struct guc_ct_pool_entry {
-       struct guc_ct_buffer_desc desc;
-       u32 reserved[7];
-} __packed;
-
-#define GUC_CT_POOL_SIZE       2
-
-struct guc_clients_info {
-       u32 clients_num;
-       u32 reserved0[13];
-       u32 ct_pool_addr;
-       u32 ct_pool_count;
-       u32 reserved[4];
-} __packed;
-
 /* GuC Additional Data Struct */
 struct guc_ads {
        struct guc_mmio_reg_set reg_state_list[GUC_MAX_ENGINE_CLASSES][GUC_MAX_INSTANCES_PER_CLASS];
        u32 reserved0;
        u32 scheduler_policies;
        u32 gt_system_info;
-       u32 clients_info;
+       u32 reserved1;
        u32 control_data;
        u32 golden_context_lrca[GUC_MAX_ENGINE_CLASSES];
        u32 eng_state_size[GUC_MAX_ENGINE_CLASSES];
@@ -344,8 +297,7 @@ struct guc_ads {
 /* GuC logging structures */
 
 enum guc_log_buffer_type {
-       GUC_ISR_LOG_BUFFER,
-       GUC_DPC_LOG_BUFFER,
+       GUC_DEBUG_LOG_BUFFER,
        GUC_CRASH_DUMP_LOG_BUFFER,
        GUC_MAX_LOG_BUFFER
 };
@@ -414,23 +366,6 @@ struct guc_shared_ctx_data {
        struct guc_ctx_report preempt_ctx_report[GUC_MAX_ENGINES_NUM];
 } __packed;
 
-#define __INTEL_GUC_MSG_GET(T, m) \
-       (((m) & INTEL_GUC_MSG_ ## T ## _MASK) >> INTEL_GUC_MSG_ ## T ## _SHIFT)
-#define INTEL_GUC_MSG_TO_TYPE(m)       __INTEL_GUC_MSG_GET(TYPE, m)
-#define INTEL_GUC_MSG_TO_DATA(m)       __INTEL_GUC_MSG_GET(DATA, m)
-#define INTEL_GUC_MSG_TO_CODE(m)       __INTEL_GUC_MSG_GET(CODE, m)
-
-#define __INTEL_GUC_MSG_TYPE_IS(T, m) \
-       (INTEL_GUC_MSG_TO_TYPE(m) == INTEL_GUC_MSG_TYPE_ ## T)
-#define INTEL_GUC_MSG_IS_REQUEST(m)    __INTEL_GUC_MSG_TYPE_IS(REQUEST, m)
-#define INTEL_GUC_MSG_IS_RESPONSE(m)   __INTEL_GUC_MSG_TYPE_IS(RESPONSE, m)
-
-#define INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(m) \
-        (typecheck(u32, (m)) && \
-         ((m) & (INTEL_GUC_MSG_TYPE_MASK | INTEL_GUC_MSG_CODE_MASK)) == \
-         ((INTEL_GUC_MSG_TYPE_RESPONSE << INTEL_GUC_MSG_TYPE_SHIFT) | \
-          (INTEL_GUC_RESPONSE_STATUS_SUCCESS << INTEL_GUC_MSG_CODE_SHIFT)))
-
 /* This action will be programmed in C1BC - SOFT_SCRATCH_15_REG */
 enum intel_guc_recv_message {
        INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED = BIT(1),
index c36d5eb..ac0931f 100644 (file)
@@ -197,10 +197,8 @@ static bool guc_check_log_buf_overflow(struct intel_guc_log *log,
 static unsigned int guc_get_log_buffer_size(enum guc_log_buffer_type type)
 {
        switch (type) {
-       case GUC_ISR_LOG_BUFFER:
-               return ISR_BUFFER_SIZE;
-       case GUC_DPC_LOG_BUFFER:
-               return DPC_BUFFER_SIZE;
+       case GUC_DEBUG_LOG_BUFFER:
+               return DEBUG_BUFFER_SIZE;
        case GUC_CRASH_DUMP_LOG_BUFFER:
                return CRASH_BUFFER_SIZE;
        default:
@@ -245,7 +243,7 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log)
        src_data += PAGE_SIZE;
        dst_data += PAGE_SIZE;
 
-       for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+       for (type = GUC_DEBUG_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
                /*
                 * Make a copy of the state structure, inside GuC log buffer
                 * (which is uncached mapped), on the stack to avoid reading
@@ -463,21 +461,16 @@ int intel_guc_log_create(struct intel_guc_log *log)
         *  +===============================+ 00B
         *  |    Crash dump state header    |
         *  +-------------------------------+ 32B
-        *  |       DPC state header        |
+        *  |      Debug state header       |
         *  +-------------------------------+ 64B
-        *  |       ISR state header        |
-        *  +-------------------------------+ 96B
         *  |                               |
         *  +===============================+ PAGE_SIZE (4KB)
         *  |        Crash Dump logs        |
         *  +===============================+ + CRASH_SIZE
-        *  |           DPC logs            |
-        *  +===============================+ + DPC_SIZE
-        *  |           ISR logs            |
-        *  +===============================+ + ISR_SIZE
+        *  |          Debug logs           |
+        *  +===============================+ + DEBUG_SIZE
         */
-       guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DPC_BUFFER_SIZE +
-                       ISR_BUFFER_SIZE;
+       guc_log_size = PAGE_SIZE + CRASH_BUFFER_SIZE + DEBUG_BUFFER_SIZE;
 
        vma = intel_guc_allocate_vma(guc, guc_log_size);
        if (IS_ERR(vma)) {
@@ -675,10 +668,8 @@ static const char *
 stringify_guc_log_type(enum guc_log_buffer_type type)
 {
        switch (type) {
-       case GUC_ISR_LOG_BUFFER:
-               return "ISR";
-       case GUC_DPC_LOG_BUFFER:
-               return "DPC";
+       case GUC_DEBUG_LOG_BUFFER:
+               return "DEBUG";
        case GUC_CRASH_DUMP_LOG_BUFFER:
                return "CRASH";
        default:
@@ -708,7 +699,7 @@ void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p)
 
        drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count);
 
-       for (type = GUC_ISR_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
+       for (type = GUC_DEBUG_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) {
                drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n",
                           stringify_guc_log_type(type),
                           log->stats[type].flush,
index 11fccd0..ac1ee1d 100644 (file)
@@ -17,12 +17,10 @@ struct intel_guc;
 
 #ifdef CONFIG_DRM_I915_DEBUG_GUC
 #define CRASH_BUFFER_SIZE      SZ_2M
-#define DPC_BUFFER_SIZE                SZ_8M
-#define ISR_BUFFER_SIZE                SZ_8M
+#define DEBUG_BUFFER_SIZE      SZ_16M
 #else
 #define CRASH_BUFFER_SIZE      SZ_8K
-#define DPC_BUFFER_SIZE                SZ_32K
-#define ISR_BUFFER_SIZE                SZ_32K
+#define DEBUG_BUFFER_SIZE      SZ_64K
 #endif
 
 /*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
new file mode 100644 (file)
index 0000000..fc805d4
--- /dev/null
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "intel_guc_rc.h"
+#include "gt/intel_gt.h"
+#include "i915_drv.h"
+
+static bool __guc_rc_supported(struct intel_guc *guc)
+{
+       /* GuC RC is unavailable for pre-Gen12 */
+       return guc->submission_supported &&
+               GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
+}
+
+static bool __guc_rc_selected(struct intel_guc *guc)
+{
+       if (!intel_guc_rc_is_supported(guc))
+               return false;
+
+       return guc->submission_selected;
+}
+
+void intel_guc_rc_init_early(struct intel_guc *guc)
+{
+       guc->rc_supported = __guc_rc_supported(guc);
+       guc->rc_selected = __guc_rc_selected(guc);
+}
+
+static int guc_action_control_gucrc(struct intel_guc *guc, bool enable)
+{
+       u32 rc_mode = enable ? INTEL_GUCRC_FIRMWARE_CONTROL :
+                               INTEL_GUCRC_HOST_CONTROL;
+       u32 action[] = {
+               INTEL_GUC_ACTION_SETUP_PC_GUCRC,
+               rc_mode
+       };
+       int ret;
+
+       ret = intel_guc_send(guc, action, ARRAY_SIZE(action));
+       ret = ret > 0 ? -EPROTO : ret;
+
+       return ret;
+}
+
+static int __guc_rc_control(struct intel_guc *guc, bool enable)
+{
+       struct intel_gt *gt = guc_to_gt(guc);
+       struct drm_device *drm = &guc_to_gt(guc)->i915->drm;
+       int ret;
+
+       if (!intel_uc_uses_guc_rc(&gt->uc))
+               return -EOPNOTSUPP;
+
+       if (!intel_guc_is_ready(guc))
+               return -EINVAL;
+
+       ret = guc_action_control_gucrc(guc, enable);
+       if (ret) {
+               drm_err(drm, "Failed to %s GuC RC (%pe)\n",
+                       enabledisable(enable), ERR_PTR(ret));
+               return ret;
+       }
+
+       drm_info(&gt->i915->drm, "GuC RC: %s\n",
+                enableddisabled(enable));
+
+       return 0;
+}
+
+int intel_guc_rc_enable(struct intel_guc *guc)
+{
+       return __guc_rc_control(guc, true);
+}
+
+int intel_guc_rc_disable(struct intel_guc *guc)
+{
+       return __guc_rc_control(guc, false);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.h
new file mode 100644 (file)
index 0000000..57e86c3
--- /dev/null
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_RC_H_
+#define _INTEL_GUC_RC_H_
+
+#include "intel_guc_submission.h"
+
+void intel_guc_rc_init_early(struct intel_guc *guc);
+
+static inline bool intel_guc_rc_is_supported(struct intel_guc *guc)
+{
+       return guc->rc_supported;
+}
+
+static inline bool intel_guc_rc_is_wanted(struct intel_guc *guc)
+{
+       return guc->submission_selected && intel_guc_rc_is_supported(guc);
+}
+
+static inline bool intel_guc_rc_is_used(struct intel_guc *guc)
+{
+       return intel_guc_submission_is_used(guc) && intel_guc_rc_is_wanted(guc);
+}
+
+int intel_guc_rc_enable(struct intel_guc *guc);
+int intel_guc_rc_disable(struct intel_guc *guc);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
new file mode 100644 (file)
index 0000000..65a3e7f
--- /dev/null
@@ -0,0 +1,626 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_guc_slpc.h"
+#include "gt/intel_gt.h"
+
+static inline struct intel_guc *slpc_to_guc(struct intel_guc_slpc *slpc)
+{
+       return container_of(slpc, struct intel_guc, slpc);
+}
+
+static inline struct intel_gt *slpc_to_gt(struct intel_guc_slpc *slpc)
+{
+       return guc_to_gt(slpc_to_guc(slpc));
+}
+
+static inline struct drm_i915_private *slpc_to_i915(struct intel_guc_slpc *slpc)
+{
+       return slpc_to_gt(slpc)->i915;
+}
+
+static bool __detect_slpc_supported(struct intel_guc *guc)
+{
+       /* GuC SLPC is unavailable for pre-Gen12 */
+       return guc->submission_supported &&
+               GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
+}
+
+static bool __guc_slpc_selected(struct intel_guc *guc)
+{
+       if (!intel_guc_slpc_is_supported(guc))
+               return false;
+
+       return guc->submission_selected;
+}
+
+void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc)
+{
+       struct intel_guc *guc = slpc_to_guc(slpc);
+
+       slpc->supported = __detect_slpc_supported(guc);
+       slpc->selected = __guc_slpc_selected(guc);
+}
+
+static void slpc_mem_set_param(struct slpc_shared_data *data,
+                              u32 id, u32 value)
+{
+       GEM_BUG_ON(id >= SLPC_MAX_OVERRIDE_PARAMETERS);
+       /*
+        * When the flag bit is set, the corresponding value will be read
+        * and applied by SLPC.
+        */
+       data->override_params.bits[id >> 5] |= (1 << (id % 32));
+       data->override_params.values[id] = value;
+}
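
The override bitmap write above splits the parameter id into a word index and a bit position: id >> 5 selects the 32-bit word and id % 32 the bit within it, so id 40 lands in bits[1], bit 8. The same mapping as a standalone helper:

    #include <stdint.h>

    #define OVERRIDE_WORDS 8                /* illustrative bitmap size */

    static uint32_t override_bits[OVERRIDE_WORDS];

    static void set_override_flag(unsigned int id)
    {
            /* id 40 -> word 40 >> 5 == 1, bit 40 % 32 == 8 */
            override_bits[id >> 5] |= 1u << (id % 32);
    }
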
+
+static void slpc_mem_set_enabled(struct slpc_shared_data *data,
+                                u8 enable_id, u8 disable_id)
+{
+       /*
+        * Enabling a param involves setting the enable_id
+        * to 1 and disable_id to 0.
+        */
+       slpc_mem_set_param(data, enable_id, 1);
+       slpc_mem_set_param(data, disable_id, 0);
+}
+
+static void slpc_mem_set_disabled(struct slpc_shared_data *data,
+                                 u8 enable_id, u8 disable_id)
+{
+       /*
+        * Disabling a param involves setting the enable_id
+        * to 0 and disable_id to 1.
+        */
+       slpc_mem_set_param(data, disable_id, 1);
+       slpc_mem_set_param(data, enable_id, 0);
+}
+
+int intel_guc_slpc_init(struct intel_guc_slpc *slpc)
+{
+       struct intel_guc *guc = slpc_to_guc(slpc);
+       struct drm_i915_private *i915 = slpc_to_i915(slpc);
+       u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data));
+       int err;
+
+       GEM_BUG_ON(slpc->vma);
+
+       err = intel_guc_allocate_and_map_vma(guc, size, &slpc->vma, (void **)&slpc->vaddr);
+       if (unlikely(err)) {
+               drm_err(&i915->drm,
+                       "Failed to allocate SLPC struct (err=%pe)\n",
+                       ERR_PTR(err));
+               return err;
+       }
+
+       slpc->max_freq_softlimit = 0;
+       slpc->min_freq_softlimit = 0;
+
+       return err;
+}
+
+static u32 slpc_get_state(struct intel_guc_slpc *slpc)
+{
+       struct slpc_shared_data *data;
+
+       GEM_BUG_ON(!slpc->vma);
+
+       drm_clflush_virt_range(slpc->vaddr, sizeof(u32));
+       data = slpc->vaddr;
+
+       return data->header.global_state;
+}
+
+static int guc_action_slpc_set_param(struct intel_guc *guc, u8 id, u32 value)
+{
+       u32 request[] = {
+               GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+               SLPC_EVENT(SLPC_EVENT_PARAMETER_SET, 2),
+               id,
+               value,
+       };
+       int ret;
+
+       ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
+
+       return ret > 0 ? -EPROTO : ret;
+}
+
+static int guc_action_slpc_unset_param(struct intel_guc *guc, u8 id)
+{
+       u32 request[] = {
+               GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+               SLPC_EVENT(SLPC_EVENT_PARAMETER_UNSET, 2),
+               id,
+       };
+
+       return intel_guc_send(guc, request, ARRAY_SIZE(request));
+}
+
+static bool slpc_is_running(struct intel_guc_slpc *slpc)
+{
+       return slpc_get_state(slpc) == SLPC_GLOBAL_STATE_RUNNING;
+}
+
+static int guc_action_slpc_query(struct intel_guc *guc, u32 offset)
+{
+       u32 request[] = {
+               GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+               SLPC_EVENT(SLPC_EVENT_QUERY_TASK_STATE, 2),
+               offset,
+               0,
+       };
+       int ret;
+
+       ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
+
+       return ret > 0 ? -EPROTO : ret;
+}
+
+static int slpc_query_task_state(struct intel_guc_slpc *slpc)
+{
+       struct intel_guc *guc = slpc_to_guc(slpc);
+       struct drm_i915_private *i915 = slpc_to_i915(slpc);
+       u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
+       int ret;
+
+       ret = guc_action_slpc_query(guc, offset);
+       if (unlikely(ret))
+               drm_err(&i915->drm, "Failed to query task state (%pe)\n",
+                       ERR_PTR(ret));
+
+       drm_clflush_virt_range(slpc->vaddr, SLPC_PAGE_SIZE_BYTES);
+
+       return ret;
+}
+
+static int slpc_set_param(struct intel_guc_slpc *slpc, u8 id, u32 value)
+{
+       struct intel_guc *guc = slpc_to_guc(slpc);
+       struct drm_i915_private *i915 = slpc_to_i915(slpc);
+       int ret;
+
+       GEM_BUG_ON(id >= SLPC_MAX_PARAM);
+
+       ret = guc_action_slpc_set_param(guc, id, value);
+       if (ret)
+               drm_err(&i915->drm, "Failed to set param %d to %u (%pe)\n",
+                       id, value, ERR_PTR(ret));
+
+       return ret;
+}
+
+static int slpc_unset_param(struct intel_guc_slpc *slpc,
+                           u8 id)
+{
+       struct intel_guc *guc = slpc_to_guc(slpc);
+
+       GEM_BUG_ON(id >= SLPC_MAX_PARAM);
+
+       return guc_action_slpc_unset_param(guc, id);
+}
+
+static const char *slpc_global_state_to_string(enum slpc_global_state state)
+{
+       switch (state) {
+       case SLPC_GLOBAL_STATE_NOT_RUNNING:
+               return "not running";
+       case SLPC_GLOBAL_STATE_INITIALIZING:
+               return "initializing";
+       case SLPC_GLOBAL_STATE_RESETTING:
+               return "resetting";
+       case SLPC_GLOBAL_STATE_RUNNING:
+               return "running";
+       case SLPC_GLOBAL_STATE_SHUTTING_DOWN:
+               return "shutting down";
+       case SLPC_GLOBAL_STATE_ERROR:
+               return "error";
+       default:
+               return "unknown";
+       }
+}
+
+static const char *slpc_get_state_string(struct intel_guc_slpc *slpc)
+{
+       return slpc_global_state_to_string(slpc_get_state(slpc));
+}
+
+static int guc_action_slpc_reset(struct intel_guc *guc, u32 offset)
+{
+       u32 request[] = {
+               GUC_ACTION_HOST2GUC_PC_SLPC_REQUEST,
+               SLPC_EVENT(SLPC_EVENT_RESET, 2),
+               offset,
+               0,
+       };
+       int ret;
+
+       ret = intel_guc_send(guc, request, ARRAY_SIZE(request));
+
+       return ret > 0 ? -EPROTO : ret;
+}
+
+static int slpc_reset(struct intel_guc_slpc *slpc)
+{
+       struct drm_i915_private *i915 = slpc_to_i915(slpc);
+       struct intel_guc *guc = slpc_to_guc(slpc);
+       u32 offset = intel_guc_ggtt_offset(guc, slpc->vma);
+       int ret;
+
+       ret = guc_action_slpc_reset(guc, offset);
+
+       if (unlikely(ret < 0)) {
+               drm_err(&i915->drm, "SLPC reset action failed (%pe)\n",
+                       ERR_PTR(ret));
+               return ret;
+       }
+
+       if (!ret) {
+               if (wait_for(slpc_is_running(slpc), SLPC_RESET_TIMEOUT_MS)) {
+                       drm_err(&i915->drm, "SLPC not enabled! State = %s\n",
+                               slpc_get_state_string(slpc));
+                       return -EIO;
+               }
+       }
+
+       return 0;
+}
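
slpc_reset() only reports success once the shared global state flips to RUNNING, using the usual poll-with-timeout idiom behind wait_for(). Roughly, as a standalone sketch (slpc_running() and the 1 ms poll interval are placeholders):

    #include <errno.h>
    #include <stdbool.h>
    #include <time.h>

    extern bool slpc_running(void);         /* stand-in for slpc_is_running() */

    static int wait_until_running(unsigned int timeout_ms)
    {
            struct timespec one_ms = { .tv_nsec = 1000 * 1000 };

            while (timeout_ms--) {
                    if (slpc_running())
                            return 0;
                    nanosleep(&one_ms, NULL);
            }
            return slpc_running() ? 0 : -EIO;
    }
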
+
+static u32 slpc_decode_min_freq(struct intel_guc_slpc *slpc)
+{
+       struct slpc_shared_data *data = slpc->vaddr;
+
+       GEM_BUG_ON(!slpc->vma);
+
+       return  DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MIN_UNSLICE_FREQ_MASK,
+                                 data->task_state_data.freq) *
+                                 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
+}
+
+static u32 slpc_decode_max_freq(struct intel_guc_slpc *slpc)
+{
+       struct slpc_shared_data *data = slpc->vaddr;
+
+       GEM_BUG_ON(!slpc->vma);
+
+       return  DIV_ROUND_CLOSEST(REG_FIELD_GET(SLPC_MAX_UNSLICE_FREQ_MASK,
+                                 data->task_state_data.freq) *
+                                 GT_FREQUENCY_MULTIPLIER, GEN9_FREQ_SCALER);
+}
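
The decode helpers above scale the raw task-state field into MHz with the platform multiplier and scaler; assuming GT_FREQUENCY_MULTIPLIER is 50 and GEN9_FREQ_SCALER is 3 (the values used elsewhere in i915; treat them as assumptions here), a raw value of 21 decodes to DIV_ROUND_CLOSEST(21 * 50, 3) = 350 MHz. The arithmetic, spelled out:

    #include <stdint.h>

    /* Assumed platform constants; see GT_FREQUENCY_MULTIPLIER / GEN9_FREQ_SCALER. */
    #define FREQ_MULT   50
    #define FREQ_SCALER 3

    static uint32_t decode_freq_mhz(uint32_t raw)
    {
            /* DIV_ROUND_CLOSEST(raw * 50, 3): raw 21 -> 350 MHz */
            return (raw * FREQ_MULT + FREQ_SCALER / 2) / FREQ_SCALER;
    }
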
+
+static void slpc_shared_data_reset(struct slpc_shared_data *data)
+{
+       memset(data, 0, sizeof(struct slpc_shared_data));
+
+       data->header.size = sizeof(struct slpc_shared_data);
+
+       /* Enable only GTPERF task, disable others */
+       slpc_mem_set_enabled(data, SLPC_PARAM_TASK_ENABLE_GTPERF,
+                            SLPC_PARAM_TASK_DISABLE_GTPERF);
+
+       slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_BALANCER,
+                             SLPC_PARAM_TASK_DISABLE_BALANCER);
+
+       slpc_mem_set_disabled(data, SLPC_PARAM_TASK_ENABLE_DCC,
+                             SLPC_PARAM_TASK_DISABLE_DCC);
+}
+
+/**
+ * intel_guc_slpc_set_max_freq() - Set max frequency limit for SLPC.
+ * @slpc: pointer to intel_guc_slpc.
+ * @val: frequency (MHz)
+ *
+ * This function will invoke GuC SLPC action to update the max frequency
+ * limit for unslice.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val)
+{
+       struct drm_i915_private *i915 = slpc_to_i915(slpc);
+       intel_wakeref_t wakeref;
+       int ret;
+
+       if (val < slpc->min_freq ||
+           val > slpc->rp0_freq ||
+           val < slpc->min_freq_softlimit)
+               return -EINVAL;
+
+       with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+               ret = slpc_set_param(slpc,
+                                    SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
+                                    val);
+
+               /* Return standardized err code for sysfs calls */
+               if (ret)
+                       ret = -EIO;
+       }
+
+       if (!ret)
+               slpc->max_freq_softlimit = val;
+
+       return ret;
+}
+
+/**
+ * intel_guc_slpc_get_max_freq() - Get max frequency limit for SLPC.
+ * @slpc: pointer to intel_guc_slpc.
+ * @val: pointer to val which will hold max frequency (MHz)
+ *
+ * This function will invoke GuC SLPC action to read the max frequency
+ * limit for unslice.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val)
+{
+       struct drm_i915_private *i915 = slpc_to_i915(slpc);
+       intel_wakeref_t wakeref;
+       int ret = 0;
+
+       with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+               /* Force GuC to update task data */
+               ret = slpc_query_task_state(slpc);
+
+               if (!ret)
+                       *val = slpc_decode_max_freq(slpc);
+       }
+
+       return ret;
+}
+
+/**
+ * intel_guc_slpc_set_min_freq() - Set min frequency limit for SLPC.
+ * @slpc: pointer to intel_guc_slpc.
+ * @val: frequency (MHz)
+ *
+ * This function will invoke GuC SLPC action to update the min unslice
+ * frequency.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val)
+{
+       struct drm_i915_private *i915 = slpc_to_i915(slpc);
+       intel_wakeref_t wakeref;
+       int ret;
+
+       if (val < slpc->min_freq ||
+           val > slpc->rp0_freq ||
+           val > slpc->max_freq_softlimit)
+               return -EINVAL;
+
+       with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+               ret = slpc_set_param(slpc,
+                                    SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+                                    val);
+
+               /* Return standardized err code for sysfs calls */
+               if (ret)
+                       ret = -EIO;
+       }
+
+       if (!ret)
+               slpc->min_freq_softlimit = val;
+
+       return ret;
+}
+
+/**
+ * intel_guc_slpc_get_min_freq() - Get min frequency limit for SLPC.
+ * @slpc: pointer to intel_guc_slpc.
+ * @val: pointer to val which will hold min frequency (MHz)
+ *
+ * This function will invoke GuC SLPC action to read the min frequency
+ * limit for unslice.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val)
+{
+       struct drm_i915_private *i915 = slpc_to_i915(slpc);
+       intel_wakeref_t wakeref;
+       int ret = 0;
+
+       with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+               /* Force GuC to update task data */
+               ret = slpc_query_task_state(slpc);
+
+               if (!ret)
+                       *val = slpc_decode_min_freq(slpc);
+       }
+
+       return ret;
+}
+
+void intel_guc_pm_intrmsk_enable(struct intel_gt *gt)
+{
+       u32 pm_intrmsk_mbz = 0;
+
+       /*
+        * Allow GuC to receive the ARAT timer expiry event.
+        * This interrupt register is set up by the RPS code
+        * when host-based turbo is enabled.
+        */
+       pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
+
+       intel_uncore_rmw(gt->uncore,
+                        GEN6_PMINTRMSK, pm_intrmsk_mbz, 0);
+}
+
+static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
+{
+       int ret = 0;
+
+       /*
+        * Softlimits are initially equivalent to platform limits
+        * unless they have deviated from defaults, in which case,
+        * we retain the values and set min/max accordingly.
+        */
+       if (!slpc->max_freq_softlimit)
+               slpc->max_freq_softlimit = slpc->rp0_freq;
+       else if (slpc->max_freq_softlimit != slpc->rp0_freq)
+               ret = intel_guc_slpc_set_max_freq(slpc,
+                                                 slpc->max_freq_softlimit);
+
+       if (unlikely(ret))
+               return ret;
+
+       if (!slpc->min_freq_softlimit)
+               slpc->min_freq_softlimit = slpc->min_freq;
+       else if (slpc->min_freq_softlimit != slpc->min_freq)
+               return intel_guc_slpc_set_min_freq(slpc,
+                                                  slpc->min_freq_softlimit);
+
+       return 0;
+}
+
+static int slpc_ignore_eff_freq(struct intel_guc_slpc *slpc, bool ignore)
+{
+       int ret = 0;
+
+       if (ignore) {
+               ret = slpc_set_param(slpc,
+                                    SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
+                                    ignore);
+               if (!ret)
+                       return slpc_set_param(slpc,
+                                             SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
+                                             slpc->min_freq);
+       } else {
+               ret = slpc_unset_param(slpc,
+                                      SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY);
+               if (!ret)
+                       return slpc_unset_param(slpc,
+                                               SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ);
+       }
+
+       return ret;
+}
+
+static int slpc_use_fused_rp0(struct intel_guc_slpc *slpc)
+{
+       /* Force SLPC to use the platform RP0 */
+       return slpc_set_param(slpc,
+                             SLPC_PARAM_GLOBAL_MAX_GT_UNSLICE_FREQ_MHZ,
+                             slpc->rp0_freq);
+}
+
+static void slpc_get_rp_values(struct intel_guc_slpc *slpc)
+{
+       u32 rp_state_cap;
+
+       rp_state_cap = intel_uncore_read(slpc_to_gt(slpc)->uncore,
+                                        GEN6_RP_STATE_CAP);
+
+       slpc->rp0_freq = REG_FIELD_GET(RP0_CAP_MASK, rp_state_cap) *
+                                       GT_FREQUENCY_MULTIPLIER;
+       slpc->rp1_freq = REG_FIELD_GET(RP1_CAP_MASK, rp_state_cap) *
+                                       GT_FREQUENCY_MULTIPLIER;
+       slpc->min_freq = REG_FIELD_GET(RPN_CAP_MASK, rp_state_cap) *
+                                       GT_FREQUENCY_MULTIPLIER;
+}
+
+/*
+ * intel_guc_slpc_enable() - Start SLPC
+ * @slpc: pointer to intel_guc_slpc.
+ *
+ * SLPC is enabled by setting up the shared data structure and
+ * sending a reset event to GuC SLPC. Initial data is set up in
+ * intel_guc_slpc_init(). Here we send the reset event. We do
+ * not currently need a slpc_disable since this is taken care
+ * of automatically when a reset/suspend occurs and the GuC
+ * CTB is destroyed.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
+{
+       struct drm_i915_private *i915 = slpc_to_i915(slpc);
+       int ret;
+
+       GEM_BUG_ON(!slpc->vma);
+
+       slpc_shared_data_reset(slpc->vaddr);
+
+       ret = slpc_reset(slpc);
+       if (unlikely(ret < 0)) {
+               drm_err(&i915->drm, "SLPC Reset event returned (%pe)\n",
+                       ERR_PTR(ret));
+               return ret;
+       }
+
+       ret = slpc_query_task_state(slpc);
+       if (unlikely(ret < 0))
+               return ret;
+
+       intel_guc_pm_intrmsk_enable(&i915->gt);
+
+       slpc_get_rp_values(slpc);
+
+       /* Ignore efficient freq and set min to platform min */
+       ret = slpc_ignore_eff_freq(slpc, true);
+       if (unlikely(ret)) {
+               drm_err(&i915->drm, "Failed to set SLPC min to RPn (%pe)\n",
+                       ERR_PTR(ret));
+               return ret;
+       }
+
+       /* Set SLPC max limit to RP0 */
+       ret = slpc_use_fused_rp0(slpc);
+       if (unlikely(ret)) {
+               drm_err(&i915->drm, "Failed to set SLPC max to RP0 (%pe)\n",
+                       ERR_PTR(ret));
+               return ret;
+       }
+
+       /* Revert SLPC min/max to softlimits if necessary */
+       ret = slpc_set_softlimits(slpc);
+       if (unlikely(ret)) {
+               drm_err(&i915->drm, "Failed to set SLPC softlimits (%pe)\n",
+                       ERR_PTR(ret));
+               return ret;
+       }
+
+       return 0;
+}
+
+int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p)
+{
+       struct drm_i915_private *i915 = slpc_to_i915(slpc);
+       struct slpc_shared_data *data = slpc->vaddr;
+       struct slpc_task_state_data *slpc_tasks;
+       intel_wakeref_t wakeref;
+       int ret = 0;
+
+       GEM_BUG_ON(!slpc->vma);
+
+       with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
+               ret = slpc_query_task_state(slpc);
+
+               if (!ret) {
+                       slpc_tasks = &data->task_state_data;
+
+                       drm_printf(p, "\tSLPC state: %s\n", slpc_get_state_string(slpc));
+                       drm_printf(p, "\tGTPERF task active: %s\n",
+                                  yesno(slpc_tasks->status & SLPC_GTPERF_TASK_ENABLED));
+                       drm_printf(p, "\tMax freq: %u MHz\n",
+                                  slpc_decode_max_freq(slpc));
+                       drm_printf(p, "\tMin freq: %u MHz\n",
+                                  slpc_decode_min_freq(slpc));
+               }
+       }
+
+       return ret;
+}
+
+void intel_guc_slpc_fini(struct intel_guc_slpc *slpc)
+{
+       if (!slpc->vma)
+               return;
+
+       i915_vma_unpin_and_release(&slpc->vma, I915_VMA_RELEASE_MAP);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.h
new file mode 100644 (file)
index 0000000..e45054d
--- /dev/null
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_SLPC_H_
+#define _INTEL_GUC_SLPC_H_
+
+#include "intel_guc_submission.h"
+#include "intel_guc_slpc_types.h"
+
+struct intel_gt;
+struct drm_printer;
+
+static inline bool intel_guc_slpc_is_supported(struct intel_guc *guc)
+{
+       return guc->slpc.supported;
+}
+
+static inline bool intel_guc_slpc_is_wanted(struct intel_guc *guc)
+{
+       return guc->slpc.selected;
+}
+
+static inline bool intel_guc_slpc_is_used(struct intel_guc *guc)
+{
+       return intel_guc_submission_is_used(guc) && intel_guc_slpc_is_wanted(guc);
+}
+
+void intel_guc_slpc_init_early(struct intel_guc_slpc *slpc);
+
+int intel_guc_slpc_init(struct intel_guc_slpc *slpc);
+int intel_guc_slpc_enable(struct intel_guc_slpc *slpc);
+void intel_guc_slpc_fini(struct intel_guc_slpc *slpc);
+int intel_guc_slpc_set_max_freq(struct intel_guc_slpc *slpc, u32 val);
+int intel_guc_slpc_set_min_freq(struct intel_guc_slpc *slpc, u32 val);
+int intel_guc_slpc_get_max_freq(struct intel_guc_slpc *slpc, u32 *val);
+int intel_guc_slpc_get_min_freq(struct intel_guc_slpc *slpc, u32 *val);
+int intel_guc_slpc_print_info(struct intel_guc_slpc *slpc, struct drm_printer *p);
+void intel_guc_pm_intrmsk_enable(struct intel_gt *gt);
+
+#endif
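The header above exposes SLPC as three predicates plus an init/enable/fini lifecycle. A minimal sketch of how intel_guc_slpc_is_supported/_is_wanted/_is_used compose, using stand-in structures rather than the real i915 types (the struct layout and the submission flag below are assumptions made purely for illustration):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel structures; illustration only. */
struct fake_slpc { bool supported; bool selected; };
struct fake_guc  { struct fake_slpc slpc; bool submission_used; };

static bool slpc_is_supported(const struct fake_guc *guc) { return guc->slpc.supported; }
static bool slpc_is_wanted(const struct fake_guc *guc)    { return guc->slpc.selected; }

/* Mirrors the shape of intel_guc_slpc_is_used(): SLPC only runs on top of
 * GuC submission. */
static bool slpc_is_used(const struct fake_guc *guc)
{
	return guc->submission_used && slpc_is_wanted(guc);
}

int main(void)
{
	struct fake_guc guc = {
		.slpc = { .supported = true, .selected = true },
		.submission_used = true,
	};

	printf("supported=%d wanted=%d used=%d\n",
	       slpc_is_supported(&guc), slpc_is_wanted(&guc), slpc_is_used(&guc));
	return 0;
}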
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc_types.h
new file mode 100644 (file)
index 0000000..41d1352
--- /dev/null
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_GUC_SLPC_TYPES_H_
+#define _INTEL_GUC_SLPC_TYPES_H_
+
+#include <linux/types.h>
+
+#define SLPC_RESET_TIMEOUT_MS 5
+
+struct intel_guc_slpc {
+       struct i915_vma *vma;
+       struct slpc_shared_data *vaddr;
+       bool supported;
+       bool selected;
+
+       /* platform frequency limits */
+       u32 min_freq;
+       u32 rp0_freq;
+       u32 rp1_freq;
+
+       /* frequency softlimits */
+       u32 min_freq_softlimit;
+       u32 max_freq_softlimit;
+};
+
+#endif
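The type above splits frequencies into platform limits (min_freq, rp1_freq, rp0_freq) read from hardware and user-requested softlimits. A small standalone sketch of one plausible way an effective range could be resolved from the two sets of fields, assuming 0 means "softlimit unset" (an assumption for the sketch, not something this patch defines); the resolve_range() helper is hypothetical:

#include <stdint.h>
#include <stdio.h>

struct slpc_limits {
	uint32_t min_freq, rp0_freq, rp1_freq;            /* platform limits, MHz */
	uint32_t min_freq_softlimit, max_freq_softlimit;  /* user requested, MHz  */
};

/* Hypothetical helper: clamp the softlimits into the platform range. */
static void resolve_range(const struct slpc_limits *l, uint32_t *min, uint32_t *max)
{
	*min = l->min_freq_softlimit ? l->min_freq_softlimit : l->min_freq;
	*max = l->max_freq_softlimit ? l->max_freq_softlimit : l->rp0_freq;
	if (*min < l->min_freq) *min = l->min_freq;
	if (*max > l->rp0_freq) *max = l->rp0_freq;
	if (*min > *max)        *min = *max;
}

int main(void)
{
	struct slpc_limits l = { 300, 1100, 700, 0, 900 };  /* made-up values */
	uint32_t min, max;

	resolve_range(&l, &min, &max);
	printf("effective range: %u-%u MHz\n", (unsigned)min, (unsigned)max);
	return 0;
}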
index 7c8ff97..87d8dc8 100644 (file)
 #include "gt/intel_breadcrumbs.h"
 #include "gt/intel_context.h"
 #include "gt/intel_engine_pm.h"
+#include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_irq.h"
 #include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_requests.h"
 #include "gt/intel_lrc.h"
+#include "gt/intel_lrc_reg.h"
 #include "gt/intel_mocs.h"
 #include "gt/intel_ring.h"
 
  *
  */
 
+/* GuC Virtual Engine */
+struct guc_virtual_engine {
+       struct intel_engine_cs base;
+       struct intel_context context;
+};
+
+static struct intel_context *
+guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count);
+
 #define GUC_REQUEST_SIZE 64 /* bytes */
 
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+/*
+ * Below is a set of functions which control the GuC scheduling state and do
+ * not require a lock, as all state transitions are mutually exclusive, i.e. it
+ * is not possible for the context pinning code and submission, for the same
+ * context, to be executing simultaneously. We still need an atomic as it is
+ * possible for some of the bits to change at the same time though.
+ */
+#define SCHED_STATE_NO_LOCK_ENABLED                    BIT(0)
+#define SCHED_STATE_NO_LOCK_PENDING_ENABLE             BIT(1)
+#define SCHED_STATE_NO_LOCK_REGISTERED                 BIT(2)
+static inline bool context_enabled(struct intel_context *ce)
 {
-       return rb_entry(rb, struct i915_priolist, node);
+       return (atomic_read(&ce->guc_sched_state_no_lock) &
+               SCHED_STATE_NO_LOCK_ENABLED);
+}
+
+static inline void set_context_enabled(struct intel_context *ce)
+{
+       atomic_or(SCHED_STATE_NO_LOCK_ENABLED, &ce->guc_sched_state_no_lock);
+}
+
+static inline void clr_context_enabled(struct intel_context *ce)
+{
+       atomic_and((u32)~SCHED_STATE_NO_LOCK_ENABLED,
+                  &ce->guc_sched_state_no_lock);
+}
+
+static inline bool context_pending_enable(struct intel_context *ce)
+{
+       return (atomic_read(&ce->guc_sched_state_no_lock) &
+               SCHED_STATE_NO_LOCK_PENDING_ENABLE);
 }
 
-static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
+static inline void set_context_pending_enable(struct intel_context *ce)
 {
-       struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;
+       atomic_or(SCHED_STATE_NO_LOCK_PENDING_ENABLE,
+                 &ce->guc_sched_state_no_lock);
+}
 
-       return &base[id];
+static inline void clr_context_pending_enable(struct intel_context *ce)
+{
+       atomic_and((u32)~SCHED_STATE_NO_LOCK_PENDING_ENABLE,
+                  &ce->guc_sched_state_no_lock);
 }
 
-static int guc_stage_desc_pool_create(struct intel_guc *guc)
+static inline bool context_registered(struct intel_context *ce)
 {
-       u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) *
-                             GUC_MAX_STAGE_DESCRIPTORS);
+       return (atomic_read(&ce->guc_sched_state_no_lock) &
+               SCHED_STATE_NO_LOCK_REGISTERED);
+}
 
-       return intel_guc_allocate_and_map_vma(guc, size, &guc->stage_desc_pool,
-                                             &guc->stage_desc_pool_vaddr);
+static inline void set_context_registered(struct intel_context *ce)
+{
+       atomic_or(SCHED_STATE_NO_LOCK_REGISTERED,
+                 &ce->guc_sched_state_no_lock);
 }
 
-static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
+static inline void clr_context_registered(struct intel_context *ce)
 {
-       i915_vma_unpin_and_release(&guc->stage_desc_pool, I915_VMA_RELEASE_MAP);
+       atomic_and((u32)~SCHED_STATE_NO_LOCK_REGISTERED,
+                  &ce->guc_sched_state_no_lock);
 }
 
 /*
- * Initialise/clear the stage descriptor shared with the GuC firmware.
- *
- * This descriptor tells the GuC where (in GGTT space) to find the important
- * data structures related to work submission (process descriptor, write queue,
- * etc).
+ * Below is a set of functions which control the GuC scheduling state and
+ * require a lock, aside from the special case where the functions are called
+ * from guc_lrc_desc_pin(). In that case it isn't possible for any other code
+ * path to be executing on the context.
  */
-static void guc_stage_desc_init(struct intel_guc *guc)
+#define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER    BIT(0)
+#define SCHED_STATE_DESTROYED                          BIT(1)
+#define SCHED_STATE_PENDING_DISABLE                    BIT(2)
+#define SCHED_STATE_BANNED                             BIT(3)
+#define SCHED_STATE_BLOCKED_SHIFT                      4
+#define SCHED_STATE_BLOCKED            BIT(SCHED_STATE_BLOCKED_SHIFT)
+#define SCHED_STATE_BLOCKED_MASK       (0xfff << SCHED_STATE_BLOCKED_SHIFT)
+static inline void init_sched_state(struct intel_context *ce)
 {
-       struct guc_stage_desc *desc;
+       /* Only should be called from guc_lrc_desc_pin() */
+       atomic_set(&ce->guc_sched_state_no_lock, 0);
+       ce->guc_state.sched_state = 0;
+}
 
-       /* we only use 1 stage desc, so hardcode it to 0 */
-       desc = __get_stage_desc(guc, 0);
-       memset(desc, 0, sizeof(*desc));
+static inline bool
+context_wait_for_deregister_to_register(struct intel_context *ce)
+{
+       return ce->guc_state.sched_state &
+               SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
+}
 
-       desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
-                         GUC_STAGE_DESC_ATTR_KERNEL;
+static inline void
+set_context_wait_for_deregister_to_register(struct intel_context *ce)
+{
+       /* Only should be called from guc_lrc_desc_pin() without lock */
+       ce->guc_state.sched_state |=
+               SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
+}
 
-       desc->stage_id = 0;
-       desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL;
+static inline void
+clr_context_wait_for_deregister_to_register(struct intel_context *ce)
+{
+       lockdep_assert_held(&ce->guc_state.lock);
+       ce->guc_state.sched_state &=
+               ~SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
+}
 
-       desc->wq_size = GUC_WQ_SIZE;
+static inline bool
+context_destroyed(struct intel_context *ce)
+{
+       return ce->guc_state.sched_state & SCHED_STATE_DESTROYED;
 }
 
-static void guc_stage_desc_fini(struct intel_guc *guc)
+static inline void
+set_context_destroyed(struct intel_context *ce)
 {
-       struct guc_stage_desc *desc;
+       lockdep_assert_held(&ce->guc_state.lock);
+       ce->guc_state.sched_state |= SCHED_STATE_DESTROYED;
+}
 
-       desc = __get_stage_desc(guc, 0);
-       memset(desc, 0, sizeof(*desc));
+static inline bool context_pending_disable(struct intel_context *ce)
+{
+       return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE;
 }
 
-static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
+static inline void set_context_pending_disable(struct intel_context *ce)
 {
-       /* Leaving stub as this function will be used in future patches */
+       lockdep_assert_held(&ce->guc_state.lock);
+       ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE;
 }
 
-/*
- * When we're doing submissions using regular execlists backend, writing to
- * ELSP from CPU side is enough to make sure that writes to ringbuffer pages
- * pinned in mappable aperture portion of GGTT are visible to command streamer.
- * Writes done by GuC on our behalf are not guaranteeing such ordering,
- * therefore, to ensure the flush, we're issuing a POSTING READ.
- */
-static void flush_ggtt_writes(struct i915_vma *vma)
+static inline void clr_context_pending_disable(struct intel_context *ce)
 {
-       if (i915_vma_is_map_and_fenceable(vma))
-               intel_uncore_posting_read_fw(vma->vm->gt->uncore,
-                                            GUC_STATUS);
+       lockdep_assert_held(&ce->guc_state.lock);
+       ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE;
 }
 
-static void guc_submit(struct intel_engine_cs *engine,
-                      struct i915_request **out,
-                      struct i915_request **end)
+static inline bool context_banned(struct intel_context *ce)
 {
-       struct intel_guc *guc = &engine->gt->uc.guc;
+       return ce->guc_state.sched_state & SCHED_STATE_BANNED;
+}
 
-       do {
-               struct i915_request *rq = *out++;
+static inline void set_context_banned(struct intel_context *ce)
+{
+       lockdep_assert_held(&ce->guc_state.lock);
+       ce->guc_state.sched_state |= SCHED_STATE_BANNED;
+}
 
-               flush_ggtt_writes(rq->ring->vma);
-               guc_add_request(guc, rq);
-       } while (out != end);
+static inline void clr_context_banned(struct intel_context *ce)
+{
+       lockdep_assert_held(&ce->guc_state.lock);
+       ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
 }
 
-static inline int rq_prio(const struct i915_request *rq)
+static inline u32 context_blocked(struct intel_context *ce)
 {
-       return rq->sched.attr.priority;
+       return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
+               SCHED_STATE_BLOCKED_SHIFT;
+}
+
+static inline void incr_context_blocked(struct intel_context *ce)
+{
+       lockdep_assert_held(&ce->engine->sched_engine->lock);
+       lockdep_assert_held(&ce->guc_state.lock);
+
+       ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
+
+       GEM_BUG_ON(!context_blocked(ce));       /* Overflow check */
+}
+
+static inline void decr_context_blocked(struct intel_context *ce)
+{
+       lockdep_assert_held(&ce->engine->sched_engine->lock);
+       lockdep_assert_held(&ce->guc_state.lock);
+
+       GEM_BUG_ON(!context_blocked(ce));       /* Underflow check */
+
+       ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
+}
+
+static inline bool context_guc_id_invalid(struct intel_context *ce)
+{
+       return ce->guc_id == GUC_INVALID_LRC_ID;
+}
+
+static inline void set_context_guc_id_invalid(struct intel_context *ce)
+{
+       ce->guc_id = GUC_INVALID_LRC_ID;
+}
+
+static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
+{
+       return &ce->engine->gt->uc.guc;
+}
+
+static inline struct i915_priolist *to_priolist(struct rb_node *rb)
+{
+       return rb_entry(rb, struct i915_priolist, node);
+}
+
+static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index)
+{
+       struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr;
+
+       GEM_BUG_ON(index >= GUC_MAX_LRC_DESCRIPTORS);
+
+       return &base[index];
+}
+
+static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
+{
+       struct intel_context *ce = xa_load(&guc->context_lookup, id);
+
+       GEM_BUG_ON(id >= GUC_MAX_LRC_DESCRIPTORS);
+
+       return ce;
+}
+
+static int guc_lrc_desc_pool_create(struct intel_guc *guc)
+{
+       u32 size;
+       int ret;
+
+       size = PAGE_ALIGN(sizeof(struct guc_lrc_desc) *
+                         GUC_MAX_LRC_DESCRIPTORS);
+       ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool,
+                                            (void **)&guc->lrc_desc_pool_vaddr);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static void guc_lrc_desc_pool_destroy(struct intel_guc *guc)
+{
+       guc->lrc_desc_pool_vaddr = NULL;
+       i915_vma_unpin_and_release(&guc->lrc_desc_pool, I915_VMA_RELEASE_MAP);
+}
+
+static inline bool guc_submission_initialized(struct intel_guc *guc)
+{
+       return !!guc->lrc_desc_pool_vaddr;
+}
+
+static inline void reset_lrc_desc(struct intel_guc *guc, u32 id)
+{
+       if (likely(guc_submission_initialized(guc))) {
+               struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
+               unsigned long flags;
+
+               memset(desc, 0, sizeof(*desc));
+
+               /*
+                * xarray API doesn't have xa_erase_irqsave wrapper, so calling
+                * the lower level functions directly.
+                */
+               xa_lock_irqsave(&guc->context_lookup, flags);
+               __xa_erase(&guc->context_lookup, id);
+               xa_unlock_irqrestore(&guc->context_lookup, flags);
+       }
+}
+
+static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id)
+{
+       return __get_context(guc, id);
 }
 
-static struct i915_request *schedule_in(struct i915_request *rq, int idx)
+static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
+                                          struct intel_context *ce)
 {
-       trace_i915_request_in(rq, idx);
+       unsigned long flags;
 
        /*
-        * Currently we are not tracking the rq->context being inflight
-        * (ce->inflight = rq->engine). It is only used by the execlists
-        * backend at the moment, a similar counting strategy would be
-        * required if we generalise the inflight tracking.
+        * xarray API doesn't have an xa_store_irqsave wrapper, so calling the
+        * lower level functions directly.
         */
+       xa_lock_irqsave(&guc->context_lookup, flags);
+       __xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC);
+       xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
+static int guc_submission_send_busy_loop(struct intel_guc *guc,
+                                        const u32 *action,
+                                        u32 len,
+                                        u32 g2h_len_dw,
+                                        bool loop)
+{
+       int err;
+
+       err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
 
-       __intel_gt_pm_get(rq->engine->gt);
-       return i915_request_get(rq);
+       if (!err && g2h_len_dw)
+               atomic_inc(&guc->outstanding_submission_g2h);
+
+       return err;
 }
 
-static void schedule_out(struct i915_request *rq)
+int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
+                                  atomic_t *wait_var,
+                                  bool interruptible,
+                                  long timeout)
 {
-       trace_i915_request_out(rq);
+       const int state = interruptible ?
+               TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+       DEFINE_WAIT(wait);
+
+       might_sleep();
+       GEM_BUG_ON(timeout < 0);
+
+       if (!atomic_read(wait_var))
+               return 0;
+
+       if (!timeout)
+               return -ETIME;
+
+       for (;;) {
+               prepare_to_wait(&guc->ct.wq, &wait, state);
+
+               if (!atomic_read(wait_var))
+                       break;
+
+               if (signal_pending_state(state, current)) {
+                       timeout = -EINTR;
+                       break;
+               }
+
+               if (!timeout) {
+                       timeout = -ETIME;
+                       break;
+               }
 
-       intel_gt_pm_put_async(rq->engine->gt);
-       i915_request_put(rq);
+               timeout = io_schedule_timeout(timeout);
+       }
+       finish_wait(&guc->ct.wq, &wait);
+
+       return (timeout < 0) ? timeout : 0;
 }
 
-static void __guc_dequeue(struct intel_engine_cs *engine)
+int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
 {
-       struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct i915_request **first = execlists->inflight;
-       struct i915_request ** const last_port = first + execlists->port_mask;
-       struct i915_request *last = first[0];
-       struct i915_request **port;
-       bool submit = false;
-       struct rb_node *rb;
+       if (!intel_uc_uses_guc_submission(&guc_to_gt(guc)->uc))
+               return 0;
+
+       return intel_guc_wait_for_pending_msg(guc,
+                                             &guc->outstanding_submission_g2h,
+                                             true, timeout);
+}
 
-       lockdep_assert_held(&engine->active.lock);
+static int guc_lrc_desc_pin(struct intel_context *ce, bool loop);
 
-       if (last) {
-               if (*++first)
-                       return;
+static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
+{
+       int err = 0;
+       struct intel_context *ce = rq->context;
+       u32 action[3];
+       int len = 0;
+       u32 g2h_len_dw = 0;
+       bool enabled;
+
+       /*
+        * Corner case where requests were sitting in the priority list or a
+        * request was resubmitted after the context was banned.
+        */
+       if (unlikely(intel_context_is_banned(ce))) {
+               i915_request_put(i915_request_mark_eio(rq));
+               intel_engine_signal_breadcrumbs(ce->engine);
+               goto out;
+       }
+
+       GEM_BUG_ON(!atomic_read(&ce->guc_id_ref));
+       GEM_BUG_ON(context_guc_id_invalid(ce));
 
-               last = NULL;
+       /*
+        * Corner case where the GuC firmware was blown away and reloaded while
+        * this context was pinned.
+        */
+       if (unlikely(!lrc_desc_registered(guc, ce->guc_id))) {
+               err = guc_lrc_desc_pin(ce, false);
+               if (unlikely(err))
+                       goto out;
        }
 
        /*
-        * We write directly into the execlists->inflight queue and don't use
-        * the execlists->pending queue, as we don't have a distinct switch
-        * event.
+        * The request / context will be run on the hardware when scheduling
+        * gets enabled in the unblock.
         */
-       port = first;
-       while ((rb = rb_first_cached(&execlists->queue))) {
+       if (unlikely(context_blocked(ce)))
+               goto out;
+
+       enabled = context_enabled(ce);
+
+       if (!enabled) {
+               action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
+               action[len++] = ce->guc_id;
+               action[len++] = GUC_CONTEXT_ENABLE;
+               set_context_pending_enable(ce);
+               intel_context_get(ce);
+               g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
+       } else {
+               action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
+               action[len++] = ce->guc_id;
+       }
+
+       err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
+       if (!enabled && !err) {
+               trace_intel_context_sched_enable(ce);
+               atomic_inc(&guc->outstanding_submission_g2h);
+               set_context_enabled(ce);
+       } else if (!enabled) {
+               clr_context_pending_enable(ce);
+               intel_context_put(ce);
+       }
+       if (likely(!err))
+               trace_i915_request_guc_submit(rq);
+
+out:
+       return err;
+}
+
+static inline void guc_set_lrc_tail(struct i915_request *rq)
+{
+       rq->context->lrc_reg_state[CTX_RING_TAIL] =
+               intel_ring_set_tail(rq->ring, rq->tail);
+}
+
+static inline int rq_prio(const struct i915_request *rq)
+{
+       return rq->sched.attr.priority;
+}
+
+static int guc_dequeue_one_context(struct intel_guc *guc)
+{
+       struct i915_sched_engine * const sched_engine = guc->sched_engine;
+       struct i915_request *last = NULL;
+       bool submit = false;
+       struct rb_node *rb;
+       int ret;
+
+       lockdep_assert_held(&sched_engine->lock);
+
+       if (guc->stalled_request) {
+               submit = true;
+               last = guc->stalled_request;
+               goto resubmit;
+       }
+
+       while ((rb = rb_first_cached(&sched_engine->queue))) {
                struct i915_priolist *p = to_priolist(rb);
                struct i915_request *rq, *rn;
 
                priolist_for_each_request_consume(rq, rn, p) {
-                       if (last && rq->context != last->context) {
-                               if (port == last_port)
-                                       goto done;
-
-                               *port = schedule_in(last,
-                                                   port - execlists->inflight);
-                               port++;
-                       }
+                       if (last && rq->context != last->context)
+                               goto done;
 
                        list_del_init(&rq->sched.link);
+
                        __i915_request_submit(rq);
-                       submit = true;
+
+                       trace_i915_request_in(rq, 0);
                        last = rq;
+                       submit = true;
                }
 
-               rb_erase_cached(&p->node, &execlists->queue);
+               rb_erase_cached(&p->node, &sched_engine->queue);
                i915_priolist_free(p);
        }
 done:
-       execlists->queue_priority_hint =
-               rb ? to_priolist(rb)->priority : INT_MIN;
        if (submit) {
-               *port = schedule_in(last, port - execlists->inflight);
-               *++port = NULL;
-               guc_submit(engine, first, port);
+               guc_set_lrc_tail(last);
+resubmit:
+               ret = guc_add_request(guc, last);
+               if (unlikely(ret == -EPIPE))
+                       goto deadlk;
+               else if (ret == -EBUSY) {
+                       tasklet_schedule(&sched_engine->tasklet);
+                       guc->stalled_request = last;
+                       return false;
+               }
        }
-       execlists->active = execlists->inflight;
+
+       guc->stalled_request = NULL;
+       return submit;
+
+deadlk:
+       sched_engine->tasklet.callback = NULL;
+       tasklet_disable_nosync(&sched_engine->tasklet);
+       return false;
 }
 
 static void guc_submission_tasklet(struct tasklet_struct *t)
 {
-       struct intel_engine_cs * const engine =
-               from_tasklet(engine, t, execlists.tasklet);
-       struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct i915_request **port, *rq;
+       struct i915_sched_engine *sched_engine =
+               from_tasklet(sched_engine, t, tasklet);
        unsigned long flags;
+       bool loop;
 
-       spin_lock_irqsave(&engine->active.lock, flags);
-
-       for (port = execlists->inflight; (rq = *port); port++) {
-               if (!i915_request_completed(rq))
-                       break;
+       spin_lock_irqsave(&sched_engine->lock, flags);
 
-               schedule_out(rq);
-       }
-       if (port != execlists->inflight) {
-               int idx = port - execlists->inflight;
-               int rem = ARRAY_SIZE(execlists->inflight) - idx;
-               memmove(execlists->inflight, port, rem * sizeof(*port));
-       }
+       do {
+               loop = guc_dequeue_one_context(sched_engine->private_data);
+       } while (loop);
 
-       __guc_dequeue(engine);
+       i915_sched_engine_reset_on_empty(sched_engine);
 
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       spin_unlock_irqrestore(&sched_engine->lock, flags);
 }
 
 static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
 {
-       if (iir & GT_RENDER_USER_INTERRUPT) {
+       if (iir & GT_RENDER_USER_INTERRUPT)
                intel_engine_signal_breadcrumbs(engine);
-               tasklet_hi_schedule(&engine->execlists.tasklet);
+}
+
+static void __guc_context_destroy(struct intel_context *ce);
+static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
+static void guc_signal_context_fence(struct intel_context *ce);
+static void guc_cancel_context_requests(struct intel_context *ce);
+static void guc_blocked_fence_complete(struct intel_context *ce);
+
+static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
+{
+       struct intel_context *ce;
+       unsigned long index, flags;
+       bool pending_disable, pending_enable, deregister, destroyed, banned;
+
+       xa_for_each(&guc->context_lookup, index, ce) {
+               /* Flush context */
+               spin_lock_irqsave(&ce->guc_state.lock, flags);
+               spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+               /*
+                * Once we are at this point, submission_disabled() is guaranteed
+                * to be visible to all callers who set the below flags (see above
+                * flush and flushes in reset_prepare). If submission_disabled()
+                * is set, the caller shouldn't set these flags.
+                */
+
+               destroyed = context_destroyed(ce);
+               pending_enable = context_pending_enable(ce);
+               pending_disable = context_pending_disable(ce);
+               deregister = context_wait_for_deregister_to_register(ce);
+               banned = context_banned(ce);
+               init_sched_state(ce);
+
+               if (pending_enable || destroyed || deregister) {
+                       atomic_dec(&guc->outstanding_submission_g2h);
+                       if (deregister)
+                               guc_signal_context_fence(ce);
+                       if (destroyed) {
+                               release_guc_id(guc, ce);
+                               __guc_context_destroy(ce);
+                       }
+                       if (pending_enable || deregister)
+                               intel_context_put(ce);
+               }
+
+               /* Not mutually exclusive with the above if statement. */
+               if (pending_disable) {
+                       guc_signal_context_fence(ce);
+                       if (banned) {
+                               guc_cancel_context_requests(ce);
+                               intel_engine_signal_breadcrumbs(ce->engine);
+                       }
+                       intel_context_sched_disable_unpin(ce);
+                       atomic_dec(&guc->outstanding_submission_g2h);
+                       spin_lock_irqsave(&ce->guc_state.lock, flags);
+                       guc_blocked_fence_complete(ce);
+                       spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+                       intel_context_put(ce);
+               }
+       }
+}
+
+static inline bool
+submission_disabled(struct intel_guc *guc)
+{
+       struct i915_sched_engine * const sched_engine = guc->sched_engine;
+
+       return unlikely(!sched_engine ||
+                       !__tasklet_is_enabled(&sched_engine->tasklet));
+}
+
+static void disable_submission(struct intel_guc *guc)
+{
+       struct i915_sched_engine * const sched_engine = guc->sched_engine;
+
+       if (__tasklet_is_enabled(&sched_engine->tasklet)) {
+               GEM_BUG_ON(!guc->ct.enabled);
+               __tasklet_disable_sync_once(&sched_engine->tasklet);
+               sched_engine->tasklet.callback = NULL;
+       }
+}
+
+static void enable_submission(struct intel_guc *guc)
+{
+       struct i915_sched_engine * const sched_engine = guc->sched_engine;
+       unsigned long flags;
+
+       spin_lock_irqsave(&guc->sched_engine->lock, flags);
+       sched_engine->tasklet.callback = guc_submission_tasklet;
+       wmb();  /* Make sure callback visible */
+       if (!__tasklet_is_enabled(&sched_engine->tasklet) &&
+           __tasklet_enable(&sched_engine->tasklet)) {
+               GEM_BUG_ON(!guc->ct.enabled);
+
+               /* And kick in case we missed a new request submission. */
+               tasklet_hi_schedule(&sched_engine->tasklet);
        }
+       spin_unlock_irqrestore(&guc->sched_engine->lock, flags);
+}
+
+static void guc_flush_submissions(struct intel_guc *guc)
+{
+       struct i915_sched_engine * const sched_engine = guc->sched_engine;
+       unsigned long flags;
+
+       spin_lock_irqsave(&sched_engine->lock, flags);
+       spin_unlock_irqrestore(&sched_engine->lock, flags);
 }
 
-static void guc_reset_prepare(struct intel_engine_cs *engine)
+void intel_guc_submission_reset_prepare(struct intel_guc *guc)
 {
-       struct intel_engine_execlists * const execlists = &engine->execlists;
+       int i;
 
-       ENGINE_TRACE(engine, "\n");
+       if (unlikely(!guc_submission_initialized(guc))) {
+               /* Reset called during driver load? GuC not yet initialised! */
+               return;
+       }
+
+       intel_gt_park_heartbeats(guc_to_gt(guc));
+       disable_submission(guc);
+       guc->interrupts.disable(guc);
+
+       /* Flush IRQ handler */
+       spin_lock_irq(&guc_to_gt(guc)->irq_lock);
+       spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
+
+       guc_flush_submissions(guc);
 
        /*
-        * Prevent request submission to the hardware until we have
-        * completed the reset in i915_gem_reset_finish(). If a request
-        * is completed by one engine, it may then queue a request
-        * to a second via its execlists->tasklet *just* as we are
-        * calling engine->init_hw() and also writing the ELSP.
-        * Turning off the execlists->tasklet until the reset is over
-        * prevents the race.
+        * Handle any outstanding G2Hs before reset. Call IRQ handler directly
+        * each pass as interrupts have been disabled. We always scrub for
+        * outstanding G2H as it is possible for outstanding_submission_g2h to
+        * be incremented after the context state update.
         */
-       __tasklet_disable_sync_once(&execlists->tasklet);
+       for (i = 0; i < 4 && atomic_read(&guc->outstanding_submission_g2h); ++i) {
+               intel_guc_to_host_event_handler(guc);
+#define wait_for_reset(guc, wait_var) \
+               intel_guc_wait_for_pending_msg(guc, wait_var, false, (HZ / 20))
+               do {
+                       wait_for_reset(guc, &guc->outstanding_submission_g2h);
+               } while (!list_empty(&guc->ct.requests.incoming));
+       }
+       scrub_guc_desc_for_outstanding_g2h(guc);
+}
+
+static struct intel_engine_cs *
+guc_virtual_get_sibling(struct intel_engine_cs *ve, unsigned int sibling)
+{
+       struct intel_engine_cs *engine;
+       intel_engine_mask_t tmp, mask = ve->mask;
+       unsigned int num_siblings = 0;
+
+       for_each_engine_masked(engine, ve->gt, mask, tmp)
+               if (num_siblings++ == sibling)
+                       return engine;
+
+       return NULL;
+}
+
+static inline struct intel_engine_cs *
+__context_to_physical_engine(struct intel_context *ce)
+{
+       struct intel_engine_cs *engine = ce->engine;
+
+       if (intel_engine_is_virtual(engine))
+               engine = guc_virtual_get_sibling(engine, 0);
+
+       return engine;
 }
 
-static void guc_reset_state(struct intel_context *ce,
-                           struct intel_engine_cs *engine,
-                           u32 head,
-                           bool scrub)
+static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
 {
+       struct intel_engine_cs *engine = __context_to_physical_engine(ce);
+
+       if (intel_context_is_banned(ce))
+               return;
+
        GEM_BUG_ON(!intel_context_is_pinned(ce));
 
        /*
@@ -313,213 +777,1506 @@ static void guc_reset_state(struct intel_context *ce,
        lrc_update_regs(ce, engine, head);
 }
 
-static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
+static void guc_reset_nop(struct intel_engine_cs *engine)
 {
-       struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct i915_request *rq;
+}
+
+static void guc_rewind_nop(struct intel_engine_cs *engine, bool stalled)
+{
+}
+
+static void
+__unwind_incomplete_requests(struct intel_context *ce)
+{
+       struct i915_request *rq, *rn;
+       struct list_head *pl;
+       int prio = I915_PRIORITY_INVALID;
+       struct i915_sched_engine * const sched_engine =
+               ce->engine->sched_engine;
        unsigned long flags;
 
-       spin_lock_irqsave(&engine->active.lock, flags);
+       spin_lock_irqsave(&sched_engine->lock, flags);
+       spin_lock(&ce->guc_active.lock);
+       list_for_each_entry_safe(rq, rn,
+                                &ce->guc_active.requests,
+                                sched.link) {
+               if (i915_request_completed(rq))
+                       continue;
 
-       /* Push back any incomplete requests for replay after the reset. */
-       rq = execlists_unwind_incomplete_requests(execlists);
-       if (!rq)
-               goto out_unlock;
+               list_del_init(&rq->sched.link);
+               spin_unlock(&ce->guc_active.lock);
 
-       if (!i915_request_started(rq))
-               stalled = false;
+               __i915_request_unsubmit(rq);
 
-       __i915_request_reset(rq, stalled);
-       guc_reset_state(rq->context, engine, rq->head, stalled);
+               /* Push the request back into the queue for later resubmission. */
+               GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+               if (rq_prio(rq) != prio) {
+                       prio = rq_prio(rq);
+                       pl = i915_sched_lookup_priolist(sched_engine, prio);
+               }
+               GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
 
-out_unlock:
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+               list_add_tail(&rq->sched.link, pl);
+               set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+               spin_lock(&ce->guc_active.lock);
+       }
+       spin_unlock(&ce->guc_active.lock);
+       spin_unlock_irqrestore(&sched_engine->lock, flags);
 }
 
-static void guc_reset_cancel(struct intel_engine_cs *engine)
+static void __guc_reset_context(struct intel_context *ce, bool stalled)
 {
-       struct intel_engine_execlists * const execlists = &engine->execlists;
-       struct i915_request *rq, *rn;
-       struct rb_node *rb;
-       unsigned long flags;
+       struct i915_request *rq;
+       u32 head;
 
-       ENGINE_TRACE(engine, "\n");
+       intel_context_get(ce);
 
        /*
-        * Before we call engine->cancel_requests(), we should have exclusive
-        * access to the submission state. This is arranged for us by the
-        * caller disabling the interrupt generation, the tasklet and other
-        * threads that may then access the same state, giving us a free hand
-        * to reset state. However, we still need to let lockdep be aware that
-        * we know this state may be accessed in hardirq context, so we
-        * disable the irq around this manipulation and we want to keep
-        * the spinlock focused on its duties and not accidentally conflate
-        * coverage to the submission's irq state. (Similarly, although we
-        * shouldn't need to disable irq around the manipulation of the
-        * submission's irq state, we also wish to remind ourselves that
-        * it is irq state.)
+        * GuC will implicitly mark the context as non-schedulable
+        * when it sends the reset notification. Make sure our state
+        * reflects this change. The context will be marked enabled
+        * on resubmission.
         */
-       spin_lock_irqsave(&engine->active.lock, flags);
+       clr_context_enabled(ce);
 
-       /* Mark all executing requests as skipped. */
-       list_for_each_entry(rq, &engine->active.requests, sched.link) {
-               i915_request_set_error_once(rq, -EIO);
-               i915_request_mark_complete(rq);
+       rq = intel_context_find_active_request(ce);
+       if (!rq) {
+               head = ce->ring->tail;
+               stalled = false;
+               goto out_replay;
        }
 
-       /* Flush the queued requests to the timeline list (for retiring). */
-       while ((rb = rb_first_cached(&execlists->queue))) {
-               struct i915_priolist *p = to_priolist(rb);
+       if (!i915_request_started(rq))
+               stalled = false;
+
+       GEM_BUG_ON(i915_active_is_idle(&ce->active));
+       head = intel_ring_wrap(ce->ring, rq->head);
+       __i915_request_reset(rq, stalled);
+
+out_replay:
+       guc_reset_state(ce, head, stalled);
+       __unwind_incomplete_requests(ce);
+       intel_context_put(ce);
+}
+
+void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
+{
+       struct intel_context *ce;
+       unsigned long index;
+
+       if (unlikely(!guc_submission_initialized(guc))) {
+               /* Reset called during driver load? GuC not yet initialised! */
+               return;
+       }
+
+       xa_for_each(&guc->context_lookup, index, ce)
+               if (intel_context_is_pinned(ce))
+                       __guc_reset_context(ce, stalled);
+
+       /* GuC is blown away, drop all references to contexts */
+       xa_destroy(&guc->context_lookup);
+}
+
+static void guc_cancel_context_requests(struct intel_context *ce)
+{
+       struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine;
+       struct i915_request *rq;
+       unsigned long flags;
+
+       /* Mark all executing requests as skipped. */
+       spin_lock_irqsave(&sched_engine->lock, flags);
+       spin_lock(&ce->guc_active.lock);
+       list_for_each_entry(rq, &ce->guc_active.requests, sched.link)
+               i915_request_put(i915_request_mark_eio(rq));
+       spin_unlock(&ce->guc_active.lock);
+       spin_unlock_irqrestore(&sched_engine->lock, flags);
+}
+
+static void
+guc_cancel_sched_engine_requests(struct i915_sched_engine *sched_engine)
+{
+       struct i915_request *rq, *rn;
+       struct rb_node *rb;
+       unsigned long flags;
+
+       /* Can be called during boot if GuC fails to load */
+       if (!sched_engine)
+               return;
+
+       /*
+        * Before we call engine->cancel_requests(), we should have exclusive
+        * access to the submission state. This is arranged for us by the
+        * caller disabling the interrupt generation, the tasklet and other
+        * threads that may then access the same state, giving us a free hand
+        * to reset state. However, we still need to let lockdep be aware that
+        * we know this state may be accessed in hardirq context, so we
+        * disable the irq around this manipulation and we want to keep
+        * the spinlock focused on its duties and not accidentally conflate
+        * coverage to the submission's irq state. (Similarly, although we
+        * shouldn't need to disable irq around the manipulation of the
+        * submission's irq state, we also wish to remind ourselves that
+        * it is irq state.)
+        */
+       spin_lock_irqsave(&sched_engine->lock, flags);
+
+       /* Flush the queued requests to the timeline list (for retiring). */
+       while ((rb = rb_first_cached(&sched_engine->queue))) {
+               struct i915_priolist *p = to_priolist(rb);
+
+               priolist_for_each_request_consume(rq, rn, p) {
+                       list_del_init(&rq->sched.link);
 
-               priolist_for_each_request_consume(rq, rn, p) {
-                       list_del_init(&rq->sched.link);
                        __i915_request_submit(rq);
-                       dma_fence_set_error(&rq->fence, -EIO);
-                       i915_request_mark_complete(rq);
+
+                       i915_request_put(i915_request_mark_eio(rq));
                }
 
-               rb_erase_cached(&p->node, &execlists->queue);
+               rb_erase_cached(&p->node, &sched_engine->queue);
                i915_priolist_free(p);
        }
 
        /* Remaining _unready_ requests will be nop'ed when submitted */
 
-       execlists->queue_priority_hint = INT_MIN;
-       execlists->queue = RB_ROOT_CACHED;
+       sched_engine->queue_priority_hint = INT_MIN;
+       sched_engine->queue = RB_ROOT_CACHED;
+
+       spin_unlock_irqrestore(&sched_engine->lock, flags);
+}
+
+void intel_guc_submission_cancel_requests(struct intel_guc *guc)
+{
+       struct intel_context *ce;
+       unsigned long index;
+
+       xa_for_each(&guc->context_lookup, index, ce)
+               if (intel_context_is_pinned(ce))
+                       guc_cancel_context_requests(ce);
+
+       guc_cancel_sched_engine_requests(guc->sched_engine);
+
+       /* GuC is blown away, drop all references to contexts */
+       xa_destroy(&guc->context_lookup);
+}
+
+void intel_guc_submission_reset_finish(struct intel_guc *guc)
+{
+       /* Reset called during driver load or during wedge? */
+       if (unlikely(!guc_submission_initialized(guc) ||
+                    test_bit(I915_WEDGED, &guc_to_gt(guc)->reset.flags))) {
+               return;
+       }
+
+       /*
+        * Technically possible for either of these values to be non-zero here,
+        * but very unlikely + harmless. Regardless, let's add a warn so we can
+        * see in CI if this happens frequently or is a precursor to taking
+        * down the machine.
+        */
+       GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
+       atomic_set(&guc->outstanding_submission_g2h, 0);
+
+       intel_guc_global_policies_update(guc);
+       enable_submission(guc);
+       intel_gt_unpark_heartbeats(guc_to_gt(guc));
+}
+
+/*
+ * Set up the memory resources to be shared with the GuC (via the GGTT)
+ * at firmware loading time.
+ */
+int intel_guc_submission_init(struct intel_guc *guc)
+{
+       int ret;
+
+       if (guc->lrc_desc_pool)
+               return 0;
+
+       ret = guc_lrc_desc_pool_create(guc);
+       if (ret)
+               return ret;
+       /*
+        * Keep static analysers happy by letting them know that we allocated
+        * the vma after testing that it didn't exist earlier.
+        */
+       GEM_BUG_ON(!guc->lrc_desc_pool);
+
+       xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
+
+       spin_lock_init(&guc->contexts_lock);
+       INIT_LIST_HEAD(&guc->guc_id_list);
+       ida_init(&guc->guc_ids);
+
+       return 0;
+}
+
+void intel_guc_submission_fini(struct intel_guc *guc)
+{
+       if (!guc->lrc_desc_pool)
+               return;
+
+       guc_lrc_desc_pool_destroy(guc);
+       i915_sched_engine_put(guc->sched_engine);
+}
+
+static inline void queue_request(struct i915_sched_engine *sched_engine,
+                                struct i915_request *rq,
+                                int prio)
+{
+       GEM_BUG_ON(!list_empty(&rq->sched.link));
+       list_add_tail(&rq->sched.link,
+                     i915_sched_lookup_priolist(sched_engine, prio));
+       set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
+static int guc_bypass_tasklet_submit(struct intel_guc *guc,
+                                    struct i915_request *rq)
+{
+       int ret;
+
+       __i915_request_submit(rq);
+
+       trace_i915_request_in(rq, 0);
+
+       guc_set_lrc_tail(rq);
+       ret = guc_add_request(guc, rq);
+       if (ret == -EBUSY)
+               guc->stalled_request = rq;
+
+       if (unlikely(ret == -EPIPE))
+               disable_submission(guc);
+
+       return ret;
+}
+
+static void guc_submit_request(struct i915_request *rq)
+{
+       struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
+       struct intel_guc *guc = &rq->engine->gt->uc.guc;
+       unsigned long flags;
+
+       /* Will be called from irq-context when using foreign fences. */
+       spin_lock_irqsave(&sched_engine->lock, flags);
+
+       if (submission_disabled(guc) || guc->stalled_request ||
+           !i915_sched_engine_is_empty(sched_engine))
+               queue_request(sched_engine, rq, rq_prio(rq));
+       else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
+               tasklet_hi_schedule(&sched_engine->tasklet);
+
+       spin_unlock_irqrestore(&sched_engine->lock, flags);
+}
+
+static int new_guc_id(struct intel_guc *guc)
+{
+       return ida_simple_get(&guc->guc_ids, 0,
+                             GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL |
+                             __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+}
+
+static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+       if (!context_guc_id_invalid(ce)) {
+               ida_simple_remove(&guc->guc_ids, ce->guc_id);
+               reset_lrc_desc(guc, ce->guc_id);
+               set_context_guc_id_invalid(ce);
+       }
+       if (!list_empty(&ce->guc_id_link))
+               list_del_init(&ce->guc_id_link);
+}
+
+static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&guc->contexts_lock, flags);
+       __release_guc_id(guc, ce);
+       spin_unlock_irqrestore(&guc->contexts_lock, flags);
+}
+
+static int steal_guc_id(struct intel_guc *guc)
+{
+       struct intel_context *ce;
+       int guc_id;
+
+       lockdep_assert_held(&guc->contexts_lock);
+
+       if (!list_empty(&guc->guc_id_list)) {
+               ce = list_first_entry(&guc->guc_id_list,
+                                     struct intel_context,
+                                     guc_id_link);
+
+               GEM_BUG_ON(atomic_read(&ce->guc_id_ref));
+               GEM_BUG_ON(context_guc_id_invalid(ce));
+
+               list_del_init(&ce->guc_id_link);
+               guc_id = ce->guc_id;
+               clr_context_registered(ce);
+               set_context_guc_id_invalid(ce);
+               return guc_id;
+       } else {
+               return -EAGAIN;
+       }
+}
+
+static int assign_guc_id(struct intel_guc *guc, u16 *out)
+{
+       int ret;
+
+       lockdep_assert_held(&guc->contexts_lock);
+
+       ret = new_guc_id(guc);
+       if (unlikely(ret < 0)) {
+               ret = steal_guc_id(guc);
+               if (ret < 0)
+                       return ret;
+       }
+
+       *out = ret;
+       return 0;
+}
+
+#define PIN_GUC_ID_TRIES       4
+static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+       int ret = 0;
+       unsigned long flags, tries = PIN_GUC_ID_TRIES;
+
+       GEM_BUG_ON(atomic_read(&ce->guc_id_ref));
+
+try_again:
+       spin_lock_irqsave(&guc->contexts_lock, flags);
+
+       if (context_guc_id_invalid(ce)) {
+               ret = assign_guc_id(guc, &ce->guc_id);
+               if (ret)
+                       goto out_unlock;
+               ret = 1;        /* Indicates newly assigned guc_id */
+       }
+       if (!list_empty(&ce->guc_id_link))
+               list_del_init(&ce->guc_id_link);
+       atomic_inc(&ce->guc_id_ref);
+
+out_unlock:
+       spin_unlock_irqrestore(&guc->contexts_lock, flags);
+
+       /*
+        * -EAGAIN indicates no guc_ids are available; retire any outstanding
+        * requests to see if that frees up a guc_id. If the first retire
+        * didn't help, insert a sleep with the timeslice duration before
+        * attempting to retire more requests. Double the sleep period each
+        * subsequent pass before finally giving up. The sleep period has a
+        * maximum of 100 ms and a minimum of 1 ms.
+        */
+       if (ret == -EAGAIN && --tries) {
+               if (PIN_GUC_ID_TRIES - tries > 1) {
+                       unsigned int timeslice_shifted =
+                               ce->engine->props.timeslice_duration_ms <<
+                               (PIN_GUC_ID_TRIES - tries - 2);
+                       unsigned int max = min_t(unsigned int, 100,
+                                                timeslice_shifted);
+
+                       msleep(max_t(unsigned int, max, 1));
+               }
+               intel_gt_retire_requests(guc_to_gt(guc));
+               goto try_again;
+       }
+
+       return ret;
+}
+
+static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
+{
+       unsigned long flags;
+
+       GEM_BUG_ON(atomic_read(&ce->guc_id_ref) < 0);
+
+       if (unlikely(context_guc_id_invalid(ce)))
+               return;
+
+       spin_lock_irqsave(&guc->contexts_lock, flags);
+       if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id_link) &&
+           !atomic_read(&ce->guc_id_ref))
+               list_add_tail(&ce->guc_id_link, &guc->guc_id_list);
+       spin_unlock_irqrestore(&guc->contexts_lock, flags);
+}
+
+static int __guc_action_register_context(struct intel_guc *guc,
+                                        u32 guc_id,
+                                        u32 offset,
+                                        bool loop)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_REGISTER_CONTEXT,
+               guc_id,
+               offset,
+       };
+
+       return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+                                            0, loop);
+}
+
+static int register_context(struct intel_context *ce, bool loop)
+{
+       struct intel_guc *guc = ce_to_guc(ce);
+       u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) +
+               ce->guc_id * sizeof(struct guc_lrc_desc);
+       int ret;
+
+       trace_intel_context_register(ce);
+
+       ret = __guc_action_register_context(guc, ce->guc_id, offset, loop);
+       if (likely(!ret))
+               set_context_registered(ce);
+
+       return ret;
+}
+
+static int __guc_action_deregister_context(struct intel_guc *guc,
+                                          u32 guc_id,
+                                          bool loop)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_DEREGISTER_CONTEXT,
+               guc_id,
+       };
+
+       return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+                                            G2H_LEN_DW_DEREGISTER_CONTEXT,
+                                            loop);
+}
+
+static int deregister_context(struct intel_context *ce, u32 guc_id, bool loop)
+{
+       struct intel_guc *guc = ce_to_guc(ce);
+
+       trace_intel_context_deregister(ce);
+
+       return __guc_action_deregister_context(guc, guc_id, loop);
+}
+
+static intel_engine_mask_t adjust_engine_mask(u8 class, intel_engine_mask_t mask)
+{
+       switch (class) {
+       case RENDER_CLASS:
+               return mask >> RCS0;
+       case VIDEO_ENHANCEMENT_CLASS:
+               return mask >> VECS0;
+       case VIDEO_DECODE_CLASS:
+               return mask >> VCS0;
+       case COPY_ENGINE_CLASS:
+               return mask >> BCS0;
+       default:
+               MISSING_CASE(class);
+               return 0;
+       }
+}
+
+static void guc_context_policy_init(struct intel_engine_cs *engine,
+                                   struct guc_lrc_desc *desc)
+{
+       desc->policy_flags = 0;
+
+       if (engine->flags & I915_ENGINE_WANT_FORCED_PREEMPTION)
+               desc->policy_flags |= CONTEXT_POLICY_FLAG_PREEMPT_TO_IDLE;
+
+       /* NB: For both of these, zero means disabled. */
+       desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
+       desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+}
+
+static inline u8 map_i915_prio_to_guc_prio(int prio);
+
+static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
+{
+       struct intel_engine_cs *engine = ce->engine;
+       struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
+       struct intel_guc *guc = &engine->gt->uc.guc;
+       u32 desc_idx = ce->guc_id;
+       struct guc_lrc_desc *desc;
+       const struct i915_gem_context *ctx;
+       int prio = I915_CONTEXT_DEFAULT_PRIORITY;
+       bool context_registered;
+       intel_wakeref_t wakeref;
+       int ret = 0;
+
+       GEM_BUG_ON(!engine->mask);
+
+       /*
+        * Ensure the LRC + CT vmas are in the same region, as the write
+        * barrier is done based on the CT vma region.
+        */
+       GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
+                  i915_gem_object_is_lmem(ce->ring->vma->obj));
+
+       context_registered = lrc_desc_registered(guc, desc_idx);
+
+       rcu_read_lock();
+       ctx = rcu_dereference(ce->gem_context);
+       if (ctx)
+               prio = ctx->sched.priority;
+       rcu_read_unlock();
+
+       reset_lrc_desc(guc, desc_idx);
+       set_lrc_desc_registered(guc, desc_idx, ce);
+
+       desc = __get_lrc_desc(guc, desc_idx);
+       desc->engine_class = engine_class_to_guc_class(engine->class);
+       desc->engine_submit_mask = adjust_engine_mask(engine->class,
+                                                     engine->mask);
+       desc->hw_context_desc = ce->lrc.lrca;
+       ce->guc_prio = map_i915_prio_to_guc_prio(prio);
+       desc->priority = ce->guc_prio;
+       desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+       guc_context_policy_init(engine, desc);
+       init_sched_state(ce);
+
+       /*
+        * The context_lookup xarray is used to determine if the hardware
+        * context is currently registered. There are two cases in which it
+        * could be registered: either the guc_id has been stolen from another
+        * context or the lrc descriptor address of this context has changed. In
+        * either case the context needs to be deregistered with the GuC before
+        * registering this context.
+        */
+       if (context_registered) {
+               trace_intel_context_steal_guc_id(ce);
+               if (!loop) {
+                       set_context_wait_for_deregister_to_register(ce);
+                       intel_context_get(ce);
+               } else {
+                       bool disabled;
+                       unsigned long flags;
+
+                       /* Seal race with Reset */
+                       spin_lock_irqsave(&ce->guc_state.lock, flags);
+                       disabled = submission_disabled(guc);
+                       if (likely(!disabled)) {
+                               set_context_wait_for_deregister_to_register(ce);
+                               intel_context_get(ce);
+                       }
+                       spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+                       if (unlikely(disabled)) {
+                               reset_lrc_desc(guc, desc_idx);
+                               return 0;       /* Will get registered later */
+                       }
+               }
+
+               /*
+                * If stealing the guc_id, this ce has the same guc_id as the
+                * context whose guc_id was stolen.
+                */
+               with_intel_runtime_pm(runtime_pm, wakeref)
+                       ret = deregister_context(ce, ce->guc_id, loop);
+               if (unlikely(ret == -EBUSY)) {
+                       clr_context_wait_for_deregister_to_register(ce);
+                       intel_context_put(ce);
+               } else if (unlikely(ret == -ENODEV)) {
+                       ret = 0;        /* Will get registered later */
+               }
+       } else {
+               with_intel_runtime_pm(runtime_pm, wakeref)
+                       ret = register_context(ce, loop);
+               if (unlikely(ret == -EBUSY))
+                       reset_lrc_desc(guc, desc_idx);
+               else if (unlikely(ret == -ENODEV))
+                       ret = 0;        /* Will get registered later */
+       }
+
+       return ret;
+}
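
As a side note for readers, the deregister-before-reregister rule described in the comment inside guc_lrc_desc_pin() can be reduced to a small standalone model. The table, ids and helper names below are invented for illustration and are not part of this patch or of i915:

/*
 * Standalone sketch (not i915 code): a guc_id slot that is already occupied
 * must be deregistered before the new context can be registered, mirroring
 * the "stolen guc_id / changed descriptor" handling above.
 */
#include <stdio.h>

#define MAX_IDS 8

static const char *lookup[MAX_IDS];     /* guc_id -> context name, NULL if free */

static void register_ctx(unsigned int id, const char *name)
{
        if (lookup[id]) {
                /* Slot already registered: the old owner must go first. */
                printf("deregister '%s' from guc_id %u\n", lookup[id], id);
                lookup[id] = NULL;
        }
        lookup[id] = name;
        printf("register '%s' at guc_id %u\n", name, id);
}

int main(void)
{
        register_ctx(3, "ctx-A");       /* fresh slot: plain registration */
        register_ctx(3, "ctx-B");       /* occupied slot: deregister ctx-A first */
        return 0;
}
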
+
+static int __guc_context_pre_pin(struct intel_context *ce,
+                                struct intel_engine_cs *engine,
+                                struct i915_gem_ww_ctx *ww,
+                                void **vaddr)
+{
+       return lrc_pre_pin(ce, engine, ww, vaddr);
+}
+
+static int __guc_context_pin(struct intel_context *ce,
+                            struct intel_engine_cs *engine,
+                            void *vaddr)
+{
+       if (i915_ggtt_offset(ce->state) !=
+           (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
+               set_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
+
+       /*
+        * The GuC context gets pinned in guc_request_alloc. See that function
+        * for an explanation of why.
+        */
+
+       return lrc_pin(ce, engine, vaddr);
+}
+
+static int guc_context_pre_pin(struct intel_context *ce,
+                              struct i915_gem_ww_ctx *ww,
+                              void **vaddr)
+{
+       return __guc_context_pre_pin(ce, ce->engine, ww, vaddr);
+}
+
+static int guc_context_pin(struct intel_context *ce, void *vaddr)
+{
+       return __guc_context_pin(ce, ce->engine, vaddr);
+}
+
+static void guc_context_unpin(struct intel_context *ce)
+{
+       struct intel_guc *guc = ce_to_guc(ce);
+
+       unpin_guc_id(guc, ce);
+       lrc_unpin(ce);
+}
+
+static void guc_context_post_unpin(struct intel_context *ce)
+{
+       lrc_post_unpin(ce);
+}
+
+static void __guc_context_sched_enable(struct intel_guc *guc,
+                                      struct intel_context *ce)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
+               ce->guc_id,
+               GUC_CONTEXT_ENABLE
+       };
+
+       trace_intel_context_sched_enable(ce);
+
+       guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+                                     G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
+}
+
+static void __guc_context_sched_disable(struct intel_guc *guc,
+                                       struct intel_context *ce,
+                                       u16 guc_id)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
+               guc_id, /* ce->guc_id not stable */
+               GUC_CONTEXT_DISABLE
+       };
+
+       GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID);
+
+       trace_intel_context_sched_disable(ce);
+
+       guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
+                                     G2H_LEN_DW_SCHED_CONTEXT_MODE_SET, true);
+}
+
+static void guc_blocked_fence_complete(struct intel_context *ce)
+{
+       lockdep_assert_held(&ce->guc_state.lock);
+
+       if (!i915_sw_fence_done(&ce->guc_blocked))
+               i915_sw_fence_complete(&ce->guc_blocked);
+}
+
+static void guc_blocked_fence_reinit(struct intel_context *ce)
+{
+       lockdep_assert_held(&ce->guc_state.lock);
+       GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_blocked));
+
+       /*
+        * This fence is always complete unless a pending schedule disable is
+        * outstanding. We arm the fence here and complete it when we receive
+        * the pending schedule disable complete message.
+        */
+       i915_sw_fence_fini(&ce->guc_blocked);
+       i915_sw_fence_reinit(&ce->guc_blocked);
+       i915_sw_fence_await(&ce->guc_blocked);
+       i915_sw_fence_commit(&ce->guc_blocked);
+}
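
For readers unfamiliar with the arm/complete pattern used by guc_blocked_fence_reinit() above, here is a minimal userspace analogy (pthreads, all names invented): the fence is complete unless a schedule disable has been armed, and waiters are released when the acknowledgement arrives. This is only an analogy, not the i915_sw_fence API:

/*
 * Userspace analogy only, not i915_sw_fence: a fence that is "complete
 * unless armed". fence_reinit() arms it before the schedule disable is
 * sent, fence_complete() releases waiters when the G2H ack arrives.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct blocked_fence {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool pending;                   /* true while a disable is outstanding */
};

static void fence_reinit(struct blocked_fence *f)
{
        pthread_mutex_lock(&f->lock);
        f->pending = true;              /* arm: waiters will now block */
        pthread_mutex_unlock(&f->lock);
}

static void fence_complete(struct blocked_fence *f)
{
        pthread_mutex_lock(&f->lock);
        f->pending = false;
        pthread_cond_broadcast(&f->cond);
        pthread_mutex_unlock(&f->lock);
}

static void fence_wait(struct blocked_fence *f)
{
        pthread_mutex_lock(&f->lock);
        while (f->pending)
                pthread_cond_wait(&f->cond, &f->lock);
        pthread_mutex_unlock(&f->lock);
}

int main(void)
{
        struct blocked_fence f = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .cond = PTHREAD_COND_INITIALIZER,
                .pending = false,       /* starts complete */
        };

        fence_wait(&f);                 /* returns immediately: nothing armed */
        fence_reinit(&f);               /* arm before "sending" the disable */
        fence_complete(&f);             /* the "G2H" arrives */
        fence_wait(&f);                 /* returns immediately again */
        printf("fence model done\n");
        return 0;
}
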
+
+static u16 prep_context_pending_disable(struct intel_context *ce)
+{
+       lockdep_assert_held(&ce->guc_state.lock);
+
+       set_context_pending_disable(ce);
+       clr_context_enabled(ce);
+       guc_blocked_fence_reinit(ce);
+       intel_context_get(ce);
+
+       return ce->guc_id;
+}
+
+static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
+{
+       struct intel_guc *guc = ce_to_guc(ce);
+       struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
+       unsigned long flags;
+       struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
+       intel_wakeref_t wakeref;
+       u16 guc_id;
+       bool enabled;
+
+       spin_lock_irqsave(&ce->guc_state.lock, flags);
+
+       /*
+        * Sync with the submission path; increment before the context state
+        * changes below.
+        */
+       spin_lock(&sched_engine->lock);
+       incr_context_blocked(ce);
+       spin_unlock(&sched_engine->lock);
+
+       enabled = context_enabled(ce);
+       if (unlikely(!enabled || submission_disabled(guc))) {
+               if (enabled)
+                       clr_context_enabled(ce);
+               spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+               return &ce->guc_blocked;
+       }
+
+       /*
+        * We add +2 here as the schedule disable complete CTB handler calls
+        * intel_context_sched_disable_unpin (-2 to pin_count).
+        */
+       atomic_add(2, &ce->pin_count);
+
+       guc_id = prep_context_pending_disable(ce);
+
+       spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+       with_intel_runtime_pm(runtime_pm, wakeref)
+               __guc_context_sched_disable(guc, ce, guc_id);
+
+       return &ce->guc_blocked;
+}
+
+static void guc_context_unblock(struct intel_context *ce)
+{
+       struct intel_guc *guc = ce_to_guc(ce);
+       struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
+       unsigned long flags;
+       struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
+       intel_wakeref_t wakeref;
+       bool enable;
+
+       GEM_BUG_ON(context_enabled(ce));
+
+       spin_lock_irqsave(&ce->guc_state.lock, flags);
+
+       if (unlikely(submission_disabled(guc) ||
+                    !intel_context_is_pinned(ce) ||
+                    context_pending_disable(ce) ||
+                    context_blocked(ce) > 1)) {
+               enable = false;
+       } else {
+               enable = true;
+               set_context_pending_enable(ce);
+               set_context_enabled(ce);
+               intel_context_get(ce);
+       }
+
+       /*
+        * Sync with the submission path; decrement after the context state
+        * changes above.
+        */
+       spin_lock(&sched_engine->lock);
+       decr_context_blocked(ce);
+       spin_unlock(&sched_engine->lock);
+
+       spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+       if (enable) {
+               with_intel_runtime_pm(runtime_pm, wakeref)
+                       __guc_context_sched_enable(guc, ce);
+       }
+}
+
+static void guc_context_cancel_request(struct intel_context *ce,
+                                      struct i915_request *rq)
+{
+       if (i915_sw_fence_signaled(&rq->submit)) {
+               struct i915_sw_fence *fence = guc_context_block(ce);
+
+               i915_sw_fence_wait(fence);
+               if (!i915_request_completed(rq)) {
+                       __i915_request_skip(rq);
+                       guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
+                                       true);
+               }
+               guc_context_unblock(ce);
+       }
+}
+
+static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
+                                                u16 guc_id,
+                                                u32 preemption_timeout)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
+               guc_id,
+               preemption_timeout
+       };
+
+       intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+}
+
+static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
+{
+       struct intel_guc *guc = ce_to_guc(ce);
+       struct intel_runtime_pm *runtime_pm =
+               &ce->engine->gt->i915->runtime_pm;
+       intel_wakeref_t wakeref;
+       unsigned long flags;
+
+       guc_flush_submissions(guc);
+
+       spin_lock_irqsave(&ce->guc_state.lock, flags);
+       set_context_banned(ce);
+
+       if (submission_disabled(guc) ||
+           (!context_enabled(ce) && !context_pending_disable(ce))) {
+               spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+               guc_cancel_context_requests(ce);
+               intel_engine_signal_breadcrumbs(ce->engine);
+       } else if (!context_pending_disable(ce)) {
+               u16 guc_id;
+
+               /*
+                * We add +2 here as the schedule disable complete CTB handler
+                * calls intel_context_sched_disable_unpin (-2 to pin_count).
+                */
+               atomic_add(2, &ce->pin_count);
+
+               guc_id = prep_context_pending_disable(ce);
+               spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+               /*
+                * In addition to disabling scheduling, set the preemption
+                * timeout to the minimum value (1 us) so the banned context
+                * gets kicked off the HW ASAP.
+                */
+               with_intel_runtime_pm(runtime_pm, wakeref) {
+                       __guc_context_set_preemption_timeout(guc, guc_id, 1);
+                       __guc_context_sched_disable(guc, ce, guc_id);
+               }
+       } else {
+               if (!context_guc_id_invalid(ce))
+                       with_intel_runtime_pm(runtime_pm, wakeref)
+                               __guc_context_set_preemption_timeout(guc,
+                                                                    ce->guc_id,
+                                                                    1);
+               spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+       }
+}
+
+static void guc_context_sched_disable(struct intel_context *ce)
+{
+       struct intel_guc *guc = ce_to_guc(ce);
+       unsigned long flags;
+       struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
+       intel_wakeref_t wakeref;
+       u16 guc_id;
+       bool enabled;
+
+       if (submission_disabled(guc) || context_guc_id_invalid(ce) ||
+           !lrc_desc_registered(guc, ce->guc_id)) {
+               clr_context_enabled(ce);
+               goto unpin;
+       }
+
+       if (!context_enabled(ce))
+               goto unpin;
+
+       spin_lock_irqsave(&ce->guc_state.lock, flags);
+
+       /*
+        * We have to check if the context has been disabled by another thread.
+        * We also have to check if the context has been pinned again as another
+        * pin operation is allowed to pass this function. Checking the pin
+        * count, within ce->guc_state.lock, synchronizes this function with
+        * guc_request_alloc ensuring a request doesn't slip through the
+        * 'context_pending_disable' fence. Checking within the spin lock (can't
+        * sleep) ensures another process doesn't pin this context and generate
+        * a request before we set the 'context_pending_disable' flag here.
+        */
+       enabled = context_enabled(ce);
+       if (unlikely(!enabled || submission_disabled(guc))) {
+               if (enabled)
+                       clr_context_enabled(ce);
+               spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+               goto unpin;
+       }
+       if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
+               spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+               return;
+       }
+       guc_id = prep_context_pending_disable(ce);
+
+       spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+       with_intel_runtime_pm(runtime_pm, wakeref)
+               __guc_context_sched_disable(guc, ce, guc_id);
+
+       return;
+unpin:
+       intel_context_sched_disable_unpin(ce);
+}
+
+static inline void guc_lrc_desc_unpin(struct intel_context *ce)
+{
+       struct intel_guc *guc = ce_to_guc(ce);
+
+       GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id));
+       GEM_BUG_ON(ce != __get_context(guc, ce->guc_id));
+       GEM_BUG_ON(context_enabled(ce));
+
+       clr_context_registered(ce);
+       deregister_context(ce, ce->guc_id, true);
+}
+
+static void __guc_context_destroy(struct intel_context *ce)
+{
+       GEM_BUG_ON(ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
+                  ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
+                  ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
+                  ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
+
+       lrc_fini(ce);
+       intel_context_fini(ce);
+
+       if (intel_engine_is_virtual(ce->engine)) {
+               struct guc_virtual_engine *ve =
+                       container_of(ce, typeof(*ve), context);
+
+               if (ve->base.breadcrumbs)
+                       intel_breadcrumbs_put(ve->base.breadcrumbs);
+
+               kfree(ve);
+       } else {
+               intel_context_free(ce);
+       }
+}
+
+static void guc_context_destroy(struct kref *kref)
+{
+       struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+       struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
+       struct intel_guc *guc = ce_to_guc(ce);
+       intel_wakeref_t wakeref;
+       unsigned long flags;
+       bool disabled;
+
+       /*
+        * If the guc_id is invalid, this context's guc_id has been stolen and
+        * we can free the context immediately. It can also be freed immediately
+        * if the context is not registered with the GuC or the GuC is in the
+        * middle of a reset.
+        */
+       if (context_guc_id_invalid(ce)) {
+               __guc_context_destroy(ce);
+               return;
+       } else if (submission_disabled(guc) ||
+                  !lrc_desc_registered(guc, ce->guc_id)) {
+               release_guc_id(guc, ce);
+               __guc_context_destroy(ce);
+               return;
+       }
+
+       /*
+        * We have to acquire the context spinlock and check guc_id again; if it
+        * is valid, it hasn't been stolen and needs to be deregistered. We
+        * delete this context from the list of unpinned guc_ids available to
+        * steal to seal a race with guc_lrc_desc_pin(). When the G2H CTB
+        * returns indicating this context has been deregistered the guc_id is
+        * returned to the pool of available guc_ids.
+        */
+       spin_lock_irqsave(&guc->contexts_lock, flags);
+       if (context_guc_id_invalid(ce)) {
+               spin_unlock_irqrestore(&guc->contexts_lock, flags);
+               __guc_context_destroy(ce);
+               return;
+       }
+
+       if (!list_empty(&ce->guc_id_link))
+               list_del_init(&ce->guc_id_link);
+       spin_unlock_irqrestore(&guc->contexts_lock, flags);
+
+       /* Seal race with Reset */
+       spin_lock_irqsave(&ce->guc_state.lock, flags);
+       disabled = submission_disabled(guc);
+       if (likely(!disabled))
+               set_context_destroyed(ce);
+       spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+       if (unlikely(disabled)) {
+               release_guc_id(guc, ce);
+               __guc_context_destroy(ce);
+               return;
+       }
+
+       /*
+        * We defer GuC context deregistration until the context is destroyed
+        * in order to save on CTBs. With this optimization ideally we only need
+        * 1 CTB to register the context during the first pin and 1 CTB to
+        * deregister the context when the context is destroyed. Without this
+        * optimization, a CTB would be needed every pin & unpin.
+        *
+        * XXX: Need to acquire the runtime wakeref as this can be triggered
+        * from context_free_worker when the runtime wakeref is not held.
+        * guc_lrc_desc_unpin requires the runtime as a GuC register is written
+        * in H2G CTB to deregister the context. A future patch may defer this
+        * H2G CTB if the runtime wakeref is zero.
+        */
+       with_intel_runtime_pm(runtime_pm, wakeref)
+               guc_lrc_desc_unpin(ce);
+}
+
+static int guc_context_alloc(struct intel_context *ce)
+{
+       return lrc_alloc(ce, ce->engine);
+}
+
+static void guc_context_set_prio(struct intel_guc *guc,
+                                struct intel_context *ce,
+                                u8 prio)
+{
+       u32 action[] = {
+               INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY,
+               ce->guc_id,
+               prio,
+       };
+
+       GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
+                  prio > GUC_CLIENT_PRIORITY_NORMAL);
+
+       if (ce->guc_prio == prio || submission_disabled(guc) ||
+           !context_registered(ce))
+               return;
+
+       guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+
+       ce->guc_prio = prio;
+       trace_intel_context_set_prio(ce);
+}
+
+static inline u8 map_i915_prio_to_guc_prio(int prio)
+{
+       if (prio == I915_PRIORITY_NORMAL)
+               return GUC_CLIENT_PRIORITY_KMD_NORMAL;
+       else if (prio < I915_PRIORITY_NORMAL)
+               return GUC_CLIENT_PRIORITY_NORMAL;
+       else if (prio < I915_PRIORITY_DISPLAY)
+               return GUC_CLIENT_PRIORITY_HIGH;
+       else
+               return GUC_CLIENT_PRIORITY_KMD_HIGH;
+}
+
+static inline void add_context_inflight_prio(struct intel_context *ce,
+                                            u8 guc_prio)
+{
+       lockdep_assert_held(&ce->guc_active.lock);
+       GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count));
+
+       ++ce->guc_prio_count[guc_prio];
+
+       /* Overflow protection */
+       GEM_WARN_ON(!ce->guc_prio_count[guc_prio]);
+}
+
+static inline void sub_context_inflight_prio(struct intel_context *ce,
+                                            u8 guc_prio)
+{
+       lockdep_assert_held(&ce->guc_active.lock);
+       GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count));
+
+       /* Underflow protection */
+       GEM_WARN_ON(!ce->guc_prio_count[guc_prio]);
+
+       --ce->guc_prio_count[guc_prio];
+}
+
+static inline void update_context_prio(struct intel_context *ce)
+{
+       struct intel_guc *guc = &ce->engine->gt->uc.guc;
+       int i;
+
+       BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
+       BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
+
+       lockdep_assert_held(&ce->guc_active.lock);
+
+       for (i = 0; i < ARRAY_SIZE(ce->guc_prio_count); ++i) {
+               if (ce->guc_prio_count[i]) {
+                       guc_context_set_prio(guc, ce, i);
+                       break;
+               }
+       }
+}
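
To make the band bookkeeping above concrete, here is a self-contained sketch: each in-flight request bumps a counter for its band, and the effective context priority follows the lowest-numbered (highest-priority) non-empty band. The band count and helper names are illustrative, not the GuC ABI values:

/*
 * Illustrative model (not i915 code) of the guc_prio_count bookkeeping:
 * the context priority follows the lowest-numbered, i.e. highest-priority,
 * band that still has in-flight requests.
 */
#include <stdio.h>

#define NUM_BANDS 4     /* stands in for GUC_CLIENT_PRIORITY_NUM */

static unsigned int prio_count[NUM_BANDS];

static void add_inflight(unsigned int band) { prio_count[band]++; }
static void sub_inflight(unsigned int band) { prio_count[band]--; }

static int effective_band(void)
{
        for (int i = 0; i < NUM_BANDS; i++)
                if (prio_count[i])
                        return i;       /* first non-empty band wins */
        return -1;                      /* all empty: the real code leaves prio alone */
}

int main(void)
{
        add_inflight(3);                                /* a low-priority request */
        add_inflight(1);                                /* a high-priority request */
        printf("band = %d\n", effective_band());        /* -> 1 */
        sub_inflight(1);                                /* high-prio request retired */
        printf("band = %d\n", effective_band());        /* -> 3 */
        return 0;
}
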
+
+static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio)
+{
+       /* Lower value is higher priority */
+       return new_guc_prio < old_guc_prio;
+}
+
+static void add_to_context(struct i915_request *rq)
+{
+       struct intel_context *ce = rq->context;
+       u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq));
+
+       GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
+
+       spin_lock(&ce->guc_active.lock);
+       list_move_tail(&rq->sched.link, &ce->guc_active.requests);
+
+       if (rq->guc_prio == GUC_PRIO_INIT) {
+               rq->guc_prio = new_guc_prio;
+               add_context_inflight_prio(ce, rq->guc_prio);
+       } else if (new_guc_prio_higher(rq->guc_prio, new_guc_prio)) {
+               sub_context_inflight_prio(ce, rq->guc_prio);
+               rq->guc_prio = new_guc_prio;
+               add_context_inflight_prio(ce, rq->guc_prio);
+       }
+       update_context_prio(ce);
+
+       spin_unlock(&ce->guc_active.lock);
+}
+
+static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
+{
+       lockdep_assert_held(&ce->guc_active.lock);
+
+       if (rq->guc_prio != GUC_PRIO_INIT &&
+           rq->guc_prio != GUC_PRIO_FINI) {
+               sub_context_inflight_prio(ce, rq->guc_prio);
+               update_context_prio(ce);
+       }
+       rq->guc_prio = GUC_PRIO_FINI;
+}
+
+static void remove_from_context(struct i915_request *rq)
+{
+       struct intel_context *ce = rq->context;
+
+       spin_lock_irq(&ce->guc_active.lock);
+
+       list_del_init(&rq->sched.link);
+       clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+       /* Prevent further __await_execution() registering a cb, then flush */
+       set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
+
+       guc_prio_fini(rq, ce);
+
+       spin_unlock_irq(&ce->guc_active.lock);
+
+       atomic_dec(&ce->guc_id_ref);
+       i915_request_notify_execute_cb_imm(rq);
+}
+
+static const struct intel_context_ops guc_context_ops = {
+       .alloc = guc_context_alloc,
+
+       .pre_pin = guc_context_pre_pin,
+       .pin = guc_context_pin,
+       .unpin = guc_context_unpin,
+       .post_unpin = guc_context_post_unpin,
+
+       .ban = guc_context_ban,
+
+       .cancel_request = guc_context_cancel_request,
+
+       .enter = intel_context_enter_engine,
+       .exit = intel_context_exit_engine,
+
+       .sched_disable = guc_context_sched_disable,
+
+       .reset = lrc_reset,
+       .destroy = guc_context_destroy,
+
+       .create_virtual = guc_create_virtual,
+};
+
+static void __guc_signal_context_fence(struct intel_context *ce)
+{
+       struct i915_request *rq;
+
+       lockdep_assert_held(&ce->guc_state.lock);
+
+       if (!list_empty(&ce->guc_state.fences))
+               trace_intel_context_fence_release(ce);
+
+       list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link)
+               i915_sw_fence_complete(&rq->submit);
+
+       INIT_LIST_HEAD(&ce->guc_state.fences);
+}
+
+static void guc_signal_context_fence(struct intel_context *ce)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ce->guc_state.lock, flags);
+       clr_context_wait_for_deregister_to_register(ce);
+       __guc_signal_context_fence(ce);
+       spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+}
+
+static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
+{
+       return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
+               !lrc_desc_registered(ce_to_guc(ce), ce->guc_id)) &&
+               !submission_disabled(ce_to_guc(ce));
+}
+
+static int guc_request_alloc(struct i915_request *rq)
+{
+       struct intel_context *ce = rq->context;
+       struct intel_guc *guc = ce_to_guc(ce);
+       unsigned long flags;
+       int ret;
+
+       GEM_BUG_ON(!intel_context_is_pinned(rq->context));
+
+       /*
+        * Flush enough space to reduce the likelihood of waiting after
+        * we start building the request - in which case we will just
+        * have to repeat work.
+        */
+       rq->reserved_space += GUC_REQUEST_SIZE;
+
+       /*
+        * Note that after this point, we have committed to using
+        * this request as it is being used to both track the
+        * state of engine initialisation and liveness of the
+        * golden renderstate above. Think twice before you try
+        * to cancel/unwind this request now.
+        */
+
+       /* Unconditionally invalidate GPU caches and TLBs. */
+       ret = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+       if (ret)
+               return ret;
+
+       rq->reserved_space -= GUC_REQUEST_SIZE;
+
+       /*
+        * Call pin_guc_id here rather than in the pinning step as with
+        * dma_resv, contexts can be repeatedly pinned / unpinned, thrashing the
+        * guc_ids and creating horrible race conditions. This is especially bad
+        * when guc_ids are being stolen due to over subscription. By the time
+        * this function is reached, it is guaranteed that the guc_id will be
+        * persistent until the generated request is retired, thus sealing these
+        * race conditions. It is still safe to fail here if guc_ids are
+        * exhausted and return -EAGAIN to the user indicating that they can try
+        * again in the future.
+        *
+        * There is no need for a lock here as the timeline mutex ensures at
+        * most one context can be executing this code path at once. The
+        * guc_id_ref is incremented once for every request in flight and
+        * decremented on each retire. When it is zero, a lock around the
+        * increment (in pin_guc_id) is needed to seal a race with unpin_guc_id.
+        */
+       if (atomic_add_unless(&ce->guc_id_ref, 1, 0))
+               goto out;
+
+       ret = pin_guc_id(guc, ce);      /* returns 1 if new guc_id assigned */
+       if (unlikely(ret < 0))
+               return ret;
+       if (context_needs_register(ce, !!ret)) {
+               ret = guc_lrc_desc_pin(ce, true);
+               if (unlikely(ret)) {    /* unwind */
+                       if (ret == -EPIPE) {
+                               disable_submission(guc);
+                               goto out;       /* GPU will be reset */
+                       }
+                       atomic_dec(&ce->guc_id_ref);
+                       unpin_guc_id(guc, ce);
+                       return ret;
+               }
+       }
+
+       clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
+
+out:
+       /*
+        * We block all requests on this context if a G2H is pending for a
+        * schedule disable or context deregistration as the GuC will fail a
+        * schedule enable or context registration if either G2H is pending
+        * respectively. Once a G2H returns, the fence that is blocking these
+        * requests is released (see guc_signal_context_fence).
+        *
+        * We can safely check the below fields outside of the lock as it isn't
+        * possible for these fields to transition from being clear to set, but
+        * the converse is possible, hence the need for the check within the lock.
+        */
+       if (likely(!context_wait_for_deregister_to_register(ce) &&
+                  !context_pending_disable(ce)))
+               return 0;
+
+       spin_lock_irqsave(&ce->guc_state.lock, flags);
+       if (context_wait_for_deregister_to_register(ce) ||
+           context_pending_disable(ce)) {
+               i915_sw_fence_await(&rq->submit);
+
+               list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
+       }
+       spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       return 0;
 }
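
The guc_id_ref fast path in guc_request_alloc() above hinges on an "add unless the counter equals a given value" primitive. A standalone approximation with C11 atomics follows; the helper name is invented and this is not the kernel's atomic_add_unless implementation:

/*
 * Standalone approximation (C11 atomics, invented names) of the
 * atomic_add_unless(&ce->guc_id_ref, 1, 0) fast path above: take an extra
 * reference only if one already exists, otherwise fall back to the slow
 * path (pin_guc_id in the patch).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool add_unless(atomic_int *v, int add, int unless)
{
        int cur = atomic_load(v);

        while (cur != unless) {
                /* On failure, cur is reloaded with the current value. */
                if (atomic_compare_exchange_weak(v, &cur, cur + add))
                        return true;
        }
        return false;                   /* hit 'unless': caller takes the slow path */
}

int main(void)
{
        atomic_int guc_id_ref = 0;

        if (!add_unless(&guc_id_ref, 1, 0))
                printf("slow path: pin a guc_id first\n");

        atomic_store(&guc_id_ref, 2);
        if (add_unless(&guc_id_ref, 1, 0))
                printf("fast path: guc_id_ref is now %d\n",
                       atomic_load(&guc_id_ref));
        return 0;
}
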
 
-static void guc_reset_finish(struct intel_engine_cs *engine)
+static int guc_virtual_context_pre_pin(struct intel_context *ce,
+                                      struct i915_gem_ww_ctx *ww,
+                                      void **vaddr)
 {
-       struct intel_engine_execlists * const execlists = &engine->execlists;
-
-       if (__tasklet_enable(&execlists->tasklet))
-               /* And kick in case we missed a new request submission. */
-               tasklet_hi_schedule(&execlists->tasklet);
+       struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
 
-       ENGINE_TRACE(engine, "depth->%d\n",
-                    atomic_read(&execlists->tasklet.count));
+       return __guc_context_pre_pin(ce, engine, ww, vaddr);
 }
 
-/*
- * Set up the memory resources to be shared with the GuC (via the GGTT)
- * at firmware loading time.
- */
-int intel_guc_submission_init(struct intel_guc *guc)
+static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
 {
-       int ret;
-
-       if (guc->stage_desc_pool)
-               return 0;
-
-       ret = guc_stage_desc_pool_create(guc);
-       if (ret)
-               return ret;
-       /*
-        * Keep static analysers happy, let them know that we allocated the
-        * vma after testing that it didn't exist earlier.
-        */
-       GEM_BUG_ON(!guc->stage_desc_pool);
+       struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
 
-       return 0;
+       return __guc_context_pin(ce, engine, vaddr);
 }
 
-void intel_guc_submission_fini(struct intel_guc *guc)
+static void guc_virtual_context_enter(struct intel_context *ce)
 {
-       if (guc->stage_desc_pool) {
-               guc_stage_desc_pool_destroy(guc);
-       }
-}
+       intel_engine_mask_t tmp, mask = ce->engine->mask;
+       struct intel_engine_cs *engine;
 
-static int guc_context_alloc(struct intel_context *ce)
-{
-       return lrc_alloc(ce, ce->engine);
+       for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+               intel_engine_pm_get(engine);
+
+       intel_timeline_enter(ce->timeline);
 }
 
-static int guc_context_pre_pin(struct intel_context *ce,
-                              struct i915_gem_ww_ctx *ww,
-                              void **vaddr)
+static void guc_virtual_context_exit(struct intel_context *ce)
 {
-       return lrc_pre_pin(ce, ce->engine, ww, vaddr);
+       intel_engine_mask_t tmp, mask = ce->engine->mask;
+       struct intel_engine_cs *engine;
+
+       for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+               intel_engine_pm_put(engine);
+
+       intel_timeline_exit(ce->timeline);
 }
 
-static int guc_context_pin(struct intel_context *ce, void *vaddr)
+static int guc_virtual_context_alloc(struct intel_context *ce)
 {
-       return lrc_pin(ce, ce->engine, vaddr);
+       struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+
+       return lrc_alloc(ce, engine);
 }
 
-static const struct intel_context_ops guc_context_ops = {
-       .alloc = guc_context_alloc,
+static const struct intel_context_ops virtual_guc_context_ops = {
+       .alloc = guc_virtual_context_alloc,
 
-       .pre_pin = guc_context_pre_pin,
-       .pin = guc_context_pin,
-       .unpin = lrc_unpin,
-       .post_unpin = lrc_post_unpin,
+       .pre_pin = guc_virtual_context_pre_pin,
+       .pin = guc_virtual_context_pin,
+       .unpin = guc_context_unpin,
+       .post_unpin = guc_context_post_unpin,
 
-       .enter = intel_context_enter_engine,
-       .exit = intel_context_exit_engine,
+       .ban = guc_context_ban,
 
-       .reset = lrc_reset,
-       .destroy = lrc_destroy,
-};
+       .cancel_request = guc_context_cancel_request,
 
-static int guc_request_alloc(struct i915_request *request)
-{
-       int ret;
+       .enter = guc_virtual_context_enter,
+       .exit = guc_virtual_context_exit,
 
-       GEM_BUG_ON(!intel_context_is_pinned(request->context));
+       .sched_disable = guc_context_sched_disable,
 
-       /*
-        * Flush enough space to reduce the likelihood of waiting after
-        * we start building the request - in which case we will just
-        * have to repeat work.
-        */
-       request->reserved_space += GUC_REQUEST_SIZE;
+       .destroy = guc_context_destroy,
 
-       /*
-        * Note that after this point, we have committed to using
-        * this request as it is being used to both track the
-        * state of engine initialisation and liveness of the
-        * golden renderstate above. Think twice before you try
-        * to cancel/unwind this request now.
-        */
+       .get_sibling = guc_virtual_get_sibling,
+};
 
-       /* Unconditionally invalidate GPU caches and TLBs. */
-       ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
-       if (ret)
-               return ret;
+static bool
+guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b)
+{
+       struct intel_engine_cs *sibling;
+       intel_engine_mask_t tmp, mask = b->engine_mask;
+       bool result = false;
 
-       request->reserved_space -= GUC_REQUEST_SIZE;
-       return 0;
+       for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
+               result |= intel_engine_irq_enable(sibling);
+
+       return result;
 }
 
-static inline void queue_request(struct intel_engine_cs *engine,
-                                struct i915_request *rq,
-                                int prio)
+static void
+guc_irq_disable_breadcrumbs(struct intel_breadcrumbs *b)
 {
-       GEM_BUG_ON(!list_empty(&rq->sched.link));
-       list_add_tail(&rq->sched.link,
-                     i915_sched_lookup_priolist(engine, prio));
-       set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+       struct intel_engine_cs *sibling;
+       intel_engine_mask_t tmp, mask = b->engine_mask;
+
+       for_each_engine_masked(sibling, b->irq_engine->gt, mask, tmp)
+               intel_engine_irq_disable(sibling);
 }
 
-static void guc_submit_request(struct i915_request *rq)
+static void guc_init_breadcrumbs(struct intel_engine_cs *engine)
 {
-       struct intel_engine_cs *engine = rq->engine;
-       unsigned long flags;
+       int i;
 
-       /* Will be called from irq-context when using foreign fences. */
-       spin_lock_irqsave(&engine->active.lock, flags);
+       /*
+        * In GuC submission mode we do not know which physical engine a request
+        * will be scheduled on, this creates a problem because the breadcrumb
+        * interrupt is per physical engine. To work around this we attach
+        * requests and direct all breadcrumb interrupts to the first instance
+        * of an engine per class. In addition all breadcrumb interrupts are
+        * enabled / disabled across an engine class in unison.
+        */
+       for (i = 0; i < MAX_ENGINE_INSTANCE; ++i) {
+               struct intel_engine_cs *sibling =
+                       engine->gt->engine_class[engine->class][i];
+
+               if (sibling) {
+                       if (engine->breadcrumbs != sibling->breadcrumbs) {
+                               intel_breadcrumbs_put(engine->breadcrumbs);
+                               engine->breadcrumbs =
+                                       intel_breadcrumbs_get(sibling->breadcrumbs);
+                       }
+                       break;
+               }
+       }
 
-       queue_request(engine, rq, rq_prio(rq));
+       if (engine->breadcrumbs) {
+               engine->breadcrumbs->engine_mask |= engine->mask;
+               engine->breadcrumbs->irq_enable = guc_irq_enable_breadcrumbs;
+               engine->breadcrumbs->irq_disable = guc_irq_disable_breadcrumbs;
+       }
+}
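
The comment above describes directing all breadcrumb interrupts of a class to its first populated instance. A toy standalone model of that selection is sketched below; none of these structures match the i915 ones and the names are invented:

/*
 * Toy model (not i915 structures): every engine in a class reuses the
 * breadcrumbs object of the first populated instance and contributes its
 * own bit to that object's engine mask.
 */
#include <stdio.h>

#define MAX_INSTANCE 4

struct toy_breadcrumbs { unsigned int engine_mask; };

struct toy_engine {
        unsigned int mask;              /* one bit per engine */
        struct toy_breadcrumbs *b;
};

static void share_breadcrumbs(struct toy_engine *class_engines[MAX_INSTANCE],
                              struct toy_engine *engine)
{
        for (int i = 0; i < MAX_INSTANCE; i++) {
                struct toy_engine *sibling = class_engines[i];

                if (sibling) {
                        engine->b = sibling->b; /* first instance wins */
                        break;
                }
        }
        engine->b->engine_mask |= engine->mask;
}

int main(void)
{
        struct toy_breadcrumbs b0 = { 0 }, b1 = { 0 };
        struct toy_engine e0 = { .mask = 0x1, .b = &b0 };
        struct toy_engine e1 = { .mask = 0x2, .b = &b1 };
        struct toy_engine *vcs[MAX_INSTANCE] = { &e0, &e1 };

        share_breadcrumbs(vcs, &e0);
        share_breadcrumbs(vcs, &e1);    /* e1 now shares e0's breadcrumbs */
        printf("shared mask = 0x%x\n", e0.b->engine_mask);      /* 0x3 */
        return 0;
}
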
 
-       GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
-       GEM_BUG_ON(list_empty(&rq->sched.link));
+static void guc_bump_inflight_request_prio(struct i915_request *rq,
+                                          int prio)
+{
+       struct intel_context *ce = rq->context;
+       u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
+
+       /* Short circuit if no priority bump is needed */
+       if (prio < I915_PRIORITY_NORMAL ||
+           rq->guc_prio == GUC_PRIO_FINI ||
+           (rq->guc_prio != GUC_PRIO_INIT &&
+            !new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
+               return;
+
+       spin_lock(&ce->guc_active.lock);
+       if (rq->guc_prio != GUC_PRIO_FINI) {
+               if (rq->guc_prio != GUC_PRIO_INIT)
+                       sub_context_inflight_prio(ce, rq->guc_prio);
+               rq->guc_prio = new_guc_prio;
+               add_context_inflight_prio(ce, rq->guc_prio);
+               update_context_prio(ce);
+       }
+       spin_unlock(&ce->guc_active.lock);
+}
 
-       tasklet_hi_schedule(&engine->execlists.tasklet);
+static void guc_retire_inflight_request_prio(struct i915_request *rq)
+{
+       struct intel_context *ce = rq->context;
 
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       spin_lock(&ce->guc_active.lock);
+       guc_prio_fini(rq, ce);
+       spin_unlock(&ce->guc_active.lock);
 }
 
 static void sanitize_hwsp(struct intel_engine_cs *engine)
@@ -588,21 +2345,68 @@ static int guc_resume(struct intel_engine_cs *engine)
        return 0;
 }
 
+static bool guc_sched_engine_disabled(struct i915_sched_engine *sched_engine)
+{
+       return !sched_engine->tasklet.callback;
+}
+
 static void guc_set_default_submission(struct intel_engine_cs *engine)
 {
        engine->submit_request = guc_submit_request;
 }
 
+static inline void guc_kernel_context_pin(struct intel_guc *guc,
+                                         struct intel_context *ce)
+{
+       if (context_guc_id_invalid(ce))
+               pin_guc_id(guc, ce);
+       guc_lrc_desc_pin(ce, true);
+}
+
+static inline void guc_init_lrc_mapping(struct intel_guc *guc)
+{
+       struct intel_gt *gt = guc_to_gt(guc);
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
+
+       /* make sure all descriptors are clean... */
+       xa_destroy(&guc->context_lookup);
+
+       /*
+        * Some contexts might have been pinned before we enabled GuC
+        * submission, so we need to add them to the GuC bookkeeping.
+        * Also, after a reset of the GuC we want to make sure that the
+        * information shared with GuC is properly reset. The kernel LRCs are
+        * not attached to the gem_context, so they need to be added separately.
+        *
+        * Note: we purposefully do not check the return of guc_lrc_desc_pin,
+        * because that function can only fail if a reset is just starting. This
+        * is at the end of reset so presumably another reset isn't happening
+        * and even if it did this code would be run again.
+        */
+
+       for_each_engine(engine, gt, id)
+               if (engine->kernel_context)
+                       guc_kernel_context_pin(guc, engine->kernel_context);
+}
+
 static void guc_release(struct intel_engine_cs *engine)
 {
        engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
 
-       tasklet_kill(&engine->execlists.tasklet);
-
        intel_engine_cleanup_common(engine);
        lrc_fini_wa_ctx(engine);
 }
 
+static void virtual_guc_bump_serial(struct intel_engine_cs *engine)
+{
+       struct intel_engine_cs *e;
+       intel_engine_mask_t tmp, mask = engine->mask;
+
+       for_each_engine_masked(e, engine->gt, mask, tmp)
+               e->serial++;
+}
+
 static void guc_default_vfuncs(struct intel_engine_cs *engine)
 {
        /* Default vfuncs which can be overridden by each engine. */
@@ -611,13 +2415,15 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
 
        engine->cops = &guc_context_ops;
        engine->request_alloc = guc_request_alloc;
+       engine->add_active_request = add_to_context;
+       engine->remove_active_request = remove_from_context;
 
-       engine->schedule = i915_schedule;
+       engine->sched_engine->schedule = i915_schedule;
 
-       engine->reset.prepare = guc_reset_prepare;
-       engine->reset.rewind = guc_reset_rewind;
-       engine->reset.cancel = guc_reset_cancel;
-       engine->reset.finish = guc_reset_finish;
+       engine->reset.prepare = guc_reset_nop;
+       engine->reset.rewind = guc_rewind_nop;
+       engine->reset.cancel = guc_reset_nop;
+       engine->reset.finish = guc_reset_nop;
 
        engine->emit_flush = gen8_emit_flush_xcs;
        engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
@@ -629,13 +2435,13 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
        engine->set_default_submission = guc_set_default_submission;
 
        engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+       engine->flags |= I915_ENGINE_HAS_TIMESLICES;
 
        /*
         * TODO: GuC supports timeslicing and semaphores as well, but they're
         * handled by the firmware so some minor tweaks are required before
         * enabling.
         *
-        * engine->flags |= I915_ENGINE_HAS_TIMESLICES;
         * engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
         */
 
@@ -666,9 +2472,21 @@ static inline void guc_default_irqs(struct intel_engine_cs *engine)
        intel_engine_set_irq_handler(engine, cs_irq_handler);
 }
 
+static void guc_sched_engine_destroy(struct kref *kref)
+{
+       struct i915_sched_engine *sched_engine =
+               container_of(kref, typeof(*sched_engine), ref);
+       struct intel_guc *guc = sched_engine->private_data;
+
+       guc->sched_engine = NULL;
+       tasklet_kill(&sched_engine->tasklet); /* flush the callback */
+       kfree(sched_engine);
+}
+
 int intel_guc_submission_setup(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *i915 = engine->i915;
+       struct intel_guc *guc = &engine->gt->uc.guc;
 
        /*
         * The setup relies on several assumptions (e.g. irqs always enabled)
@@ -676,10 +2494,28 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
         */
        GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
 
-       tasklet_setup(&engine->execlists.tasklet, guc_submission_tasklet);
+       if (!guc->sched_engine) {
+               guc->sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL);
+               if (!guc->sched_engine)
+                       return -ENOMEM;
+
+               guc->sched_engine->schedule = i915_schedule;
+               guc->sched_engine->disabled = guc_sched_engine_disabled;
+               guc->sched_engine->private_data = guc;
+               guc->sched_engine->destroy = guc_sched_engine_destroy;
+               guc->sched_engine->bump_inflight_request_prio =
+                       guc_bump_inflight_request_prio;
+               guc->sched_engine->retire_inflight_request_prio =
+                       guc_retire_inflight_request_prio;
+               tasklet_setup(&guc->sched_engine->tasklet,
+                             guc_submission_tasklet);
+       }
+       i915_sched_engine_put(engine->sched_engine);
+       engine->sched_engine = i915_sched_engine_get(guc->sched_engine);
 
        guc_default_vfuncs(engine);
        guc_default_irqs(engine);
+       guc_init_breadcrumbs(engine);
 
        if (engine->class == RENDER_CLASS)
                rcs_submission_override(engine);
@@ -695,18 +2531,19 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
 
 void intel_guc_submission_enable(struct intel_guc *guc)
 {
-       guc_stage_desc_init(guc);
+       guc_init_lrc_mapping(guc);
 }
 
 void intel_guc_submission_disable(struct intel_guc *guc)
 {
-       struct intel_gt *gt = guc_to_gt(guc);
-
-       GEM_BUG_ON(gt->awake); /* GT should be parked first */
-
        /* Note: By the time we're here, GuC may have already been reset */
+}
 
-       guc_stage_desc_fini(guc);
+static bool __guc_submission_supported(struct intel_guc *guc)
+{
+       /* GuC submission is unavailable for pre-Gen11 */
+       return intel_guc_is_supported(guc) &&
+              GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11;
 }
 
 static bool __guc_submission_selected(struct intel_guc *guc)
@@ -721,5 +2558,481 @@ static bool __guc_submission_selected(struct intel_guc *guc)
 
 void intel_guc_submission_init_early(struct intel_guc *guc)
 {
+       guc->submission_supported = __guc_submission_supported(guc);
        guc->submission_selected = __guc_submission_selected(guc);
 }
+
+static inline struct intel_context *
+g2h_context_lookup(struct intel_guc *guc, u32 desc_idx)
+{
+       struct intel_context *ce;
+
+       if (unlikely(desc_idx >= GUC_MAX_LRC_DESCRIPTORS)) {
+               drm_err(&guc_to_gt(guc)->i915->drm,
+                       "Invalid desc_idx %u", desc_idx);
+               return NULL;
+       }
+
+       ce = __get_context(guc, desc_idx);
+       if (unlikely(!ce)) {
+               drm_err(&guc_to_gt(guc)->i915->drm,
+                       "Context is NULL, desc_idx %u", desc_idx);
+               return NULL;
+       }
+
+       return ce;
+}
+
+static void decr_outstanding_submission_g2h(struct intel_guc *guc)
+{
+       if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
+               wake_up_all(&guc->ct.wq);
+}
+
+int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
+                                         const u32 *msg,
+                                         u32 len)
+{
+       struct intel_context *ce;
+       u32 desc_idx = msg[0];
+
+       if (unlikely(len < 1)) {
+               drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+               return -EPROTO;
+       }
+
+       ce = g2h_context_lookup(guc, desc_idx);
+       if (unlikely(!ce))
+               return -EPROTO;
+
+       trace_intel_context_deregister_done(ce);
+
+       if (context_wait_for_deregister_to_register(ce)) {
+               struct intel_runtime_pm *runtime_pm =
+                       &ce->engine->gt->i915->runtime_pm;
+               intel_wakeref_t wakeref;
+
+               /*
+                * The previous owner of this guc_id has been deregistered, so
+                * it is now safe to register this context.
+                */
+               with_intel_runtime_pm(runtime_pm, wakeref)
+                       register_context(ce, true);
+               guc_signal_context_fence(ce);
+               intel_context_put(ce);
+       } else if (context_destroyed(ce)) {
+               /* Context has been destroyed */
+               release_guc_id(guc, ce);
+               __guc_context_destroy(ce);
+       }
+
+       decr_outstanding_submission_g2h(guc);
+
+       return 0;
+}
+
+int intel_guc_sched_done_process_msg(struct intel_guc *guc,
+                                    const u32 *msg,
+                                    u32 len)
+{
+       struct intel_context *ce;
+       unsigned long flags;
+       u32 desc_idx = msg[0];
+
+       if (unlikely(len < 2)) {
+               drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+               return -EPROTO;
+       }
+
+       ce = g2h_context_lookup(guc, desc_idx);
+       if (unlikely(!ce))
+               return -EPROTO;
+
+       if (unlikely(context_destroyed(ce) ||
+                    (!context_pending_enable(ce) &&
+                    !context_pending_disable(ce)))) {
+               drm_err(&guc_to_gt(guc)->i915->drm,
+                       "Bad context sched_state 0x%x, 0x%x, desc_idx %u",
+                       atomic_read(&ce->guc_sched_state_no_lock),
+                       ce->guc_state.sched_state, desc_idx);
+               return -EPROTO;
+       }
+
+       trace_intel_context_sched_done(ce);
+
+       if (context_pending_enable(ce)) {
+               clr_context_pending_enable(ce);
+       } else if (context_pending_disable(ce)) {
+               bool banned;
+
+               /*
+                * Unpin must be done before __guc_signal_context_fence,
+                * otherwise there is a race where requests are submitted and
+                * retired before this unpin completes, resulting in the
+                * pin_count going to zero while the context is still enabled.
+                */
+               intel_context_sched_disable_unpin(ce);
+
+               spin_lock_irqsave(&ce->guc_state.lock, flags);
+               banned = context_banned(ce);
+               clr_context_banned(ce);
+               clr_context_pending_disable(ce);
+               __guc_signal_context_fence(ce);
+               guc_blocked_fence_complete(ce);
+               spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+
+               if (banned) {
+                       guc_cancel_context_requests(ce);
+                       intel_engine_signal_breadcrumbs(ce->engine);
+               }
+       }
+
+       decr_outstanding_submission_g2h(guc);
+       intel_context_put(ce);
+
+       return 0;
+}
+
+static void capture_error_state(struct intel_guc *guc,
+                               struct intel_context *ce)
+{
+       struct intel_gt *gt = guc_to_gt(guc);
+       struct drm_i915_private *i915 = gt->i915;
+       struct intel_engine_cs *engine = __context_to_physical_engine(ce);
+       intel_wakeref_t wakeref;
+
+       intel_engine_set_hung_context(engine, ce);
+       with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+               i915_capture_error_state(gt, engine->mask);
+       atomic_inc(&i915->gpu_error.reset_engine_count[engine->uabi_class]);
+}
+
+static void guc_context_replay(struct intel_context *ce)
+{
+       struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
+
+       __guc_reset_context(ce, true);
+       tasklet_hi_schedule(&sched_engine->tasklet);
+}
+
+static void guc_handle_context_reset(struct intel_guc *guc,
+                                    struct intel_context *ce)
+{
+       trace_intel_context_reset(ce);
+
+       if (likely(!intel_context_is_banned(ce))) {
+               capture_error_state(guc, ce);
+               guc_context_replay(ce);
+       }
+}
+
+int intel_guc_context_reset_process_msg(struct intel_guc *guc,
+                                       const u32 *msg, u32 len)
+{
+       struct intel_context *ce;
+       int desc_idx;
+
+       if (unlikely(len != 1)) {
+               drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+               return -EPROTO;
+       }
+
+       desc_idx = msg[0];
+       ce = g2h_context_lookup(guc, desc_idx);
+       if (unlikely(!ce))
+               return -EPROTO;
+
+       guc_handle_context_reset(guc, ce);
+
+       return 0;
+}
+
+static struct intel_engine_cs *
+guc_lookup_engine(struct intel_guc *guc, u8 guc_class, u8 instance)
+{
+       struct intel_gt *gt = guc_to_gt(guc);
+       u8 engine_class = guc_class_to_engine_class(guc_class);
+
+       /* Class index is checked in class converter */
+       GEM_BUG_ON(instance > MAX_ENGINE_INSTANCE);
+
+       return gt->engine_class[engine_class][instance];
+}
+
+int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
+                                        const u32 *msg, u32 len)
+{
+       struct intel_engine_cs *engine;
+       u8 guc_class, instance;
+       u32 reason;
+
+       if (unlikely(len != 3)) {
+               drm_err(&guc_to_gt(guc)->i915->drm, "Invalid length %u", len);
+               return -EPROTO;
+       }
+
+       guc_class = msg[0];
+       instance = msg[1];
+       reason = msg[2];
+
+       engine = guc_lookup_engine(guc, guc_class, instance);
+       if (unlikely(!engine)) {
+               drm_err(&guc_to_gt(guc)->i915->drm,
+                       "Invalid engine %d:%d", guc_class, instance);
+               return -EPROTO;
+       }
+
+       intel_gt_handle_error(guc_to_gt(guc), engine->mask,
+                             I915_ERROR_CAPTURE,
+                             "GuC failed to reset %s (reason=0x%08x)\n",
+                             engine->name, reason);
+
+       return 0;
+}
+
+void intel_guc_find_hung_context(struct intel_engine_cs *engine)
+{
+       struct intel_guc *guc = &engine->gt->uc.guc;
+       struct intel_context *ce;
+       struct i915_request *rq;
+       unsigned long index;
+
+       /* Reset called during driver load? GuC not yet initialised! */
+       if (unlikely(!guc_submission_initialized(guc)))
+               return;
+
+       xa_for_each(&guc->context_lookup, index, ce) {
+               if (!intel_context_is_pinned(ce))
+                       continue;
+
+               if (intel_engine_is_virtual(ce->engine)) {
+                       if (!(ce->engine->mask & engine->mask))
+                               continue;
+               } else {
+                       if (ce->engine != engine)
+                               continue;
+               }
+
+               list_for_each_entry(rq, &ce->guc_active.requests, sched.link) {
+                       if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
+                               continue;
+
+                       intel_engine_set_hung_context(engine, ce);
+
+                       /* Can only cope with one hang at a time... */
+                       return;
+               }
+       }
+}
+
+void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
+                                   struct i915_request *hung_rq,
+                                   struct drm_printer *m)
+{
+       struct intel_guc *guc = &engine->gt->uc.guc;
+       struct intel_context *ce;
+       unsigned long index;
+       unsigned long flags;
+
+       /* Reset called during driver load? GuC not yet initialised! */
+       if (unlikely(!guc_submission_initialized(guc)))
+               return;
+
+       xa_for_each(&guc->context_lookup, index, ce) {
+               if (!intel_context_is_pinned(ce))
+                       continue;
+
+               if (intel_engine_is_virtual(ce->engine)) {
+                       if (!(ce->engine->mask & engine->mask))
+                               continue;
+               } else {
+                       if (ce->engine != engine)
+                               continue;
+               }
+
+               spin_lock_irqsave(&ce->guc_active.lock, flags);
+               intel_engine_dump_active_requests(&ce->guc_active.requests,
+                                                 hung_rq, m);
+               spin_unlock_irqrestore(&ce->guc_active.lock, flags);
+       }
+}
+
+void intel_guc_submission_print_info(struct intel_guc *guc,
+                                    struct drm_printer *p)
+{
+       struct i915_sched_engine *sched_engine = guc->sched_engine;
+       struct rb_node *rb;
+       unsigned long flags;
+
+       if (!sched_engine)
+               return;
+
+       drm_printf(p, "GuC Number Outstanding Submission G2H: %u\n",
+                  atomic_read(&guc->outstanding_submission_g2h));
+       drm_printf(p, "GuC tasklet count: %u\n\n",
+                  atomic_read(&sched_engine->tasklet.count));
+
+       spin_lock_irqsave(&sched_engine->lock, flags);
+       drm_printf(p, "Requests in GuC submit tasklet:\n");
+       for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
+               struct i915_priolist *pl = to_priolist(rb);
+               struct i915_request *rq;
+
+               priolist_for_each_request(rq, pl)
+                       drm_printf(p, "guc_id=%u, seqno=%llu\n",
+                                  rq->context->guc_id,
+                                  rq->fence.seqno);
+       }
+       spin_unlock_irqrestore(&sched_engine->lock, flags);
+       drm_printf(p, "\n");
+}
+
+static inline void guc_log_context_priority(struct drm_printer *p,
+                                           struct intel_context *ce)
+{
+       int i;
+
+       drm_printf(p, "\t\tPriority: %d\n",
+                  ce->guc_prio);
+       drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
+       for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
+            i < GUC_CLIENT_PRIORITY_NUM; ++i) {
+               drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
+                          i, ce->guc_prio_count[i]);
+       }
+       drm_printf(p, "\n");
+}
+
+void intel_guc_submission_print_context_info(struct intel_guc *guc,
+                                            struct drm_printer *p)
+{
+       struct intel_context *ce;
+       unsigned long index;
+
+       xa_for_each(&guc->context_lookup, index, ce) {
+               drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id);
+               drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
+               drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
+                          ce->ring->head,
+                          ce->lrc_reg_state[CTX_RING_HEAD]);
+               drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
+                          ce->ring->tail,
+                          ce->lrc_reg_state[CTX_RING_TAIL]);
+               drm_printf(p, "\t\tContext Pin Count: %u\n",
+                          atomic_read(&ce->pin_count));
+               drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
+                          atomic_read(&ce->guc_id_ref));
+               drm_printf(p, "\t\tSchedule State: 0x%x, 0x%x\n\n",
+                          ce->guc_state.sched_state,
+                          atomic_read(&ce->guc_sched_state_no_lock));
+
+               guc_log_context_priority(p, ce);
+       }
+}
+
+static struct intel_context *
+guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
+{
+       struct guc_virtual_engine *ve;
+       struct intel_guc *guc;
+       unsigned int n;
+       int err;
+
+       ve = kzalloc(sizeof(*ve), GFP_KERNEL);
+       if (!ve)
+               return ERR_PTR(-ENOMEM);
+
+       guc = &siblings[0]->gt->uc.guc;
+
+       ve->base.i915 = siblings[0]->i915;
+       ve->base.gt = siblings[0]->gt;
+       ve->base.uncore = siblings[0]->uncore;
+       ve->base.id = -1;
+
+       ve->base.uabi_class = I915_ENGINE_CLASS_INVALID;
+       ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+       ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL;
+       ve->base.saturated = ALL_ENGINES;
+
+       snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
+
+       ve->base.sched_engine = i915_sched_engine_get(guc->sched_engine);
+
+       ve->base.cops = &virtual_guc_context_ops;
+       ve->base.request_alloc = guc_request_alloc;
+       ve->base.bump_serial = virtual_guc_bump_serial;
+
+       ve->base.submit_request = guc_submit_request;
+
+       ve->base.flags = I915_ENGINE_IS_VIRTUAL;
+
+       intel_context_init(&ve->context, &ve->base);
+
+       for (n = 0; n < count; n++) {
+               struct intel_engine_cs *sibling = siblings[n];
+
+               GEM_BUG_ON(!is_power_of_2(sibling->mask));
+               if (sibling->mask & ve->base.mask) {
+                       DRM_DEBUG("duplicate %s entry in load balancer\n",
+                                 sibling->name);
+                       err = -EINVAL;
+                       goto err_put;
+               }
+
+               ve->base.mask |= sibling->mask;
+
+               if (n != 0 && ve->base.class != sibling->class) {
+                       DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
+                                 sibling->class, ve->base.class);
+                       err = -EINVAL;
+                       goto err_put;
+               } else if (n == 0) {
+                       ve->base.class = sibling->class;
+                       ve->base.uabi_class = sibling->uabi_class;
+                       snprintf(ve->base.name, sizeof(ve->base.name),
+                                "v%dx%d", ve->base.class, count);
+                       ve->base.context_size = sibling->context_size;
+
+                       ve->base.add_active_request =
+                               sibling->add_active_request;
+                       ve->base.remove_active_request =
+                               sibling->remove_active_request;
+                       ve->base.emit_bb_start = sibling->emit_bb_start;
+                       ve->base.emit_flush = sibling->emit_flush;
+                       ve->base.emit_init_breadcrumb =
+                               sibling->emit_init_breadcrumb;
+                       ve->base.emit_fini_breadcrumb =
+                               sibling->emit_fini_breadcrumb;
+                       ve->base.emit_fini_breadcrumb_dw =
+                               sibling->emit_fini_breadcrumb_dw;
+                       ve->base.breadcrumbs =
+                               intel_breadcrumbs_get(sibling->breadcrumbs);
+
+                       ve->base.flags |= sibling->flags;
+
+                       ve->base.props.timeslice_duration_ms =
+                               sibling->props.timeslice_duration_ms;
+                       ve->base.props.preempt_timeout_ms =
+                               sibling->props.preempt_timeout_ms;
+               }
+       }
+
+       return &ve->context;
+
+err_put:
+       intel_context_put(&ve->context);
+       return ERR_PTR(err);
+}
+
+bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
+{
+       struct intel_engine_cs *engine;
+       intel_engine_mask_t tmp, mask = ve->mask;
+
+       for_each_engine_masked(engine, ve->gt, mask, tmp)
+               if (READ_ONCE(engine->props.heartbeat_interval_ms))
+                       return true;
+
+       return false;
+}
index 3f70050..c7ef44f 100644 (file)
@@ -10,6 +10,7 @@
 
 #include "intel_guc.h"
 
+struct drm_printer;
 struct intel_engine_cs;
 
 void intel_guc_submission_init_early(struct intel_guc *guc);
@@ -20,11 +21,24 @@ void intel_guc_submission_fini(struct intel_guc *guc);
 int intel_guc_preempt_work_create(struct intel_guc *guc);
 void intel_guc_preempt_work_destroy(struct intel_guc *guc);
 int intel_guc_submission_setup(struct intel_engine_cs *engine);
+void intel_guc_submission_print_info(struct intel_guc *guc,
+                                    struct drm_printer *p);
+void intel_guc_submission_print_context_info(struct intel_guc *guc,
+                                            struct drm_printer *p);
+void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
+                                   struct i915_request *hung_rq,
+                                   struct drm_printer *m);
+
+bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve);
+
+int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
+                                  atomic_t *wait_var,
+                                  bool interruptible,
+                                  long timeout);
 
 static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
 {
-       /* XXX: GuC submission is unavailable for now */
-       return false;
+       return guc->submission_supported;
 }
 
 static inline bool intel_guc_submission_is_wanted(struct intel_guc *guc)
index 6d8b923..b104fb7 100644 (file)
@@ -34,8 +34,14 @@ static void uc_expand_default_options(struct intel_uc *uc)
                return;
        }
 
-       /* Default: enable HuC authentication only */
-       i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
+       /* Intermediate platforms are HuC authentication only */
+       if (IS_DG1(i915) || IS_ALDERLAKE_S(i915)) {
+               i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
+               return;
+       }
+
+       /* Default: enable HuC authentication and GuC submission */
+       i915->params.enable_guc = ENABLE_GUC_LOAD_HUC | ENABLE_GUC_SUBMISSION;
 }
 
 /* Reset GuC providing us with fresh state for both GuC and HuC.
@@ -69,16 +75,18 @@ static void __confirm_options(struct intel_uc *uc)
        struct drm_i915_private *i915 = uc_to_gt(uc)->i915;
 
        drm_dbg(&i915->drm,
-               "enable_guc=%d (guc:%s submission:%s huc:%s)\n",
+               "enable_guc=%d (guc:%s submission:%s huc:%s slpc:%s)\n",
                i915->params.enable_guc,
                yesno(intel_uc_wants_guc(uc)),
                yesno(intel_uc_wants_guc_submission(uc)),
-               yesno(intel_uc_wants_huc(uc)));
+               yesno(intel_uc_wants_huc(uc)),
+               yesno(intel_uc_wants_guc_slpc(uc)));
 
        if (i915->params.enable_guc == 0) {
                GEM_BUG_ON(intel_uc_wants_guc(uc));
                GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
                GEM_BUG_ON(intel_uc_wants_huc(uc));
+               GEM_BUG_ON(intel_uc_wants_guc_slpc(uc));
                return;
        }
 
@@ -120,6 +128,11 @@ void intel_uc_init_early(struct intel_uc *uc)
                uc->ops = &uc_ops_off;
 }
 
+void intel_uc_init_late(struct intel_uc *uc)
+{
+       intel_guc_init_late(&uc->guc);
+}
+
 void intel_uc_driver_late_release(struct intel_uc *uc)
 {
 }
@@ -207,21 +220,6 @@ static void guc_handle_mmio_msg(struct intel_guc *guc)
        spin_unlock_irq(&guc->irq_lock);
 }
 
-static void guc_reset_interrupts(struct intel_guc *guc)
-{
-       guc->interrupts.reset(guc);
-}
-
-static void guc_enable_interrupts(struct intel_guc *guc)
-{
-       guc->interrupts.enable(guc);
-}
-
-static void guc_disable_interrupts(struct intel_guc *guc)
-{
-       guc->interrupts.disable(guc);
-}
-
 static int guc_enable_communication(struct intel_guc *guc)
 {
        struct intel_gt *gt = guc_to_gt(guc);
@@ -242,7 +240,7 @@ static int guc_enable_communication(struct intel_guc *guc)
        guc_get_mmio_msg(guc);
        guc_handle_mmio_msg(guc);
 
-       guc_enable_interrupts(guc);
+       intel_guc_enable_interrupts(guc);
 
        /* check for CT messages received before we enabled interrupts */
        spin_lock_irq(&gt->irq_lock);
@@ -265,7 +263,7 @@ static void guc_disable_communication(struct intel_guc *guc)
         */
        guc_clear_mmio_msg(guc);
 
-       guc_disable_interrupts(guc);
+       intel_guc_disable_interrupts(guc);
 
        intel_guc_ct_disable(&guc->ct);
 
@@ -323,9 +321,6 @@ static int __uc_init(struct intel_uc *uc)
        if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
                return -ENOMEM;
 
-       /* XXX: GuC submission is unavailable for now */
-       GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
-
        ret = intel_guc_init(guc);
        if (ret)
                return ret;
@@ -463,7 +458,7 @@ static int __uc_init_hw(struct intel_uc *uc)
        if (ret)
                goto err_out;
 
-       guc_reset_interrupts(guc);
+       intel_guc_reset_interrupts(guc);
 
        /* WaEnableuKernelHeaderValidFix:skl */
        /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
@@ -505,12 +500,21 @@ static int __uc_init_hw(struct intel_uc *uc)
        if (intel_uc_uses_guc_submission(uc))
                intel_guc_submission_enable(guc);
 
+       if (intel_uc_uses_guc_slpc(uc)) {
+               ret = intel_guc_slpc_enable(&guc->slpc);
+               if (ret)
+                       goto err_submission;
+       }
+
        drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
                 intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
                 guc->fw.major_ver_found, guc->fw.minor_ver_found,
                 "submission",
                 enableddisabled(intel_uc_uses_guc_submission(uc)));
 
+       drm_info(&i915->drm, "GuC SLPC: %s\n",
+                enableddisabled(intel_uc_uses_guc_slpc(uc)));
+
        if (intel_uc_uses_huc(uc)) {
                drm_info(&i915->drm, "%s firmware %s version %u.%u %s:%s\n",
                         intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_HUC),
@@ -525,6 +529,8 @@ static int __uc_init_hw(struct intel_uc *uc)
        /*
         * We've failed to load the firmware :(
         */
+err_submission:
+       intel_guc_submission_disable(guc);
 err_log_capture:
        __uc_capture_load_err_log(uc);
 err_out:
@@ -565,23 +571,67 @@ void intel_uc_reset_prepare(struct intel_uc *uc)
 {
        struct intel_guc *guc = &uc->guc;
 
-       if (!intel_guc_is_ready(guc))
+       uc->reset_in_progress = true;
+
+       /* Nothing to do if GuC isn't supported */
+       if (!intel_uc_supports_guc(uc))
                return;
 
+       /* Firmware expected to be running when this function is called */
+       if (!intel_guc_is_ready(guc))
+               goto sanitize;
+
+       if (intel_uc_uses_guc_submission(uc))
+               intel_guc_submission_reset_prepare(guc);
+
+sanitize:
        __uc_sanitize(uc);
 }
 
+void intel_uc_reset(struct intel_uc *uc, bool stalled)
+{
+       struct intel_guc *guc = &uc->guc;
+
+       /* Firmware cannot be running when this function is called */
+       if (intel_uc_uses_guc_submission(uc))
+               intel_guc_submission_reset(guc, stalled);
+}
+
+void intel_uc_reset_finish(struct intel_uc *uc)
+{
+       struct intel_guc *guc = &uc->guc;
+
+       uc->reset_in_progress = false;
+
+       /* Firmware expected to be running when this function is called */
+       if (intel_guc_is_fw_running(guc) && intel_uc_uses_guc_submission(uc))
+               intel_guc_submission_reset_finish(guc);
+}
+
+void intel_uc_cancel_requests(struct intel_uc *uc)
+{
+       struct intel_guc *guc = &uc->guc;
+
+       /* Firmware cannot be running when this function is called */
+       if (intel_uc_uses_guc_submission(uc))
+               intel_guc_submission_cancel_requests(guc);
+}
+
 void intel_uc_runtime_suspend(struct intel_uc *uc)
 {
        struct intel_guc *guc = &uc->guc;
-       int err;
 
        if (!intel_guc_is_ready(guc))
                return;
 
-       err = intel_guc_suspend(guc);
-       if (err)
-               DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
+       /*
+        * Wait for any outstanding CTB before tearing down communication with the
+        * GuC.
+        */
+#define OUTSTANDING_CTB_TIMEOUT_PERIOD (HZ / 5)
+       intel_guc_wait_for_pending_msg(guc, &guc->outstanding_submission_g2h,
+                                      false, OUTSTANDING_CTB_TIMEOUT_PERIOD);
+       GEM_WARN_ON(atomic_read(&guc->outstanding_submission_g2h));
 
        guc_disable_communication(guc);
 }
@@ -590,17 +640,22 @@ void intel_uc_suspend(struct intel_uc *uc)
 {
        struct intel_guc *guc = &uc->guc;
        intel_wakeref_t wakeref;
+       int err;
 
        if (!intel_guc_is_ready(guc))
                return;
 
-       with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref)
-               intel_uc_runtime_suspend(uc);
+       with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) {
+               err = intel_guc_suspend(guc);
+               if (err)
+                       DRM_DEBUG_DRIVER("Failed to suspend GuC, err=%d", err);
+       }
 }
 
 static int __uc_resume(struct intel_uc *uc, bool enable_communication)
 {
        struct intel_guc *guc = &uc->guc;
+       struct intel_gt *gt = guc_to_gt(guc);
        int err;
 
        if (!intel_guc_is_fw_running(guc))
@@ -612,6 +667,13 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
        if (enable_communication)
                guc_enable_communication(guc);
 
+       /* If we are only resuming GuC communication but not reloading
+        * GuC, we need to ensure the ARAT timer interrupt is enabled
+        * again. In case of GuC reload, it is enabled during SLPC enable.
+        */
+       if (enable_communication && intel_uc_uses_guc_slpc(uc))
+               intel_guc_pm_intrmsk_enable(gt);
+
        err = intel_guc_resume(guc);
        if (err) {
                DRM_DEBUG_DRIVER("Failed to resume GuC, err=%d", err);
index 9c954c5..866b462 100644 (file)
@@ -7,7 +7,9 @@
 #define _INTEL_UC_H_
 
 #include "intel_guc.h"
+#include "intel_guc_rc.h"
 #include "intel_guc_submission.h"
+#include "intel_guc_slpc.h"
 #include "intel_huc.h"
 #include "i915_params.h"
 
@@ -30,13 +32,19 @@ struct intel_uc {
 
        /* Snapshot of GuC log from last failed load */
        struct drm_i915_gem_object *load_err_log;
+
+       bool reset_in_progress;
 };
 
 void intel_uc_init_early(struct intel_uc *uc);
+void intel_uc_init_late(struct intel_uc *uc);
 void intel_uc_driver_late_release(struct intel_uc *uc);
 void intel_uc_driver_remove(struct intel_uc *uc);
 void intel_uc_init_mmio(struct intel_uc *uc);
 void intel_uc_reset_prepare(struct intel_uc *uc);
+void intel_uc_reset(struct intel_uc *uc, bool stalled);
+void intel_uc_reset_finish(struct intel_uc *uc);
+void intel_uc_cancel_requests(struct intel_uc *uc);
 void intel_uc_suspend(struct intel_uc *uc);
 void intel_uc_runtime_suspend(struct intel_uc *uc);
 int intel_uc_resume(struct intel_uc *uc);
@@ -77,10 +85,17 @@ __uc_state_checker(x, func, uses, used)
 uc_state_checkers(guc, guc);
 uc_state_checkers(huc, huc);
 uc_state_checkers(guc, guc_submission);
+uc_state_checkers(guc, guc_slpc);
+uc_state_checkers(guc, guc_rc);
 
 #undef uc_state_checkers
 #undef __uc_state_checker
 
+static inline int intel_uc_wait_for_idle(struct intel_uc *uc, long timeout)
+{
+       return intel_guc_wait_for_idle(&uc->guc, timeout);
+}
+
 #define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
 static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
 { \
index df647c9..3a16d08 100644 (file)
@@ -48,19 +48,20 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
  * firmware as TGL.
  */
 #define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
-       fw_def(ALDERLAKE_S, 0, guc_def(tgl, 49, 0, 1), huc_def(tgl,  7, 5, 0)) \
-       fw_def(ROCKETLAKE,  0, guc_def(tgl, 49, 0, 1), huc_def(tgl,  7, 5, 0)) \
-       fw_def(TIGERLAKE,   0, guc_def(tgl, 49, 0, 1), huc_def(tgl,  7, 5, 0)) \
-       fw_def(JASPERLAKE,  0, guc_def(ehl, 49, 0, 1), huc_def(ehl,  9, 0, 0)) \
-       fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl,  9, 0, 0)) \
-       fw_def(ICELAKE,     0, guc_def(icl, 49, 0, 1), huc_def(icl,  9, 0, 0)) \
-       fw_def(COMETLAKE,   5, guc_def(cml, 49, 0, 1), huc_def(cml,  4, 0, 0)) \
-       fw_def(COMETLAKE,   0, guc_def(kbl, 49, 0, 1), huc_def(kbl,  4, 0, 0)) \
-       fw_def(COFFEELAKE,  0, guc_def(kbl, 49, 0, 1), huc_def(kbl,  4, 0, 0)) \
-       fw_def(GEMINILAKE,  0, guc_def(glk, 49, 0, 1), huc_def(glk,  4, 0, 0)) \
-       fw_def(KABYLAKE,    0, guc_def(kbl, 49, 0, 1), huc_def(kbl,  4, 0, 0)) \
-       fw_def(BROXTON,     0, guc_def(bxt, 49, 0, 1), huc_def(bxt,  2, 0, 0)) \
-       fw_def(SKYLAKE,     0, guc_def(skl, 49, 0, 1), huc_def(skl,  2, 0, 0))
+       fw_def(ALDERLAKE_P, 0, guc_def(adlp, 62, 0, 3), huc_def(tgl, 7, 9, 3)) \
+       fw_def(ALDERLAKE_S, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl,  7, 9, 3)) \
+       fw_def(ROCKETLAKE,  0, guc_def(tgl, 62, 0, 0), huc_def(tgl,  7, 9, 3)) \
+       fw_def(TIGERLAKE,   0, guc_def(tgl, 62, 0, 0), huc_def(tgl,  7, 9, 3)) \
+       fw_def(JASPERLAKE,  0, guc_def(ehl, 62, 0, 0), huc_def(ehl,  9, 0, 0)) \
+       fw_def(ELKHARTLAKE, 0, guc_def(ehl, 62, 0, 0), huc_def(ehl,  9, 0, 0)) \
+       fw_def(ICELAKE,     0, guc_def(icl, 62, 0, 0), huc_def(icl,  9, 0, 0)) \
+       fw_def(COMETLAKE,   5, guc_def(cml, 62, 0, 0), huc_def(cml,  4, 0, 0)) \
+       fw_def(COMETLAKE,   0, guc_def(kbl, 62, 0, 0), huc_def(kbl,  4, 0, 0)) \
+       fw_def(COFFEELAKE,  0, guc_def(kbl, 62, 0, 0), huc_def(kbl,  4, 0, 0)) \
+       fw_def(GEMINILAKE,  0, guc_def(glk, 62, 0, 0), huc_def(glk,  4, 0, 0)) \
+       fw_def(KABYLAKE,    0, guc_def(kbl, 62, 0, 0), huc_def(kbl,  4, 0, 0)) \
+       fw_def(BROXTON,     0, guc_def(bxt, 62, 0, 0), huc_def(bxt,  2, 0, 0)) \
+       fw_def(SKYLAKE,     0, guc_def(skl, 62, 0, 0), huc_def(skl,  2, 0, 0))
 
 #define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
        "i915/" \
index cc2c05e..e5c2fdf 100644 (file)
@@ -1055,7 +1055,7 @@ static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
 {
        struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
 
-       if (GRAPHICS_VER(dev_priv) == 9 || GRAPHICS_VER(dev_priv) == 10) {
+       if (GRAPHICS_VER(dev_priv) == 9) {
                u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
                        GAMW_ECO_ENABLE_64K_IPS_FIELD;
 
index 734c37c..b56a8e3 100644 (file)
@@ -1409,11 +1409,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
                intel_context_set_single_submission(ce);
 
                /* Max ring buffer size */
-               if (!intel_uc_wants_guc_submission(&engine->gt->uc)) {
-                       const unsigned int ring_size = 512 * SZ_4K;
-
-                       ce->ring = __intel_context_ring_size(ring_size);
-               }
+               if (!intel_uc_wants_guc_submission(&engine->gt->uc))
+                       ce->ring_size = SZ_2M;
 
                s->shadow[i] = ce;
        }
index b1aa1c4..3103c1e 100644 (file)
@@ -13,7 +13,6 @@
 
 #include "i915_drv.h"
 #include "i915_active.h"
-#include "i915_globals.h"
 
 /*
  * Active refs memory management
  * they idle (when we know the active requests are inactive) and allocate the
  * nodes from a local slab cache to hopefully reduce the fragmentation.
  */
-static struct i915_global_active {
-       struct i915_global base;
-       struct kmem_cache *slab_cache;
-} global;
+static struct kmem_cache *slab_cache;
 
 struct active_node {
        struct rb_node node;
@@ -174,7 +170,7 @@ __active_retire(struct i915_active *ref)
        /* Finally free the discarded timeline tree  */
        rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
                GEM_BUG_ON(i915_active_fence_isset(&it->base));
-               kmem_cache_free(global.slab_cache, it);
+               kmem_cache_free(slab_cache, it);
        }
 }
 
@@ -322,7 +318,7 @@ active_instance(struct i915_active *ref, u64 idx)
         * XXX: We should preallocate this before i915_active_ref() is ever
         *  called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC.
         */
-       node = kmem_cache_alloc(global.slab_cache, GFP_ATOMIC);
+       node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
        if (!node)
                goto out;
 
@@ -788,7 +784,7 @@ void i915_active_fini(struct i915_active *ref)
        mutex_destroy(&ref->mutex);
 
        if (ref->cache)
-               kmem_cache_free(global.slab_cache, ref->cache);
+               kmem_cache_free(slab_cache, ref->cache);
 }
 
 static inline bool is_idle_barrier(struct active_node *node, u64 idx)
@@ -908,7 +904,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
                node = reuse_idle_barrier(ref, idx);
                rcu_read_unlock();
                if (!node) {
-                       node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
+                       node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
                        if (!node)
                                goto unwind;
 
@@ -956,7 +952,7 @@ unwind:
                atomic_dec(&ref->count);
                intel_engine_pm_put(barrier_to_engine(node));
 
-               kmem_cache_free(global.slab_cache, node);
+               kmem_cache_free(slab_cache, node);
        }
        return -ENOMEM;
 }
@@ -1176,27 +1172,16 @@ struct i915_active *i915_active_create(void)
 #include "selftests/i915_active.c"
 #endif
 
-static void i915_global_active_shrink(void)
+void i915_active_module_exit(void)
 {
-       kmem_cache_shrink(global.slab_cache);
+       kmem_cache_destroy(slab_cache);
 }
 
-static void i915_global_active_exit(void)
+int __init i915_active_module_init(void)
 {
-       kmem_cache_destroy(global.slab_cache);
-}
-
-static struct i915_global_active global = { {
-       .shrink = i915_global_active_shrink,
-       .exit = i915_global_active_exit,
-} };
-
-int __init i915_global_active_init(void)
-{
-       global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
-       if (!global.slab_cache)
+       slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
+       if (!slab_cache)
                return -ENOMEM;
 
-       i915_global_register(&global.base);
        return 0;
 }
index d0feda6..5fcdb0e 100644 (file)
@@ -247,4 +247,7 @@ static inline int __i915_request_await_exclusive(struct i915_request *rq,
        return err;
 }
 
+void i915_active_module_exit(void);
+int i915_active_module_init(void);
+
 #endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
new file mode 100644 (file)
index 0000000..7b274c5
--- /dev/null
@@ -0,0 +1,421 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/kmemleak.h>
+
+#include "i915_buddy.h"
+
+#include "i915_gem.h"
+#include "i915_utils.h"
+
+static struct kmem_cache *slab_blocks;
+
+static struct i915_buddy_block *i915_block_alloc(struct i915_buddy_mm *mm,
+                                                struct i915_buddy_block *parent,
+                                                unsigned int order,
+                                                u64 offset)
+{
+       struct i915_buddy_block *block;
+
+       GEM_BUG_ON(order > I915_BUDDY_MAX_ORDER);
+
+       block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
+       if (!block)
+               return NULL;
+
+       block->header = offset;
+       block->header |= order;
+       block->parent = parent;
+
+       GEM_BUG_ON(block->header & I915_BUDDY_HEADER_UNUSED);
+       return block;
+}
+
+static void i915_block_free(struct i915_buddy_mm *mm,
+                           struct i915_buddy_block *block)
+{
+       kmem_cache_free(slab_blocks, block);
+}
+
+static void mark_allocated(struct i915_buddy_block *block)
+{
+       block->header &= ~I915_BUDDY_HEADER_STATE;
+       block->header |= I915_BUDDY_ALLOCATED;
+
+       list_del(&block->link);
+}
+
+static void mark_free(struct i915_buddy_mm *mm,
+                     struct i915_buddy_block *block)
+{
+       block->header &= ~I915_BUDDY_HEADER_STATE;
+       block->header |= I915_BUDDY_FREE;
+
+       list_add(&block->link,
+                &mm->free_list[i915_buddy_block_order(block)]);
+}
+
+static void mark_split(struct i915_buddy_block *block)
+{
+       block->header &= ~I915_BUDDY_HEADER_STATE;
+       block->header |= I915_BUDDY_SPLIT;
+
+       list_del(&block->link);
+}
+
+int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
+{
+       unsigned int i;
+       u64 offset;
+
+       if (size < chunk_size)
+               return -EINVAL;
+
+       if (chunk_size < PAGE_SIZE)
+               return -EINVAL;
+
+       if (!is_power_of_2(chunk_size))
+               return -EINVAL;
+
+       size = round_down(size, chunk_size);
+
+       mm->size = size;
+       mm->chunk_size = chunk_size;
+       mm->max_order = ilog2(size) - ilog2(chunk_size);
+
+       GEM_BUG_ON(mm->max_order > I915_BUDDY_MAX_ORDER);
+
+       mm->free_list = kmalloc_array(mm->max_order + 1,
+                                     sizeof(struct list_head),
+                                     GFP_KERNEL);
+       if (!mm->free_list)
+               return -ENOMEM;
+
+       for (i = 0; i <= mm->max_order; ++i)
+               INIT_LIST_HEAD(&mm->free_list[i]);
+
+       mm->n_roots = hweight64(size);
+
+       mm->roots = kmalloc_array(mm->n_roots,
+                                 sizeof(struct i915_buddy_block *),
+                                 GFP_KERNEL);
+       if (!mm->roots)
+               goto out_free_list;
+
+       offset = 0;
+       i = 0;
+
+       /*
+        * Split into power-of-two blocks, in case we are given a size that is
+        * not itself a power-of-two.
+        */
+       do {
+               struct i915_buddy_block *root;
+               unsigned int order;
+               u64 root_size;
+
+               root_size = rounddown_pow_of_two(size);
+               order = ilog2(root_size) - ilog2(chunk_size);
+
+               root = i915_block_alloc(mm, NULL, order, offset);
+               if (!root)
+                       goto out_free_roots;
+
+               mark_free(mm, root);
+
+               GEM_BUG_ON(i > mm->max_order);
+               GEM_BUG_ON(i915_buddy_block_size(mm, root) < chunk_size);
+
+               mm->roots[i] = root;
+
+               offset += root_size;
+               size -= root_size;
+               i++;
+       } while (size);
+
+       return 0;
+
+out_free_roots:
+       while (i--)
+               i915_block_free(mm, mm->roots[i]);
+       kfree(mm->roots);
+out_free_list:
+       kfree(mm->free_list);
+       return -ENOMEM;
+}
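
For illustration only (not part of the patch, and assuming the headers already included by i915_buddy.c): a minimal sketch of how the root-splitting loop above behaves when the size is not a power of two. With a 4 KiB chunk and a 12 MiB span, hweight64() finds two set bits, so init creates an 8 MiB root of order 11 and a 4 MiB root of order 10.

static int buddy_init_sketch(void)
{
	struct i915_buddy_mm mm;
	int err;

	/* 12 MiB is not a power of two: expect two roots after init. */
	err = i915_buddy_init(&mm, 12ull * SZ_1M, SZ_4K);
	if (err)
		return err;

	/*
	 * mm.n_roots == 2, mm.max_order == 11:
	 * roots[0] covers [0, 8 MiB) at order 11,
	 * roots[1] covers [8 MiB, 12 MiB) at order 10.
	 */
	i915_buddy_fini(&mm);
	return 0;
}
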
+
+void i915_buddy_fini(struct i915_buddy_mm *mm)
+{
+       int i;
+
+       for (i = 0; i < mm->n_roots; ++i) {
+               GEM_WARN_ON(!i915_buddy_block_is_free(mm->roots[i]));
+               i915_block_free(mm, mm->roots[i]);
+       }
+
+       kfree(mm->roots);
+       kfree(mm->free_list);
+}
+
+static int split_block(struct i915_buddy_mm *mm,
+                      struct i915_buddy_block *block)
+{
+       unsigned int block_order = i915_buddy_block_order(block) - 1;
+       u64 offset = i915_buddy_block_offset(block);
+
+       GEM_BUG_ON(!i915_buddy_block_is_free(block));
+       GEM_BUG_ON(!i915_buddy_block_order(block));
+
+       block->left = i915_block_alloc(mm, block, block_order, offset);
+       if (!block->left)
+               return -ENOMEM;
+
+       block->right = i915_block_alloc(mm, block, block_order,
+                                       offset + (mm->chunk_size << block_order));
+       if (!block->right) {
+               i915_block_free(mm, block->left);
+               return -ENOMEM;
+       }
+
+       mark_free(mm, block->left);
+       mark_free(mm, block->right);
+
+       mark_split(block);
+
+       return 0;
+}
+
+static struct i915_buddy_block *
+get_buddy(struct i915_buddy_block *block)
+{
+       struct i915_buddy_block *parent;
+
+       parent = block->parent;
+       if (!parent)
+               return NULL;
+
+       if (parent->left == block)
+               return parent->right;
+
+       return parent->left;
+}
+
+static void __i915_buddy_free(struct i915_buddy_mm *mm,
+                             struct i915_buddy_block *block)
+{
+       struct i915_buddy_block *parent;
+
+       while ((parent = block->parent)) {
+               struct i915_buddy_block *buddy;
+
+               buddy = get_buddy(block);
+
+               if (!i915_buddy_block_is_free(buddy))
+                       break;
+
+               list_del(&buddy->link);
+
+               i915_block_free(mm, block);
+               i915_block_free(mm, buddy);
+
+               block = parent;
+       }
+
+       mark_free(mm, block);
+}
+
+void i915_buddy_free(struct i915_buddy_mm *mm,
+                    struct i915_buddy_block *block)
+{
+       GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
+       __i915_buddy_free(mm, block);
+}
+
+void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects)
+{
+       struct i915_buddy_block *block, *on;
+
+       list_for_each_entry_safe(block, on, objects, link) {
+               i915_buddy_free(mm, block);
+               cond_resched();
+       }
+       INIT_LIST_HEAD(objects);
+}
+
+/*
+ * Allocate power-of-two block. The order value here translates to:
+ *
+ *   0 = 2^0 * mm->chunk_size
+ *   1 = 2^1 * mm->chunk_size
+ *   2 = 2^2 * mm->chunk_size
+ *   ...
+ */
+struct i915_buddy_block *
+i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
+{
+       struct i915_buddy_block *block = NULL;
+       unsigned int i;
+       int err;
+
+       for (i = order; i <= mm->max_order; ++i) {
+               block = list_first_entry_or_null(&mm->free_list[i],
+                                                struct i915_buddy_block,
+                                                link);
+               if (block)
+                       break;
+       }
+
+       if (!block)
+               return ERR_PTR(-ENOSPC);
+
+       GEM_BUG_ON(!i915_buddy_block_is_free(block));
+
+       while (i != order) {
+               err = split_block(mm, block);
+               if (unlikely(err))
+                       goto out_free;
+
+               /* Go low */
+               block = block->left;
+               i--;
+       }
+
+       mark_allocated(block);
+       kmemleak_update_trace(block);
+       return block;
+
+out_free:
+       if (i != order)
+               __i915_buddy_free(mm, block);
+       return ERR_PTR(err);
+}
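
A hedged usage sketch of the order semantics described in the comment above (hypothetical caller, not part of the patch): order n yields a block of 2^n * mm->chunk_size bytes.

static struct i915_buddy_block *
buddy_alloc_sketch(struct i915_buddy_mm *mm)
{
	struct i915_buddy_block *block;

	/* With a 4 KiB chunk size, order 4 requests a 64 KiB block. */
	block = i915_buddy_alloc(mm, 4);
	if (IS_ERR(block))
		return block;

	/* i915_buddy_block_size(mm, block) == mm->chunk_size << 4 */
	GEM_BUG_ON(i915_buddy_block_order(block) != 4);

	return block;	/* released later with i915_buddy_free() */
}
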
+
+static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+       return s1 <= e2 && e1 >= s2;
+}
+
+static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+       return s1 <= s2 && e1 >= e2;
+}
+
+/*
+ * Allocate range. Note that it's safe to chain together multiple alloc_ranges
+ * with the same blocks list.
+ *
+ * Intended for pre-allocating portions of the address space, for example to
+ * reserve a block for the initial framebuffer or similar, hence the expectation
+ * here is that i915_buddy_alloc() is still the main vehicle for
+ * allocations, so if that's not the case then the drm_mm range allocator is
+ * probably a much better fit, and so you should probably go use that instead.
+ */
+int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
+                          struct list_head *blocks,
+                          u64 start, u64 size)
+{
+       struct i915_buddy_block *block;
+       struct i915_buddy_block *buddy;
+       LIST_HEAD(allocated);
+       LIST_HEAD(dfs);
+       u64 end;
+       int err;
+       int i;
+
+       if (size < mm->chunk_size)
+               return -EINVAL;
+
+       if (!IS_ALIGNED(size | start, mm->chunk_size))
+               return -EINVAL;
+
+       if (range_overflows(start, size, mm->size))
+               return -EINVAL;
+
+       for (i = 0; i < mm->n_roots; ++i)
+               list_add_tail(&mm->roots[i]->tmp_link, &dfs);
+
+       end = start + size - 1;
+
+       do {
+               u64 block_start;
+               u64 block_end;
+
+               block = list_first_entry_or_null(&dfs,
+                                                struct i915_buddy_block,
+                                                tmp_link);
+               if (!block)
+                       break;
+
+               list_del(&block->tmp_link);
+
+               block_start = i915_buddy_block_offset(block);
+               block_end = block_start + i915_buddy_block_size(mm, block) - 1;
+
+               if (!overlaps(start, end, block_start, block_end))
+                       continue;
+
+               if (i915_buddy_block_is_allocated(block)) {
+                       err = -ENOSPC;
+                       goto err_free;
+               }
+
+               if (contains(start, end, block_start, block_end)) {
+                       if (!i915_buddy_block_is_free(block)) {
+                               err = -ENOSPC;
+                               goto err_free;
+                       }
+
+                       mark_allocated(block);
+                       list_add_tail(&block->link, &allocated);
+                       continue;
+               }
+
+               if (!i915_buddy_block_is_split(block)) {
+                       err = split_block(mm, block);
+                       if (unlikely(err))
+                               goto err_undo;
+               }
+
+               list_add(&block->right->tmp_link, &dfs);
+               list_add(&block->left->tmp_link, &dfs);
+       } while (1);
+
+       list_splice_tail(&allocated, blocks);
+       return 0;
+
+err_undo:
+       /*
+        * We really don't want to leave around a bunch of split blocks, since
+        * bigger is better, so make sure we merge everything back before we
+        * free the allocated blocks.
+        */
+       buddy = get_buddy(block);
+       if (buddy &&
+           (i915_buddy_block_is_free(block) &&
+            i915_buddy_block_is_free(buddy)))
+               __i915_buddy_free(mm, block);
+
+err_free:
+       i915_buddy_free_list(mm, &allocated);
+       return err;
+}
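
A rough caller-side sketch of the pre-allocation use case the comment above describes (hypothetical helper and range, not part of the patch): carve out a fixed range up front, then hand it back to the mm when done.

static int buddy_reserve_sketch(struct i915_buddy_mm *mm)
{
	LIST_HEAD(reserved);
	int err;

	/* Reserve [0, 1 MiB), e.g. for an already-scanning framebuffer. */
	err = i915_buddy_alloc_range(mm, &reserved, 0, SZ_1M);
	if (err)
		return err;

	/* ... the reserved blocks stay on @reserved while in use ... */

	i915_buddy_free_list(mm, &reserved);
	return 0;
}
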
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_buddy.c"
+#endif
+
+void i915_buddy_module_exit(void)
+{
+       kmem_cache_destroy(slab_blocks);
+}
+
+int __init i915_buddy_module_init(void)
+{
+       slab_blocks = KMEM_CACHE(i915_buddy_block, 0);
+       if (!slab_blocks)
+               return -ENOMEM;
+
+       return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h
new file mode 100644 (file)
index 0000000..3940d63
--- /dev/null
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_BUDDY_H__
+#define __I915_BUDDY_H__
+
+#include <linux/bitops.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+struct i915_buddy_block {
+#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
+#define I915_BUDDY_HEADER_STATE  GENMASK_ULL(11, 10)
+#define   I915_BUDDY_ALLOCATED    (1 << 10)
+#define   I915_BUDDY_FREE         (2 << 10)
+#define   I915_BUDDY_SPLIT        (3 << 10)
+/* Free to be used, if needed in the future */
+#define I915_BUDDY_HEADER_UNUSED GENMASK_ULL(9, 6)
+#define I915_BUDDY_HEADER_ORDER  GENMASK_ULL(5, 0)
+       u64 header;
+
+       struct i915_buddy_block *left;
+       struct i915_buddy_block *right;
+       struct i915_buddy_block *parent;
+
+       void *private; /* owned by creator */
+
+       /*
+        * While the block is allocated by the user through i915_buddy_alloc*,
+        * the user has ownership of the link, for example to maintain within
+        * a list, if so desired. As soon as the block is freed with
+        * i915_buddy_free* ownership is given back to the mm.
+        */
+       struct list_head link;
+       struct list_head tmp_link;
+};
+
+/* Order-zero must be at least PAGE_SIZE */
+#define I915_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
+
+/*
+ * Binary Buddy System.
+ *
+ * Locking should be handled by the user, a simple mutex around
+ * i915_buddy_alloc* and i915_buddy_free* should suffice.
+ */
+struct i915_buddy_mm {
+       /* Maintain a free list for each order. */
+       struct list_head *free_list;
+
+       /*
+        * Maintain explicit binary tree(s) to track the allocation of the
+        * address space. This gives us a simple way of finding a buddy block
+        * and performing the potentially recursive merge step when freeing a
+        * block.  Nodes are either allocated or free, in which case they will
+        * also exist on the respective free list.
+        */
+       struct i915_buddy_block **roots;
+
+       /*
+        * Anything from here is public, and remains static for the lifetime of
+        * the mm. Everything above is considered do-not-touch.
+        */
+       unsigned int n_roots;
+       unsigned int max_order;
+
+       /* Must be at least PAGE_SIZE */
+       u64 chunk_size;
+       u64 size;
+};
+
+static inline u64
+i915_buddy_block_offset(struct i915_buddy_block *block)
+{
+       return block->header & I915_BUDDY_HEADER_OFFSET;
+}
+
+static inline unsigned int
+i915_buddy_block_order(struct i915_buddy_block *block)
+{
+       return block->header & I915_BUDDY_HEADER_ORDER;
+}
+
+static inline unsigned int
+i915_buddy_block_state(struct i915_buddy_block *block)
+{
+       return block->header & I915_BUDDY_HEADER_STATE;
+}
+
+static inline bool
+i915_buddy_block_is_allocated(struct i915_buddy_block *block)
+{
+       return i915_buddy_block_state(block) == I915_BUDDY_ALLOCATED;
+}
+
+static inline bool
+i915_buddy_block_is_free(struct i915_buddy_block *block)
+{
+       return i915_buddy_block_state(block) == I915_BUDDY_FREE;
+}
+
+static inline bool
+i915_buddy_block_is_split(struct i915_buddy_block *block)
+{
+       return i915_buddy_block_state(block) == I915_BUDDY_SPLIT;
+}
+
+static inline u64
+i915_buddy_block_size(struct i915_buddy_mm *mm,
+                     struct i915_buddy_block *block)
+{
+       return mm->chunk_size << i915_buddy_block_order(block);
+}
+
+int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size);
+
+void i915_buddy_fini(struct i915_buddy_mm *mm);
+
+struct i915_buddy_block *
+i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order);
+
+int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
+                          struct list_head *blocks,
+                          u64 start, u64 size);
+
+void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);
+
+void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);
+
+void i915_buddy_module_exit(void);
+int i915_buddy_module_init(void);
+
+#endif
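
A minimal caller-side sketch of the locking convention stated above (a plain mutex around alloc and free); the pool wrapper and its names are hypothetical and not part of the patch.

#include <linux/mutex.h>

struct buddy_pool_sketch {
	struct i915_buddy_mm mm;
	struct mutex lock;	/* serializes buddy alloc and free calls */
};

static struct i915_buddy_block *
pool_alloc_sketch(struct buddy_pool_sketch *pool, unsigned int order)
{
	struct i915_buddy_block *block;

	mutex_lock(&pool->lock);
	block = i915_buddy_alloc(&pool->mm, order);
	mutex_unlock(&pool->lock);

	return block;
}

static void pool_free_sketch(struct buddy_pool_sketch *pool,
			     struct i915_buddy_block *block)
{
	mutex_lock(&pool->lock);
	i915_buddy_free(&pool->mm, block);
	mutex_unlock(&pool->lock);
}
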
index a3b4d99..e0403ce 100644 (file)
@@ -1468,42 +1468,43 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
         * space. Parsing should be faster in some cases this way.
         */
        batch_end = cmd + batch_length / sizeof(*batch_end);
-       while (*cmd != MI_BATCH_BUFFER_END) {
-               u32 length = 1;
-
-               if (*cmd != MI_NOOP) { /* MI_NOOP == 0 */
-                       desc = find_cmd(engine, *cmd, desc, &default_desc);
-                       if (!desc) {
-                               DRM_DEBUG("CMD: Unrecognized command: 0x%08X\n", *cmd);
-                               ret = -EINVAL;
-                               break;
-                       }
+       do {
+               u32 length;
 
-                       if (desc->flags & CMD_DESC_FIXED)
-                               length = desc->length.fixed;
-                       else
-                               length = (*cmd & desc->length.mask) + LENGTH_BIAS;
+               if (*cmd == MI_BATCH_BUFFER_END)
+                       break;
 
-                       if ((batch_end - cmd) < length) {
-                               DRM_DEBUG("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
-                                         *cmd,
-                                         length,
-                                         batch_end - cmd);
-                               ret = -EINVAL;
-                               break;
-                       }
+               desc = find_cmd(engine, *cmd, desc, &default_desc);
+               if (!desc) {
+                       DRM_DEBUG("CMD: Unrecognized command: 0x%08X\n", *cmd);
+                       ret = -EINVAL;
+                       break;
+               }
 
-                       if (!check_cmd(engine, desc, cmd, length)) {
-                               ret = -EACCES;
-                               break;
-                       }
+               if (desc->flags & CMD_DESC_FIXED)
+                       length = desc->length.fixed;
+               else
+                       length = (*cmd & desc->length.mask) + LENGTH_BIAS;
 
-                       if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) {
-                               ret = check_bbstart(cmd, offset, length, batch_length,
-                                                   batch_addr, shadow_addr,
-                                                   jump_whitelist);
-                               break;
-                       }
+               if ((batch_end - cmd) < length) {
+                       DRM_DEBUG("CMD: Command length exceeds batch length: 0x%08X length=%u batchlen=%td\n",
+                                 *cmd,
+                                 length,
+                                 batch_end - cmd);
+                       ret = -EINVAL;
+                       break;
+               }
+
+               if (!check_cmd(engine, desc, cmd, length)) {
+                       ret = -EACCES;
+                       break;
+               }
+
+               if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) {
+                       ret = check_bbstart(cmd, offset, length, batch_length,
+                                           batch_addr, shadow_addr,
+                                           jump_whitelist);
+                       break;
                }
 
                if (!IS_ERR_OR_NULL(jump_whitelist))
@@ -1516,7 +1517,7 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                        ret = -EINVAL;
                        break;
                }
-       }
+       } while (1);
 
        if (trampoline) {
                /*
index 0529576..44969f5 100644 (file)
@@ -538,20 +538,20 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
                            rp_state_cap >> 16) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
-                            GRAPHICS_VER(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
+                            GRAPHICS_VER(dev_priv) >= 11 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
                           intel_gpu_freq(rps, max_freq));
 
                max_freq = (rp_state_cap & 0xff00) >> 8;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
-                            GRAPHICS_VER(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
+                            GRAPHICS_VER(dev_priv) >= 11 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
                           intel_gpu_freq(rps, max_freq));
 
                max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
                            rp_state_cap >> 0) & 0xff;
                max_freq *= (IS_GEN9_BC(dev_priv) ||
-                            GRAPHICS_VER(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1);
+                            GRAPHICS_VER(dev_priv) >= 11 ? GEN9_FREQ_SCALER : 1);
                seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
                           intel_gpu_freq(rps, max_freq));
                seq_printf(m, "Max overclocked frequency: %dMHz\n",
index 4e2b077..2042427 100644 (file)
@@ -6,9 +6,21 @@
 #include <linux/kernel.h>
 
 #include "i915_debugfs_params.h"
+#include "gt/intel_gt.h"
+#include "gt/uc/intel_guc.h"
 #include "i915_drv.h"
 #include "i915_params.h"
 
+#define MATCH_DEBUGFS_NODE_NAME(_file, _name) \
+       (strcmp((_file)->f_path.dentry->d_name.name, (_name)) == 0)
+
+#define GET_I915(i915, name, ptr)      \
+       do {    \
+               struct i915_params *params;     \
+               params = container_of(((void *)(ptr)), typeof(*params), name);  \
+               (i915) = container_of(params, typeof(*(i915)), params); \
+       } while (0)
+
 /* int param */
 static int i915_param_int_show(struct seq_file *m, void *data)
 {
@@ -24,6 +36,16 @@ static int i915_param_int_open(struct inode *inode, struct file *file)
        return single_open(file, i915_param_int_show, inode->i_private);
 }
 
+static int notify_guc(struct drm_i915_private *i915)
+{
+       int ret = 0;
+
+       if (intel_uc_uses_guc_submission(&i915->gt.uc))
+               ret = intel_guc_global_policies_update(&i915->gt.uc.guc);
+
+       return ret;
+}
+
 static ssize_t i915_param_int_write(struct file *file,
                                    const char __user *ubuf, size_t len,
                                    loff_t *offp)
@@ -81,8 +103,10 @@ static ssize_t i915_param_uint_write(struct file *file,
                                     const char __user *ubuf, size_t len,
                                     loff_t *offp)
 {
+       struct drm_i915_private *i915;
        struct seq_file *m = file->private_data;
        unsigned int *value = m->private;
+       unsigned int old = *value;
        int ret;
 
        ret = kstrtouint_from_user(ubuf, len, 0, value);
@@ -95,6 +119,14 @@ static ssize_t i915_param_uint_write(struct file *file,
                        *value = b;
        }
 
+       if (!ret && MATCH_DEBUGFS_NODE_NAME(file, "reset")) {
+               GET_I915(i915, reset, value);
+
+               ret = notify_guc(i915);
+               if (ret)
+                       *value = old;
+       }
+
        return ret ?: len;
 }
 
index 30d8cd8..59fb4c7 100644 (file)
@@ -271,10 +271,11 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
        bool pre = false;
 
        pre |= IS_HSW_EARLY_SDV(dev_priv);
-       pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
-       pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
-       pre |= IS_KBL_GT_STEP(dev_priv, 0, STEP_A0);
-       pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);
+       pre |= IS_SKYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x6;
+       pre |= IS_BROXTON(dev_priv) && INTEL_REVID(dev_priv) < 0xA;
+       pre |= IS_KABYLAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x1;
+       pre |= IS_GEMINILAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x3;
+       pre |= IS_ICELAKE(dev_priv) && INTEL_REVID(dev_priv) < 0x7;
 
        if (pre) {
                drm_err(&dev_priv->drm, "This is a pre-production stepping. "
@@ -619,7 +620,9 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
 
        intel_opregion_setup(dev_priv);
 
-       intel_pcode_init(dev_priv);
+       ret = intel_pcode_init(dev_priv);
+       if (ret)
+               goto err_msi;
 
        /*
         * Fill the dram structure to get the system dram info. This will be
@@ -1230,6 +1233,10 @@ static int i915_drm_resume(struct drm_device *dev)
 
        disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 
+       ret = intel_pcode_init(dev_priv);
+       if (ret)
+               return ret;
+
        sanitize_gpu(dev_priv);
 
        ret = i915_ggtt_enable_hw(dev_priv);
index 997fbe9..005b1ce 100644 (file)
@@ -202,6 +202,68 @@ struct drm_i915_file_private {
                struct rcu_head rcu;
        };
 
+       /** @proto_context_lock: Guards all struct i915_gem_proto_context
+        * operations
+        *
+        * This not only guards @proto_context_xa, but is always held
+        * whenever we manipulate any struct i915_gem_proto_context,
+        * including finalizing it on first actual use of the GEM context.
+        *
+        * See i915_gem_proto_context.
+        */
+       struct mutex proto_context_lock;
+
+       /** @proto_context_xa: xarray of struct i915_gem_proto_context
+        *
+        * Historically, the context uAPI allowed for two methods of
+        * setting context parameters: SET_CONTEXT_PARAM and
+        * CONTEXT_CREATE_EXT_SETPARAM.  The former is allowed to be called
+        * at any time while the later happens as part of
+        * GEM_CONTEXT_CREATE.  Everything settable via one was settable
+        * via the other.  While some params are fairly simple and setting
+        * them on a live context is harmless such as the context priority,
+        * others are far trickier such as the VM or the set of engines.
+        * In order to swap out the VM, for instance, we have to delay
+        * until all current in-flight work is complete, swap in the new
+        * VM, and then continue.  This leads to a plethora of potential
+        * race conditions we'd really rather avoid.
+        *
+        * We have since disallowed setting these more complex parameters
+        * on active contexts.  This works by delaying the creation of the
+        * actual context until after the client is done configuring it
+        * with SET_CONTEXT_PARAM.  From the perspective of the client, it
+        * has the same u32 context ID the whole time.  From the
+        * perspective of i915, however, it's a struct i915_gem_proto_context
+        * right up until the point where we attempt to do something which
+        * the proto-context can't handle.  Then the struct i915_gem_context
+        * gets created.
+        *
+        * This is accomplished via a little xarray dance.  When
+        * GEM_CONTEXT_CREATE is called, we create a struct
+        * i915_gem_proto_context, reserve a slot in @context_xa but leave
+        * it NULL, and place the proto-context in the corresponding slot
+        * in @proto_context_xa.  Then, in i915_gem_context_lookup(), we
+        * first check @context_xa.  If it's there, we return the struct
+        * i915_gem_context and we're done.  If it's not, we look in
+        * @proto_context_xa and, if we find it there, we create the actual
+        * context and kill the proto-context.
+        *
+        * In order for this dance to work properly, everything which ever
+        * touches a struct i915_gem_proto_context is guarded by
+        * @proto_context_lock, including context creation.  Yes, this
+        * means context creation now takes a giant global lock but it
+        * can't really be helped and that should never be on any driver's
+        * fast-path anyway.
+        */
+       struct xarray proto_context_xa;
+
+       /** @context_xa: xarray of fully created i915_gem_context
+        *
+        * Write access to this xarray is guarded by @proto_context_lock.
+        * Otherwise, writers may race with finalize_create_context_locked().
+        *
+        * See @proto_context_xa.
+        */
        struct xarray context_xa;
        struct xarray vm_xa;
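
A rough sketch of the lookup dance the @proto_context_xa comment above describes. The exact finalize helper signature is assumed here for illustration; the real path lives in i915_gem_context_lookup() and finalize_create_context_locked() in i915_gem_context.c, and this is not part of the patch.

static struct i915_gem_context *
context_lookup_sketch(struct drm_i915_file_private *fpriv, u32 id)
{
	struct i915_gem_context *ctx;

	/* Fast path: context already finalized and published in @context_xa. */
	ctx = xa_load(&fpriv->context_xa, id);
	if (ctx)
		return ctx;

	/*
	 * Slow path: finalize under the global proto-context lock. The helper
	 * is assumed to pull the entry from @proto_context_xa, build the real
	 * context and publish it in @context_xa under the same u32 id
	 * (signature simplified for this sketch).
	 */
	mutex_lock(&fpriv->proto_context_lock);
	ctx = finalize_create_context_locked(fpriv, id);
	mutex_unlock(&fpriv->proto_context_lock);

	return ctx;
}
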
 
@@ -332,15 +394,6 @@ struct drm_i915_display_funcs {
        void (*read_luts)(struct intel_crtc_state *crtc_state);
 };
 
-enum i915_cache_level {
-       I915_CACHE_NONE = 0,
-       I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
-       I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specifc
-                             caches, eg sampler/render caches, and the
-                             large Last-Level-Cache. LLC is coherent with
-                             the CPU, but L3 is only visible to the GPU. */
-       I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
-};
 
 #define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
 
@@ -556,7 +609,7 @@ struct i915_gem_mm {
         * notifier_lock for mmu notifiers, memory may not be allocated
         * while holding this lock.
         */
-       spinlock_t notifier_lock;
+       rwlock_t notifier_lock;
 #endif
 
        /* shrinker accounting, also useful for userland debugging */
@@ -580,6 +633,9 @@ i915_fence_timeout(const struct drm_i915_private *i915)
 
 #define HAS_HW_SAGV_WM(i915) (DISPLAY_VER(i915) >= 13 && !IS_DGFX(i915))
 
+/* Amount of PSF GV points, BSpec precisely defines this */
+#define I915_NUM_PSF_GV_POINTS 3
+
 struct ddi_vbt_port_info {
        /* Non-NULL if port present. */
        struct intel_bios_encoder_data *devdata;
@@ -1093,12 +1149,16 @@ struct drm_i915_private {
                        INTEL_DRAM_LPDDR5,
                } type;
                u8 num_qgv_points;
+               u8 num_psf_gv_points;
        } dram_info;
 
        struct intel_bw_info {
                /* for each QGV point */
                unsigned int deratedbw[I915_NUM_QGV_POINTS];
+               /* for each PSF GV point */
+               unsigned int psf_bw[I915_NUM_PSF_GV_POINTS];
                u8 num_qgv_points;
+               u8 num_psf_gv_points;
                u8 num_planes;
        } max_bw[6];
 
@@ -1243,11 +1303,17 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
 
 #define INTEL_DEVID(dev_priv)  (RUNTIME_INFO(dev_priv)->device_id)
 
+#define IP_VER(ver, rel)               ((ver) << 8 | (rel))
+
 #define GRAPHICS_VER(i915)             (INTEL_INFO(i915)->graphics_ver)
+#define GRAPHICS_VER_FULL(i915)                IP_VER(INTEL_INFO(i915)->graphics_ver, \
+                                              INTEL_INFO(i915)->graphics_rel)
 #define IS_GRAPHICS_VER(i915, from, until) \
        (GRAPHICS_VER(i915) >= (from) && GRAPHICS_VER(i915) <= (until))
 
 #define MEDIA_VER(i915)                        (INTEL_INFO(i915)->media_ver)
+#define MEDIA_VER_FULL(i915)           IP_VER(INTEL_INFO(i915)->media_ver, \
+                                              INTEL_INFO(i915)->media_rel)
 #define IS_MEDIA_VER(i915, from, until) \
        (MEDIA_VER(i915) >= (from) && MEDIA_VER(i915) <= (until))
 
@@ -1255,29 +1321,20 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
 #define IS_DISPLAY_VER(i915, from, until) \
        (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
 
-#define REVID_FOREVER          0xff
 #define INTEL_REVID(dev_priv)  (to_pci_dev((dev_priv)->drm.dev)->revision)
 
 #define HAS_DSB(dev_priv)      (INTEL_INFO(dev_priv)->display.has_dsb)
 
-/*
- * Return true if revision is in range [since,until] inclusive.
- *
- * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
- */
-#define IS_REVID(p, since, until) \
-       (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
-
 #define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.display_step)
 #define INTEL_GT_STEP(__i915) (RUNTIME_INFO(__i915)->step.gt_step)
 
 #define IS_DISPLAY_STEP(__i915, since, until) \
        (drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
-        INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) <= (until))
+        INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) < (until))
 
 #define IS_GT_STEP(__i915, since, until) \
        (drm_WARN_ON(&(__i915)->drm, INTEL_GT_STEP(__i915) == STEP_NONE), \
-        INTEL_GT_STEP(__i915) >= (since) && INTEL_GT_STEP(__i915) <= (until))
+        INTEL_GT_STEP(__i915) >= (since) && INTEL_GT_STEP(__i915) < (until))
 
 static __always_inline unsigned int
 __platform_mask_index(const struct intel_runtime_info *info,
@@ -1376,7 +1433,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define IS_GEMINILAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
 #define IS_COFFEELAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
 #define IS_COMETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
-#define IS_CANNONLAKE(dev_priv)        IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
+#define IS_CANNONLAKE(dev_priv)        0
 #define IS_ICELAKE(dev_priv)   IS_PLATFORM(dev_priv, INTEL_ICELAKE)
 #define IS_JSL_EHL(dev_priv)   (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
                                IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
@@ -1385,6 +1442,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define IS_DG1(dev_priv)        IS_PLATFORM(dev_priv, INTEL_DG1)
 #define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
 #define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_P)
+#define IS_XEHPSDV(dev_priv) IS_PLATFORM(dev_priv, INTEL_XEHPSDV)
+#define IS_DG2(dev_priv)       IS_PLATFORM(dev_priv, INTEL_DG2)
+#define IS_DG2_G10(dev_priv) \
+       IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G10)
+#define IS_DG2_G11(dev_priv) \
+       IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11)
 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
                                    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
 #define IS_BDW_ULT(dev_priv) \
@@ -1436,8 +1499,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define IS_CML_GT2(dev_priv)   (IS_COMETLAKE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 2)
 
-#define IS_CNL_WITH_PORT_F(dev_priv) \
-       IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
 #define IS_ICL_WITH_PORT_F(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
 
@@ -1447,60 +1508,17 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define IS_TGL_Y(dev_priv) \
        IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX)
 
-#define SKL_REVID_A0           0x0
-#define SKL_REVID_B0           0x1
-#define SKL_REVID_C0           0x2
-#define SKL_REVID_D0           0x3
-#define SKL_REVID_E0           0x4
-#define SKL_REVID_F0           0x5
-#define SKL_REVID_G0           0x6
-#define SKL_REVID_H0           0x7
-
-#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
-
-#define BXT_REVID_A0           0x0
-#define BXT_REVID_A1           0x1
-#define BXT_REVID_B0           0x3
-#define BXT_REVID_B_LAST       0x8
-#define BXT_REVID_C0           0x9
-
-#define IS_BXT_REVID(dev_priv, since, until) \
-       (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
+#define IS_SKL_GT_STEP(p, since, until) (IS_SKYLAKE(p) && IS_GT_STEP(p, since, until))
 
 #define IS_KBL_GT_STEP(dev_priv, since, until) \
        (IS_KABYLAKE(dev_priv) && IS_GT_STEP(dev_priv, since, until))
 #define IS_KBL_DISPLAY_STEP(dev_priv, since, until) \
        (IS_KABYLAKE(dev_priv) && IS_DISPLAY_STEP(dev_priv, since, until))
 
-#define GLK_REVID_A0           0x0
-#define GLK_REVID_A1           0x1
-#define GLK_REVID_A2           0x2
-#define GLK_REVID_B0           0x3
-
-#define IS_GLK_REVID(dev_priv, since, until) \
-       (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
-
-#define CNL_REVID_A0           0x0
-#define CNL_REVID_B0           0x1
-#define CNL_REVID_C0           0x2
-
-#define IS_CNL_REVID(p, since, until) \
-       (IS_CANNONLAKE(p) && IS_REVID(p, since, until))
-
-#define ICL_REVID_A0           0x0
-#define ICL_REVID_A2           0x1
-#define ICL_REVID_B0           0x3
-#define ICL_REVID_B2           0x4
-#define ICL_REVID_C0           0x5
-
-#define IS_ICL_REVID(p, since, until) \
-       (IS_ICELAKE(p) && IS_REVID(p, since, until))
-
-#define EHL_REVID_A0            0x0
-#define EHL_REVID_B0            0x1
-
-#define IS_JSL_EHL_REVID(p, since, until) \
-       (IS_JSL_EHL(p) && IS_REVID(p, since, until))
+#define IS_JSL_EHL_GT_STEP(p, since, until) \
+       (IS_JSL_EHL(p) && IS_GT_STEP(p, since, until))
+#define IS_JSL_EHL_DISPLAY_STEP(p, since, until) \
+       (IS_JSL_EHL(p) && IS_DISPLAY_STEP(p, since, until))
 
 #define IS_TGL_DISPLAY_STEP(__i915, since, until) \
        (IS_TIGERLAKE(__i915) && \
@@ -1514,18 +1532,13 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
        (IS_TIGERLAKE(__i915) && !(IS_TGL_U(__i915) || IS_TGL_Y(__i915)) && \
         IS_GT_STEP(__i915, since, until))
 
-#define RKL_REVID_A0           0x0
-#define RKL_REVID_B0           0x1
-#define RKL_REVID_C0           0x4
-
-#define IS_RKL_REVID(p, since, until) \
-       (IS_ROCKETLAKE(p) && IS_REVID(p, since, until))
-
-#define DG1_REVID_A0           0x0
-#define DG1_REVID_B0           0x1
+#define IS_RKL_DISPLAY_STEP(p, since, until) \
+       (IS_ROCKETLAKE(p) && IS_DISPLAY_STEP(p, since, until))
 
-#define IS_DG1_REVID(p, since, until) \
-       (IS_DG1(p) && IS_REVID(p, since, until))
+#define IS_DG1_GT_STEP(p, since, until) \
+       (IS_DG1(p) && IS_GT_STEP(p, since, until))
+#define IS_DG1_DISPLAY_STEP(p, since, until) \
+       (IS_DG1(p) && IS_DISPLAY_STEP(p, since, until))
 
 #define IS_ADLS_DISPLAY_STEP(__i915, since, until) \
        (IS_ALDERLAKE_S(__i915) && \
@@ -1543,6 +1556,31 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
        (IS_ALDERLAKE_P(__i915) && \
         IS_GT_STEP(__i915, since, until))
 
+#define IS_XEHPSDV_GT_STEP(__i915, since, until) \
+       (IS_XEHPSDV(__i915) && IS_GT_STEP(__i915, since, until))
+
+/*
+ * DG2 hardware steppings are a bit unusual.  The hardware design was forked
+ * to create two variants (G10 and G11) which have distinct workaround sets.
+ * The G11 fork of the DG2 design resets the GT stepping back to "A0" for its
+ * first iteration, even though it's more similar to a G10 B0 stepping in terms
+ * of functionality and workarounds.  However the display stepping does not
+ * reset in the same manner --- a specific stepping like "B0" has a consistent
+ * meaning regardless of whether it belongs to a G10 or G11 DG2.
+ *
+ * TLDR:  All GT workarounds and stepping-specific logic must be applied in
+ * relation to a specific subplatform (G10 or G11), whereas display workarounds
+ * and stepping-specific logic will be applied with a general DG2-wide stepping
+ * number.
+ */
+#define IS_DG2_GT_STEP(__i915, variant, since, until) \
+       (IS_SUBPLATFORM(__i915, INTEL_DG2, INTEL_SUBPLATFORM_##variant) && \
+        IS_GT_STEP(__i915, since, until))
+
+#define IS_DG2_DISP_STEP(__i915, since, until) \
+       (IS_DG2(__i915) && \
+        IS_DISPLAY_STEP(__i915, since, until))
+
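A minimal sketch of how the two DG2 flavours above would be used, assuming the STEP_A0/STEP_B0 enumerators and hypothetical apply_gt_wa()/apply_display_wa() helpers:

	/* GT workarounds: keyed to a specific subplatform and its own GT stepping */
	if (IS_DG2_GT_STEP(i915, G10, STEP_A0, STEP_B0) ||
	    IS_DG2_GT_STEP(i915, G11, STEP_A0, STEP_B0))
		apply_gt_wa(i915);

	/* display workarounds: keyed to the DG2-wide display stepping */
	if (IS_DG2_DISP_STEP(i915, STEP_A0, STEP_B0))
		apply_display_wa(i915);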
 #define IS_LP(dev_priv)                (INTEL_INFO(dev_priv)->is_lp)
 #define IS_GEN9_LP(dev_priv)   (GRAPHICS_VER(dev_priv) == 9 && IS_LP(dev_priv))
 #define IS_GEN9_BC(dev_priv)   (GRAPHICS_VER(dev_priv) == 9 && !IS_LP(dev_priv))
@@ -1580,8 +1618,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define HAS_LOGICAL_RING_ELSQ(dev_priv) \
                (INTEL_INFO(dev_priv)->has_logical_ring_elsq)
 
-#define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)
-
 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
 
 #define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
@@ -1607,12 +1643,10 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 
 /* WaRsDisableCoarsePowerGating:skl,cnl */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv)                   \
-       (IS_CANNONLAKE(dev_priv) ||                                     \
-        IS_SKL_GT3(dev_priv) ||                                        \
-        IS_SKL_GT4(dev_priv))
+       (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
 
 #define HAS_GMBUS_IRQ(dev_priv) (GRAPHICS_VER(dev_priv) >= 4)
-#define HAS_GMBUS_BURST_READ(dev_priv) (GRAPHICS_VER(dev_priv) >= 10 || \
+#define HAS_GMBUS_BURST_READ(dev_priv) (GRAPHICS_VER(dev_priv) >= 11 || \
                                        IS_GEMINILAKE(dev_priv) || \
                                        IS_KABYLAKE(dev_priv))
 
@@ -1632,6 +1666,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 
 #define HAS_DP_MST(dev_priv)   (INTEL_INFO(dev_priv)->display.has_dp_mst)
 
+#define HAS_CDCLK_CRAWL(dev_priv)       (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
 #define HAS_DDI(dev_priv)               (INTEL_INFO(dev_priv)->display.has_ddi)
 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
 #define HAS_PSR(dev_priv)               (INTEL_INFO(dev_priv)->display.has_psr)
@@ -1653,6 +1688,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
 #define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
 #define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
 
+#define HAS_MSLICES(dev_priv) \
+       (INTEL_INFO(dev_priv)->has_mslices)
+
 #define HAS_IPC(dev_priv)               (INTEL_INFO(dev_priv)->display.has_ipc)
 
 #define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
@@ -1742,9 +1780,6 @@ void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
 void i915_gem_init_early(struct drm_i915_private *dev_priv);
 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
 
-struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915,
-                                                u16 type, u16 instance);
-
 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
 {
        /*
@@ -1841,24 +1876,18 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 
 struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);
 
-static inline struct i915_gem_context *
-__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
-{
-       return xa_load(&file_priv->context_xa, id);
-}
-
-static inline struct i915_gem_context *
-i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
+static inline struct i915_address_space *
+i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
 {
-       struct i915_gem_context *ctx;
+       struct i915_address_space *vm;
 
        rcu_read_lock();
-       ctx = __i915_gem_context_lookup_rcu(file_priv, id);
-       if (ctx && !kref_get_unless_zero(&ctx->ref))
-               ctx = NULL;
+       vm = xa_load(&file_priv->vm_xa, id);
+       if (vm && !kref_get_unless_zero(&vm->ref))
+               vm = NULL;
        rcu_read_unlock();
 
-       return ctx;
+       return vm;
 }
 
 /* i915_gem_evict.c */
@@ -1925,8 +1954,8 @@ int remap_io_sg(struct vm_area_struct *vma,
 
 static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
 {
-       if (GRAPHICS_VER(i915) >= 10)
-               return CNL_HWS_CSB_WRITE_INDEX;
+       if (GRAPHICS_VER(i915) >= 11)
+               return ICL_HWS_CSB_WRITE_INDEX;
        else
                return I915_HWS_CSB_WRITE_INDEX;
 }
index 6a0a3f0..590efc8 100644 (file)
@@ -469,12 +469,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
        if (ret != -ENODEV)
                goto out;
 
-       ret = -ENODEV;
-       if (obj->ops->pread)
-               ret = obj->ops->pread(obj, args);
-       if (ret != -ENODEV)
-               goto out;
-
        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
@@ -1207,58 +1201,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
        return ret;
 }
 
-void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ww, bool intr)
-{
-       ww_acquire_init(&ww->ctx, &reservation_ww_class);
-       INIT_LIST_HEAD(&ww->obj_list);
-       ww->intr = intr;
-       ww->contended = NULL;
-}
-
-static void i915_gem_ww_ctx_unlock_all(struct i915_gem_ww_ctx *ww)
-{
-       struct drm_i915_gem_object *obj;
-
-       while ((obj = list_first_entry_or_null(&ww->obj_list, struct drm_i915_gem_object, obj_link))) {
-               list_del(&obj->obj_link);
-               i915_gem_object_unlock(obj);
-       }
-}
-
-void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj)
-{
-       list_del(&obj->obj_link);
-       i915_gem_object_unlock(obj);
-}
-
-void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ww)
-{
-       i915_gem_ww_ctx_unlock_all(ww);
-       WARN_ON(ww->contended);
-       ww_acquire_fini(&ww->ctx);
-}
-
-int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ww)
-{
-       int ret = 0;
-
-       if (WARN_ON(!ww->contended))
-               return -EINVAL;
-
-       i915_gem_ww_ctx_unlock_all(ww);
-       if (ww->intr)
-               ret = dma_resv_lock_slow_interruptible(ww->contended->base.resv, &ww->ctx);
-       else
-               dma_resv_lock_slow(ww->contended->base.resv, &ww->ctx);
-
-       if (!ret)
-               list_add_tail(&ww->contended->obj_link, &ww->obj_list);
-
-       ww->contended = NULL;
-
-       return ret;
-}
-
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_gem_device.c"
 #include "selftests/i915_gem.c"
index 440c35f..d0752e5 100644 (file)
@@ -123,16 +123,4 @@ static inline bool __tasklet_is_scheduled(struct tasklet_struct *t)
        return test_bit(TASKLET_STATE_SCHED, &t->state);
 }
 
-struct i915_gem_ww_ctx {
-       struct ww_acquire_ctx ctx;
-       struct list_head obj_list;
-       bool intr;
-       struct drm_i915_gem_object *contended;
-};
-
-void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ctx, bool intr);
-void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ctx);
-int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ctx);
-void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj);
-
 #endif /* __I915_GEM_H__ */
index 4d2d59a..2b73ddb 100644 (file)
@@ -27,6 +27,7 @@
  */
 
 #include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
 #include "gt/intel_gt_requests.h"
 
 #include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_gem_ww.c b/drivers/gpu/drm/i915/i915_gem_ww.c
new file mode 100644 (file)
index 0000000..3f6ff13
--- /dev/null
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#include <linux/dma-resv.h>
+#include "i915_gem_ww.h"
+#include "gem/i915_gem_object.h"
+
+void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ww, bool intr)
+{
+       ww_acquire_init(&ww->ctx, &reservation_ww_class);
+       INIT_LIST_HEAD(&ww->obj_list);
+       ww->intr = intr;
+       ww->contended = NULL;
+}
+
+static void i915_gem_ww_ctx_unlock_all(struct i915_gem_ww_ctx *ww)
+{
+       struct drm_i915_gem_object *obj;
+
+       while ((obj = list_first_entry_or_null(&ww->obj_list, struct drm_i915_gem_object, obj_link))) {
+               list_del(&obj->obj_link);
+               i915_gem_object_unlock(obj);
+               i915_gem_object_put(obj);
+       }
+}
+
+void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj)
+{
+       list_del(&obj->obj_link);
+       i915_gem_object_unlock(obj);
+       i915_gem_object_put(obj);
+}
+
+void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ww)
+{
+       i915_gem_ww_ctx_unlock_all(ww);
+       WARN_ON(ww->contended);
+       ww_acquire_fini(&ww->ctx);
+}
+
+int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ww)
+{
+       int ret = 0;
+
+       if (WARN_ON(!ww->contended))
+               return -EINVAL;
+
+       i915_gem_ww_ctx_unlock_all(ww);
+       if (ww->intr)
+               ret = dma_resv_lock_slow_interruptible(ww->contended->base.resv, &ww->ctx);
+       else
+               dma_resv_lock_slow(ww->contended->base.resv, &ww->ctx);
+
+       if (!ret)
+               list_add_tail(&ww->contended->obj_link, &ww->obj_list);
+       else
+               i915_gem_object_put(ww->contended);
+
+       ww->contended = NULL;
+
+       return ret;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_ww.h b/drivers/gpu/drm/i915/i915_gem_ww.h
new file mode 100644 (file)
index 0000000..f6b1a79
--- /dev/null
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+#ifndef __I915_GEM_WW_H__
+#define __I915_GEM_WW_H__
+
+#include <drm/drm_drv.h>
+
+struct i915_gem_ww_ctx {
+       struct ww_acquire_ctx ctx;
+       struct list_head obj_list;
+       struct drm_i915_gem_object *contended;
+       unsigned short intr;
+       unsigned short loop;
+};
+
+void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ctx, bool intr);
+void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ctx);
+int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ctx);
+void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj);
+
+/* Internal functions used by the inlines! Don't use. */
+static inline int __i915_gem_ww_fini(struct i915_gem_ww_ctx *ww, int err)
+{
+       ww->loop = 0;
+       if (err == -EDEADLK) {
+               err = i915_gem_ww_ctx_backoff(ww);
+               if (!err)
+                       ww->loop = 1;
+       }
+
+       if (!ww->loop)
+               i915_gem_ww_ctx_fini(ww);
+
+       return err;
+}
+
+static inline void
+__i915_gem_ww_init(struct i915_gem_ww_ctx *ww, bool intr)
+{
+       i915_gem_ww_ctx_init(ww, intr);
+       ww->loop = 1;
+}
+
+#define for_i915_gem_ww(_ww, _err, _intr)                      \
+       for (__i915_gem_ww_init(_ww, _intr); (_ww)->loop;       \
+            _err = __i915_gem_ww_fini(_ww, _err))
+
+#endif
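The new for_i915_gem_ww() loop wraps a whole ww transaction: the body is re-run whenever a lock attempt inside it fails with -EDEADLK (after backing off and sleeping on the contended object), and the context is torn down once the body completes with success or any other error. A minimal usage sketch, with do_work_locked() standing in for the caller's own locked work:

	struct i915_gem_ww_ctx ww;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;	/* -EDEADLK backs off and retries the body */

		err = do_work_locked(obj);
	}
	/* all object locks are dropped here; err holds the final result */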
index 24e1821..77490cb 100644 (file)
@@ -15,7 +15,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
        struct pci_dev *pdev = to_pci_dev(dev->dev);
        const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
        drm_i915_getparam_t *param = data;
-       int value;
+       int value = 0;
 
        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
@@ -134,6 +134,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
        case I915_PARAM_HAS_EXEC_FENCE_ARRAY:
        case I915_PARAM_HAS_EXEC_SUBMIT_FENCE:
        case I915_PARAM_HAS_EXEC_TIMELINE_FENCES:
+       case I915_PARAM_HAS_USERPTR_PROBE:
                /* For the time being all of these are always true;
                 * if some supported hardware does not have one of these
                 * features this value needs to be provided from
@@ -150,7 +151,9 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data,
                        return -ENODEV;
                break;
        case I915_PARAM_SUBSLICE_MASK:
-               value = sseu->subslice_mask[0];
+               /* Only copy bits from the first slice */
+               memcpy(&value, sseu->subslice_mask,
+                      min(sseu->ss_stride, (u8)sizeof(value)));
                if (!value)
                        return -ENODEV;
                break;
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
deleted file mode 100644 (file)
index 77f1911..0000000
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2019 Intel Corporation
- */
-
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-
-#include "i915_active.h"
-#include "gem/i915_gem_context.h"
-#include "gem/i915_gem_object.h"
-#include "i915_globals.h"
-#include "i915_request.h"
-#include "i915_scheduler.h"
-#include "i915_vma.h"
-
-static LIST_HEAD(globals);
-
-static atomic_t active;
-static atomic_t epoch;
-static struct park_work {
-       struct delayed_work work;
-       struct rcu_head rcu;
-       unsigned long flags;
-#define PENDING 0
-       int epoch;
-} park;
-
-static void i915_globals_shrink(void)
-{
-       struct i915_global *global;
-
-       /*
-        * kmem_cache_shrink() discards empty slabs and reorders partially
-        * filled slabs to prioritise allocating from the mostly full slabs,
-        * with the aim of reducing fragmentation.
-        */
-       list_for_each_entry(global, &globals, link)
-               global->shrink();
-}
-
-static void __i915_globals_grace(struct rcu_head *rcu)
-{
-       /* Ratelimit parking as shrinking is quite slow */
-       schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
-}
-
-static void __i915_globals_queue_rcu(void)
-{
-       park.epoch = atomic_inc_return(&epoch);
-       if (!atomic_read(&active)) {
-               init_rcu_head(&park.rcu);
-               call_rcu(&park.rcu, __i915_globals_grace);
-       }
-}
-
-static void __i915_globals_park(struct work_struct *work)
-{
-       destroy_rcu_head(&park.rcu);
-
-       /* Confirm nothing woke up in the last grace period */
-       if (park.epoch != atomic_read(&epoch)) {
-               __i915_globals_queue_rcu();
-               return;
-       }
-
-       clear_bit(PENDING, &park.flags);
-       i915_globals_shrink();
-}
-
-void __init i915_global_register(struct i915_global *global)
-{
-       GEM_BUG_ON(!global->shrink);
-       GEM_BUG_ON(!global->exit);
-
-       list_add_tail(&global->link, &globals);
-}
-
-static void __i915_globals_cleanup(void)
-{
-       struct i915_global *global, *next;
-
-       list_for_each_entry_safe_reverse(global, next, &globals, link)
-               global->exit();
-}
-
-static __initconst int (* const initfn[])(void) = {
-       i915_global_active_init,
-       i915_global_context_init,
-       i915_global_gem_context_init,
-       i915_global_objects_init,
-       i915_global_request_init,
-       i915_global_scheduler_init,
-       i915_global_vma_init,
-};
-
-int __init i915_globals_init(void)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(initfn); i++) {
-               int err;
-
-               err = initfn[i]();
-               if (err) {
-                       __i915_globals_cleanup();
-                       return err;
-               }
-       }
-
-       INIT_DELAYED_WORK(&park.work, __i915_globals_park);
-       return 0;
-}
-
-void i915_globals_park(void)
-{
-       /*
-        * Defer shrinking the global slab caches (and other work) until
-        * after a RCU grace period has completed with no activity. This
-        * is to try and reduce the latency impact on the consumers caused
-        * by us shrinking the caches the same time as they are trying to
-        * allocate, with the assumption being that if we idle long enough
-        * for an RCU grace period to elapse since the last use, it is likely
-        * to be longer until we need the caches again.
-        */
-       if (!atomic_dec_and_test(&active))
-               return;
-
-       /* Queue cleanup after the next RCU grace period has freed slabs */
-       if (!test_and_set_bit(PENDING, &park.flags))
-               __i915_globals_queue_rcu();
-}
-
-void i915_globals_unpark(void)
-{
-       atomic_inc(&epoch);
-       atomic_inc(&active);
-}
-
-static void __exit __i915_globals_flush(void)
-{
-       atomic_inc(&active); /* skip shrinking */
-
-       rcu_barrier(); /* wait for the work to be queued */
-       flush_delayed_work(&park.work);
-
-       atomic_dec(&active);
-}
-
-void __exit i915_globals_exit(void)
-{
-       GEM_BUG_ON(atomic_read(&active));
-
-       __i915_globals_flush();
-       __i915_globals_cleanup();
-
-       /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
-       rcu_barrier();
-}
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
deleted file mode 100644 (file)
index 2d199f4..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef _I915_GLOBALS_H_
-#define _I915_GLOBALS_H_
-
-#include <linux/types.h>
-
-typedef void (*i915_global_func_t)(void);
-
-struct i915_global {
-       struct list_head link;
-
-       i915_global_func_t shrink;
-       i915_global_func_t exit;
-};
-
-void i915_global_register(struct i915_global *global);
-
-int i915_globals_init(void);
-void i915_globals_park(void);
-void i915_globals_unpark(void);
-void i915_globals_exit(void);
-
-/* constructors */
-int i915_global_active_init(void);
-int i915_global_context_init(void);
-int i915_global_gem_context_init(void);
-int i915_global_objects_init(void);
-int i915_global_request_init(void);
-int i915_global_scheduler_init(void);
-int i915_global_vma_init(void);
-
-#endif /* _I915_GLOBALS_H_ */
index 35c97c3..0f08bcf 100644 (file)
@@ -1039,7 +1039,7 @@ i915_vma_coredump_create(const struct intel_gt *gt,
                        if (ret)
                                break;
                }
-       } else if (i915_gem_object_is_lmem(vma->obj)) {
+       } else if (__i915_gem_object_is_lmem(vma->obj)) {
                struct intel_memory_region *mem = vma->obj->mm.region;
                dma_addr_t dma;
 
@@ -1429,20 +1429,37 @@ capture_engine(struct intel_engine_cs *engine,
 {
        struct intel_engine_capture_vma *capture = NULL;
        struct intel_engine_coredump *ee;
-       struct i915_request *rq;
+       struct intel_context *ce;
+       struct i915_request *rq = NULL;
        unsigned long flags;
 
        ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
        if (!ee)
                return NULL;
 
-       spin_lock_irqsave(&engine->active.lock, flags);
-       rq = intel_engine_find_active_request(engine);
+       ce = intel_engine_get_hung_context(engine);
+       if (ce) {
+               intel_engine_clear_hung_context(engine);
+               rq = intel_context_find_active_request(ce);
+               if (!rq || !i915_request_started(rq))
+                       goto no_request_capture;
+       } else {
+               /*
+                * Getting here with GuC enabled means it is a forced error capture
+                * with no actual hang. So, no need to attempt the execlist search.
+                */
+               if (!intel_uc_uses_guc_submission(&engine->gt->uc)) {
+                       spin_lock_irqsave(&engine->sched_engine->lock, flags);
+                       rq = intel_engine_execlist_find_hung_request(engine);
+                       spin_unlock_irqrestore(&engine->sched_engine->lock,
+                                              flags);
+               }
+       }
        if (rq)
                capture = intel_engine_coredump_add_request(ee, rq,
                                                            ATOMIC_MAYFAIL);
-       spin_unlock_irqrestore(&engine->active.lock, flags);
        if (!capture) {
+no_request_capture:
                kfree(ee);
                return NULL;
        }
index 1d4c683..17d3362 100644 (file)
@@ -207,7 +207,7 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
            (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
                return;
 
-       if (HAS_PCH_DG1(dev_priv))
+       if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1)
                hpd->pch_hpd = hpd_sde_dg1;
        else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                hpd->pch_hpd = hpd_icp;
@@ -2297,11 +2297,10 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
                        GEN9_AUX_CHANNEL_C |
                        GEN9_AUX_CHANNEL_D;
 
-       if (IS_CNL_WITH_PORT_F(dev_priv) || DISPLAY_VER(dev_priv) == 11)
-               mask |= CNL_AUX_CHANNEL_F;
-
-       if (DISPLAY_VER(dev_priv) == 11)
+       if (DISPLAY_VER(dev_priv) == 11) {
+               mask |= ICL_AUX_CHANNEL_F;
                mask |= ICL_AUX_CHANNEL_E;
+       }
 
        return mask;
 }
@@ -2698,11 +2697,9 @@ gen11_display_irq_handler(struct drm_i915_private *i915)
        enable_rpm_wakeref_asserts(&i915->runtime_pm);
 }
 
-static __always_inline irqreturn_t
-__gen11_irq_handler(struct drm_i915_private * const i915,
-                   u32 (*intr_disable)(void __iomem * const regs),
-                   void (*intr_enable)(void __iomem * const regs))
+static irqreturn_t gen11_irq_handler(int irq, void *arg)
 {
+       struct drm_i915_private *i915 = arg;
        void __iomem * const regs = i915->uncore.regs;
        struct intel_gt *gt = &i915->gt;
        u32 master_ctl;
@@ -2711,9 +2708,9 @@ __gen11_irq_handler(struct drm_i915_private * const i915,
        if (!intel_irqs_enabled(i915))
                return IRQ_NONE;
 
-       master_ctl = intr_disable(regs);
+       master_ctl = gen11_master_intr_disable(regs);
        if (!master_ctl) {
-               intr_enable(regs);
+               gen11_master_intr_enable(regs);
                return IRQ_NONE;
        }
 
@@ -2726,7 +2723,7 @@ __gen11_irq_handler(struct drm_i915_private * const i915,
 
        gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
 
-       intr_enable(regs);
+       gen11_master_intr_enable(regs);
 
        gen11_gu_misc_irq_handler(gt, gu_misc_iir);
 
@@ -2735,51 +2732,69 @@ __gen11_irq_handler(struct drm_i915_private * const i915,
        return IRQ_HANDLED;
 }
 
-static irqreturn_t gen11_irq_handler(int irq, void *arg)
-{
-       return __gen11_irq_handler(arg,
-                                  gen11_master_intr_disable,
-                                  gen11_master_intr_enable);
-}
-
-static u32 dg1_master_intr_disable_and_ack(void __iomem * const regs)
+static inline u32 dg1_master_intr_disable(void __iomem * const regs)
 {
        u32 val;
 
        /* First disable interrupts */
-       raw_reg_write(regs, DG1_MSTR_UNIT_INTR, 0);
+       raw_reg_write(regs, DG1_MSTR_TILE_INTR, 0);
 
        /* Get the indication levels and ack the master unit */
-       val = raw_reg_read(regs, DG1_MSTR_UNIT_INTR);
+       val = raw_reg_read(regs, DG1_MSTR_TILE_INTR);
        if (unlikely(!val))
                return 0;
 
-       raw_reg_write(regs, DG1_MSTR_UNIT_INTR, val);
-
-       /*
-        * Now with master disabled, get a sample of level indications
-        * for this interrupt and ack them right away - we keep GEN11_MASTER_IRQ
-        * out as this bit doesn't exist anymore for DG1
-        */
-       val = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ) & ~GEN11_MASTER_IRQ;
-       if (unlikely(!val))
-               return 0;
-
-       raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, val);
+       raw_reg_write(regs, DG1_MSTR_TILE_INTR, val);
 
        return val;
 }
 
 static inline void dg1_master_intr_enable(void __iomem * const regs)
 {
-       raw_reg_write(regs, DG1_MSTR_UNIT_INTR, DG1_MSTR_IRQ);
+       raw_reg_write(regs, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
 }
 
 static irqreturn_t dg1_irq_handler(int irq, void *arg)
 {
-       return __gen11_irq_handler(arg,
-                                  dg1_master_intr_disable_and_ack,
-                                  dg1_master_intr_enable);
+       struct drm_i915_private * const i915 = arg;
+       struct intel_gt *gt = &i915->gt;
+       void __iomem * const regs = i915->uncore.regs;
+       u32 master_tile_ctl, master_ctl;
+       u32 gu_misc_iir;
+
+       if (!intel_irqs_enabled(i915))
+               return IRQ_NONE;
+
+       master_tile_ctl = dg1_master_intr_disable(regs);
+       if (!master_tile_ctl) {
+               dg1_master_intr_enable(regs);
+               return IRQ_NONE;
+       }
+
+       /* FIXME: we only support tile 0 for now. */
+       if (master_tile_ctl & DG1_MSTR_TILE(0)) {
+               master_ctl = raw_reg_read(regs, GEN11_GFX_MSTR_IRQ);
+               raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, master_ctl);
+       } else {
+               DRM_ERROR("Tile not supported: 0x%08x\n", master_tile_ctl);
+               dg1_master_intr_enable(regs);
+               return IRQ_NONE;
+       }
+
+       gen11_gt_irq_handler(gt, master_ctl);
+
+       if (master_ctl & GEN11_DISPLAY_IRQ)
+               gen11_display_irq_handler(i915);
+
+       gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl);
+
+       dg1_master_intr_enable(regs);
+
+       gen11_gu_misc_irq_handler(gt, gu_misc_iir);
+
+       pmu_irq_stats(i915, IRQ_HANDLED);
+
+       return IRQ_HANDLED;
 }
 
 /* Called from drm generic code, passed 'crtc' which
@@ -3167,10 +3182,20 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv)
 {
        struct intel_uncore *uncore = &dev_priv->uncore;
 
-       if (HAS_MASTER_UNIT_IRQ(dev_priv))
-               dg1_master_intr_disable_and_ack(dev_priv->uncore.regs);
-       else
-               gen11_master_intr_disable(dev_priv->uncore.regs);
+       gen11_master_intr_disable(dev_priv->uncore.regs);
+
+       gen11_gt_irq_reset(&dev_priv->gt);
+       gen11_display_irq_reset(dev_priv);
+
+       GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
+       GEN3_IRQ_RESET(uncore, GEN8_PCU_);
+}
+
+static void dg1_irq_reset(struct drm_i915_private *dev_priv)
+{
+       struct intel_uncore *uncore = &dev_priv->uncore;
+
+       dg1_master_intr_disable(dev_priv->uncore.regs);
 
        gen11_gt_irq_reset(&dev_priv->gt);
        gen11_display_irq_reset(dev_priv);
@@ -3862,13 +3887,28 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
 
        GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
 
-       if (HAS_MASTER_UNIT_IRQ(dev_priv)) {
-               dg1_master_intr_enable(uncore->regs);
-               intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR);
-       } else {
-               gen11_master_intr_enable(uncore->regs);
-               intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
+       gen11_master_intr_enable(uncore->regs);
+       intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ);
+}
+
+static void dg1_irq_postinstall(struct drm_i915_private *dev_priv)
+{
+       struct intel_uncore *uncore = &dev_priv->uncore;
+       u32 gu_misc_masked = GEN11_GU_MISC_GSE;
+
+       gen11_gt_irq_postinstall(&dev_priv->gt);
+
+       GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
+
+       if (HAS_DISPLAY(dev_priv)) {
+               icp_irq_postinstall(dev_priv);
+               gen8_de_irq_postinstall(dev_priv);
+               intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
+                                  GEN11_DISPLAY_IRQ_ENABLE);
        }
+
+       dg1_master_intr_enable(dev_priv->uncore.regs);
+       intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_TILE_INTR);
 }
 
 static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv)
@@ -4407,9 +4447,9 @@ static irq_handler_t intel_irq_handler(struct drm_i915_private *dev_priv)
                else
                        return i8xx_irq_handler;
        } else {
-               if (HAS_MASTER_UNIT_IRQ(dev_priv))
+               if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
                        return dg1_irq_handler;
-               if (GRAPHICS_VER(dev_priv) >= 11)
+               else if (GRAPHICS_VER(dev_priv) >= 11)
                        return gen11_irq_handler;
                else if (GRAPHICS_VER(dev_priv) >= 8)
                        return gen8_irq_handler;
@@ -4432,7 +4472,9 @@ static void intel_irq_reset(struct drm_i915_private *dev_priv)
                else
                        i8xx_irq_reset(dev_priv);
        } else {
-               if (GRAPHICS_VER(dev_priv) >= 11)
+               if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
+                       dg1_irq_reset(dev_priv);
+               else if (GRAPHICS_VER(dev_priv) >= 11)
                        gen11_irq_reset(dev_priv);
                else if (GRAPHICS_VER(dev_priv) >= 8)
                        gen8_irq_reset(dev_priv);
@@ -4455,7 +4497,9 @@ static void intel_irq_postinstall(struct drm_i915_private *dev_priv)
                else
                        i8xx_irq_postinstall(dev_priv);
        } else {
-               if (GRAPHICS_VER(dev_priv) >= 11)
+               if (GRAPHICS_VER_FULL(dev_priv) >= IP_VER(12, 10))
+                       dg1_irq_postinstall(dev_priv);
+               else if (GRAPHICS_VER(dev_priv) >= 11)
                        gen11_irq_postinstall(dev_priv);
                else if (GRAPHICS_VER(dev_priv) >= 8)
                        gen8_irq_postinstall(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
new file mode 100644 (file)
index 0000000..c578ea8
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/console.h>
+
+#include "gem/i915_gem_context.h"
+#include "gem/i915_gem_object.h"
+#include "i915_active.h"
+#include "i915_buddy.h"
+#include "i915_params.h"
+#include "i915_pci.h"
+#include "i915_perf.h"
+#include "i915_request.h"
+#include "i915_scheduler.h"
+#include "i915_selftest.h"
+#include "i915_vma.h"
+
+static int i915_check_nomodeset(void)
+{
+       bool use_kms = true;
+
+       /*
+        * Enable KMS by default, unless explicitly overridden by
+        * either the i915.modeset parameter or by the
+        * vga_text_mode_force boot option.
+        */
+
+       if (i915_modparams.modeset == 0)
+               use_kms = false;
+
+       if (vgacon_text_force() && i915_modparams.modeset == -1)
+               use_kms = false;
+
+       if (!use_kms) {
+               /* Silently fail loading to not upset userspace. */
+               DRM_DEBUG_DRIVER("KMS disabled.\n");
+               return 1;
+       }
+
+       return 0;
+}
+
+static const struct {
+       int (*init)(void);
+       void (*exit)(void);
+} init_funcs[] = {
+       { i915_check_nomodeset, NULL },
+       { i915_active_module_init, i915_active_module_exit },
+       { i915_buddy_module_init, i915_buddy_module_exit },
+       { i915_context_module_init, i915_context_module_exit },
+       { i915_gem_context_module_init, i915_gem_context_module_exit },
+       { i915_objects_module_init, i915_objects_module_exit },
+       { i915_request_module_init, i915_request_module_exit },
+       { i915_scheduler_module_init, i915_scheduler_module_exit },
+       { i915_vma_module_init, i915_vma_module_exit },
+       { i915_mock_selftests, NULL },
+       { i915_pmu_init, i915_pmu_exit },
+       { i915_register_pci_driver, i915_unregister_pci_driver },
+       { i915_perf_sysctl_register, i915_perf_sysctl_unregister },
+};
+static int init_progress;
+
+static int __init i915_init(void)
+{
+       int err, i;
+
+       for (i = 0; i < ARRAY_SIZE(init_funcs); i++) {
+               err = init_funcs[i].init();
+               if (err < 0) {
+                       while (i--) {
+                               if (init_funcs[i].exit)
+                                       init_funcs[i].exit();
+                       }
+                       return err;
+               } else if (err > 0) {
+                       /*
+                        * Early-exit success is reserved for things which
+                        * don't have an exit() function because we have no
+                        * idea how far they got or how to partially tear
+                        * them down.
+                        */
+                       WARN_ON(init_funcs[i].exit);
+                       break;
+               }
+       }
+
+       init_progress = i;
+
+       return 0;
+}
+
+static void __exit i915_exit(void)
+{
+       int i;
+
+       for (i = init_progress - 1; i >= 0; i--) {
+               GEM_BUG_ON(i >= ARRAY_SIZE(init_funcs));
+               if (init_funcs[i].exit)
+                       init_funcs[i].exit();
+       }
+}
+
+module_init(i915_init);
+module_exit(i915_exit);
+
+MODULE_AUTHOR("Tungsten Graphics, Inc.");
+MODULE_AUTHOR("Intel Corporation");
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
index 7030e56..1bbd09a 100644 (file)
  *
  */
 
-#include <linux/console.h>
 #include <linux/vga_switcheroo.h>
 
 #include <drm/drm_drv.h>
 #include <drm/i915_pciids.h>
 
-#include "display/intel_fbdev.h"
-
 #include "i915_drv.h"
-#include "i915_perf.h"
-#include "i915_globals.h"
-#include "i915_selftest.h"
+#include "i915_pci.h"
 
 #define PLATFORM(x) .platform = (x)
 #define GEN(x) \
@@ -787,27 +782,13 @@ static const struct intel_device_info cml_gt2_info = {
        .gt = 2,
 };
 
-#define GEN10_FEATURES \
-       GEN9_FEATURES, \
-       GEN(10), \
-       .dbuf.size = 1024 - 4, /* 4 blocks for bypass path allocation */ \
-       .display.has_dsc = 1, \
-       .has_coherent_ggtt = false, \
-       GLK_COLORS
-
-static const struct intel_device_info cnl_info = {
-       GEN10_FEATURES,
-       PLATFORM(INTEL_CANNONLAKE),
-       .gt = 2,
-};
-
 #define GEN11_DEFAULT_PAGE_SIZES \
        .page_sizes = I915_GTT_PAGE_SIZE_4K | \
                      I915_GTT_PAGE_SIZE_64K | \
                      I915_GTT_PAGE_SIZE_2M
 
 #define GEN11_FEATURES \
-       GEN10_FEATURES, \
+       GEN9_FEATURES, \
        GEN11_DEFAULT_PAGE_SIZES, \
        .abox_mask = BIT(0), \
        .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | \
@@ -830,10 +811,12 @@ static const struct intel_device_info cnl_info = {
                [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \
        }, \
        GEN(11), \
+       .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }, \
        .dbuf.size = 2048, \
        .dbuf.slice_mask = BIT(DBUF_S1) | BIT(DBUF_S2), \
-       .has_logical_ring_elsq = 1, \
-       .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
+       .display.has_dsc = 1, \
+       .has_coherent_ggtt = false, \
+       .has_logical_ring_elsq = 1
 
 static const struct intel_device_info icl_info = {
        GEN11_FEATURES,
@@ -907,7 +890,6 @@ static const struct intel_device_info rkl_info = {
 
 #define DGFX_FEATURES \
        .memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
-       .has_master_unit_irq = 1, \
        .has_llc = 0, \
        .has_snoop = 1, \
        .is_dgfx = 1
@@ -915,6 +897,7 @@ static const struct intel_device_info rkl_info = {
 static const struct intel_device_info dg1_info __maybe_unused = {
        GEN12_FEATURES,
        DGFX_FEATURES,
+       .graphics_rel = 10,
        PLATFORM(INTEL_DG1),
        .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
        .require_force_probe = 1,
@@ -934,7 +917,7 @@ static const struct intel_device_info adl_s_info = {
        .display.has_psr_hw_tracking = 0,
        .platform_engine_mask =
                BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
-       .dma_mask_size = 46,
+       .dma_mask_size = 39,
 };
 
 #define XE_LPD_CURSOR_OFFSETS \
@@ -947,7 +930,7 @@ static const struct intel_device_info adl_s_info = {
 
 #define XE_LPD_FEATURES \
        .abox_mask = GENMASK(1, 0),                                             \
-       .color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 },          \
+       .color = { .degamma_lut_size = 0, .gamma_lut_size = 0 },                \
        .cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |          \
                BIT(TRANSCODER_C) | BIT(TRANSCODER_D),                          \
        .dbuf.size = 4096,                                                      \
@@ -984,8 +967,8 @@ static const struct intel_device_info adl_p_info = {
        GEN12_FEATURES,
        XE_LPD_FEATURES,
        PLATFORM(INTEL_ALDERLAKE_P),
-       .has_cdclk_crawl = 1,
        .require_force_probe = 1,
+       .display.has_cdclk_crawl = 1,
        .display.has_modular_fia = 1,
        .display.has_psr_hw_tracking = 0,
        .platform_engine_mask =
@@ -995,6 +978,67 @@ static const struct intel_device_info adl_p_info = {
 };
 
 #undef GEN
+
+#define XE_HP_PAGE_SIZES \
+       .page_sizes = I915_GTT_PAGE_SIZE_4K | \
+                     I915_GTT_PAGE_SIZE_64K | \
+                     I915_GTT_PAGE_SIZE_2M
+
+#define XE_HP_FEATURES \
+       .graphics_ver = 12, \
+       .graphics_rel = 50, \
+       XE_HP_PAGE_SIZES, \
+       .dma_mask_size = 46, \
+       .has_64bit_reloc = 1, \
+       .has_global_mocs = 1, \
+       .has_gt_uc = 1, \
+       .has_llc = 1, \
+       .has_logical_ring_contexts = 1, \
+       .has_logical_ring_elsq = 1, \
+       .has_mslices = 1, \
+       .has_rc6 = 1, \
+       .has_reset_engine = 1, \
+       .has_rps = 1, \
+       .has_runtime_pm = 1, \
+       .ppgtt_size = 48, \
+       .ppgtt_type = INTEL_PPGTT_FULL
+
+#define XE_HPM_FEATURES \
+       .media_ver = 12, \
+       .media_rel = 50
+
+__maybe_unused
+static const struct intel_device_info xehpsdv_info = {
+       XE_HP_FEATURES,
+       XE_HPM_FEATURES,
+       DGFX_FEATURES,
+       PLATFORM(INTEL_XEHPSDV),
+       .display = { },
+       .pipe_mask = 0,
+       .platform_engine_mask =
+               BIT(RCS0) | BIT(BCS0) |
+               BIT(VECS0) | BIT(VECS1) | BIT(VECS2) | BIT(VECS3) |
+               BIT(VCS0) | BIT(VCS1) | BIT(VCS2) | BIT(VCS3) |
+               BIT(VCS4) | BIT(VCS5) | BIT(VCS6) | BIT(VCS7),
+       .require_force_probe = 1,
+};
+
+__maybe_unused
+static const struct intel_device_info dg2_info = {
+       XE_HP_FEATURES,
+       XE_HPM_FEATURES,
+       XE_LPD_FEATURES,
+       DGFX_FEATURES,
+       .graphics_rel = 55,
+       .media_rel = 55,
+       PLATFORM(INTEL_DG2),
+       .platform_engine_mask =
+               BIT(RCS0) | BIT(BCS0) |
+               BIT(VECS0) | BIT(VECS1) |
+               BIT(VCS0) | BIT(VCS2),
+       .require_force_probe = 1,
+};
+
 #undef PLATFORM
 
 /*
@@ -1064,7 +1108,6 @@ static const struct pci_device_id pciidlist[] = {
        INTEL_CML_GT2_IDS(&cml_gt2_info),
        INTEL_CML_U_GT1_IDS(&cml_gt1_info),
        INTEL_CML_U_GT2_IDS(&cml_gt2_info),
-       INTEL_CNL_IDS(&cnl_info),
        INTEL_ICL_11_IDS(&icl_info),
        INTEL_EHL_IDS(&ehl_info),
        INTEL_JSL_IDS(&jsl_info),
@@ -1191,65 +1234,12 @@ static struct pci_driver i915_pci_driver = {
        .driver.pm = &i915_pm_ops,
 };
 
-static int __init i915_init(void)
+int i915_register_pci_driver(void)
 {
-       bool use_kms = true;
-       int err;
-
-       err = i915_globals_init();
-       if (err)
-               return err;
-
-       err = i915_mock_selftests();
-       if (err)
-               return err > 0 ? 0 : err;
-
-       /*
-        * Enable KMS by default, unless explicitly overriden by
-        * either the i915.modeset prarameter or by the
-        * vga_text_mode_force boot option.
-        */
-
-       if (i915_modparams.modeset == 0)
-               use_kms = false;
-
-       if (vgacon_text_force() && i915_modparams.modeset == -1)
-               use_kms = false;
-
-       if (!use_kms) {
-               /* Silently fail loading to not upset userspace. */
-               DRM_DEBUG_DRIVER("KMS disabled.\n");
-               return 0;
-       }
-
-       i915_pmu_init();
-
-       err = pci_register_driver(&i915_pci_driver);
-       if (err) {
-               i915_pmu_exit();
-               return err;
-       }
-
-       i915_perf_sysctl_register();
-       return 0;
+       return pci_register_driver(&i915_pci_driver);
 }
 
-static void __exit i915_exit(void)
+void i915_unregister_pci_driver(void)
 {
-       if (!i915_pci_driver.driver.owner)
-               return;
-
-       i915_perf_sysctl_unregister();
        pci_unregister_driver(&i915_pci_driver);
-       i915_globals_exit();
-       i915_pmu_exit();
 }
-
-module_init(i915_init);
-module_exit(i915_exit);
-
-MODULE_AUTHOR("Tungsten Graphics, Inc.");
-MODULE_AUTHOR("Intel Corporation");
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/i915_pci.h b/drivers/gpu/drm/i915/i915_pci.h
new file mode 100644 (file)
index 0000000..b386f31
--- /dev/null
@@ -0,0 +1,8 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright Â© 2021 Intel Corporation
+ */
+
+int i915_register_pci_driver(void);
+void i915_unregister_pci_driver(void);
index 9f94914..2f01b8c 100644 (file)
@@ -1256,7 +1256,6 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 
        case 8:
        case 9:
-       case 10:
                if (intel_engine_uses_guc(ce->engine)) {
                        /*
                         * When using GuC, the context descriptor we write in
@@ -1284,17 +1283,26 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
                break;
 
        case 11:
-       case 12: {
-               stream->specific_ctx_id_mask =
-                       ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
-               /*
-                * Pick an unused context id
-                * 0 - BITS_PER_LONG are used by other contexts
-                * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
-                */
-               stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
+       case 12:
+               if (GRAPHICS_VER_FULL(ce->engine->i915) >= IP_VER(12, 50)) {
+                       stream->specific_ctx_id_mask =
+                               ((1U << XEHP_SW_CTX_ID_WIDTH) - 1) <<
+                               (XEHP_SW_CTX_ID_SHIFT - 32);
+                       stream->specific_ctx_id =
+                               (XEHP_MAX_CONTEXT_HW_ID - 1) <<
+                               (XEHP_SW_CTX_ID_SHIFT - 32);
+               } else {
+                       stream->specific_ctx_id_mask =
+                               ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
+                       /*
+                        * Pick an unused context id
+                        * 0 - BITS_PER_LONG are used by other contexts
+                        * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by idle context
+                        */
+                       stream->specific_ctx_id =
+                               (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
+               }
                break;
-       }
 
        default:
                MISSING_CASE(GRAPHICS_VER(ce->engine->i915));
@@ -2580,7 +2588,7 @@ static void gen8_disable_metric_set(struct i915_perf_stream *stream)
        intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0);
 }
 
-static void gen10_disable_metric_set(struct i915_perf_stream *stream)
+static void gen11_disable_metric_set(struct i915_perf_stream *stream)
 {
        struct intel_uncore *uncore = stream->uncore;
 
@@ -3414,10 +3422,10 @@ i915_perf_open_ioctl_locked(struct i915_perf *perf,
                struct drm_i915_file_private *file_priv = file->driver_priv;
 
                specific_ctx = i915_gem_context_lookup(file_priv, ctx_handle);
-               if (!specific_ctx) {
+               if (IS_ERR(specific_ctx)) {
                        DRM_DEBUG("Failed to look up context with ID %u for opening perf stream\n",
                                  ctx_handle);
-                       ret = -ENOENT;
+                       ret = PTR_ERR(specific_ctx);
                        goto err;
                }
        }
@@ -3887,7 +3895,7 @@ static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
               REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8));
 }
 
-static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
+static bool gen11_is_valid_mux_addr(struct i915_perf *perf, u32 addr)
 {
        return gen8_is_valid_mux_addr(perf, addr) ||
               REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) ||
@@ -4310,7 +4318,6 @@ static void oa_init_supported_formats(struct i915_perf *perf)
        case INTEL_GEMINILAKE:
        case INTEL_COFFEELAKE:
        case INTEL_COMETLAKE:
-       case INTEL_CANNONLAKE:
        case INTEL_ICELAKE:
        case INTEL_ELKHARTLAKE:
        case INTEL_JASPERLAKE:
@@ -4395,27 +4402,23 @@ void i915_perf_init(struct drm_i915_private *i915)
 
                                perf->gen8_valid_ctx_bit = BIT(16);
                        }
-               } else if (IS_GRAPHICS_VER(i915, 10, 11)) {
+               } else if (GRAPHICS_VER(i915) == 11) {
                        perf->ops.is_valid_b_counter_reg =
                                gen7_is_valid_b_counter_addr;
                        perf->ops.is_valid_mux_reg =
-                               gen10_is_valid_mux_addr;
+                               gen11_is_valid_mux_addr;
                        perf->ops.is_valid_flex_reg =
                                gen8_is_valid_flex_addr;
 
                        perf->ops.oa_enable = gen8_oa_enable;
                        perf->ops.oa_disable = gen8_oa_disable;
                        perf->ops.enable_metric_set = gen8_enable_metric_set;
-                       perf->ops.disable_metric_set = gen10_disable_metric_set;
+                       perf->ops.disable_metric_set = gen11_disable_metric_set;
                        perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read;
 
-                       if (GRAPHICS_VER(i915) == 10) {
-                               perf->ctx_oactxctrl_offset = 0x128;
-                               perf->ctx_flexeu0_offset = 0x3de;
-                       } else {
-                               perf->ctx_oactxctrl_offset = 0x124;
-                               perf->ctx_flexeu0_offset = 0x78e;
-                       }
+                       perf->ctx_oactxctrl_offset = 0x124;
+                       perf->ctx_flexeu0_offset = 0x78e;
+
                        perf->gen8_valid_ctx_bit = BIT(16);
                } else if (GRAPHICS_VER(i915) == 12) {
                        perf->ops.is_valid_b_counter_reg =
@@ -4483,9 +4486,10 @@ static int destroy_config(int id, void *p, void *data)
        return 0;
 }
 
-void i915_perf_sysctl_register(void)
+int i915_perf_sysctl_register(void)
 {
        sysctl_header = register_sysctl_table(dev_root);
+       return 0;
 }
 
 void i915_perf_sysctl_unregister(void)
index 882fdd0..1d1329e 100644 (file)
@@ -23,7 +23,7 @@ void i915_perf_fini(struct drm_i915_private *i915);
 void i915_perf_register(struct drm_i915_private *i915);
 void i915_perf_unregister(struct drm_i915_private *i915);
 int i915_perf_ioctl_version(void);
-void i915_perf_sysctl_register(void);
+int i915_perf_sysctl_register(void);
 void i915_perf_sysctl_unregister(void);
 
 int i915_perf_open_ioctl(struct drm_device *dev, void *data,
index 34d37d4..0b488d4 100644 (file)
@@ -407,7 +407,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
 
        if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) {
                add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ],
-                               intel_gpu_freq(rps, rps->cur_freq),
+                               intel_rps_get_requested_frequency(rps),
                                period_ns / 1000);
        }
 
@@ -1088,7 +1088,7 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
 
 static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
 
-void i915_pmu_init(void)
+int i915_pmu_init(void)
 {
        int ret;
 
@@ -1101,6 +1101,8 @@ void i915_pmu_init(void)
                          ret);
        else
                cpuhp_slot = ret;
+
+       return 0;
 }
 
 void i915_pmu_exit(void)
index 60f9595..4490576 100644 (file)
@@ -147,14 +147,14 @@ struct i915_pmu {
 };
 
 #ifdef CONFIG_PERF_EVENTS
-void i915_pmu_init(void);
+int i915_pmu_init(void);
 void i915_pmu_exit(void);
 void i915_pmu_register(struct drm_i915_private *i915);
 void i915_pmu_unregister(struct drm_i915_private *i915);
 void i915_pmu_gt_parked(struct drm_i915_private *i915);
 void i915_pmu_gt_unparked(struct drm_i915_private *i915);
 #else
-static inline void i915_pmu_init(void) {}
+static inline int i915_pmu_init(void) { return 0; }
 static inline void i915_pmu_exit(void) {}
 static inline void i915_pmu_register(struct drm_i915_private *i915) {}
 static inline void i915_pmu_unregister(struct drm_i915_private *i915) {}
index 16a1923..167eaa8 100644 (file)
@@ -395,10 +395,18 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define  GEN11_GRDOM_MEDIA2            (1 << 6)
 #define  GEN11_GRDOM_MEDIA3            (1 << 7)
 #define  GEN11_GRDOM_MEDIA4            (1 << 8)
+#define  GEN11_GRDOM_MEDIA5            (1 << 9)
+#define  GEN11_GRDOM_MEDIA6            (1 << 10)
+#define  GEN11_GRDOM_MEDIA7            (1 << 11)
+#define  GEN11_GRDOM_MEDIA8            (1 << 12)
 #define  GEN11_GRDOM_VECS              (1 << 13)
 #define  GEN11_GRDOM_VECS2             (1 << 14)
+#define  GEN11_GRDOM_VECS3             (1 << 15)
+#define  GEN11_GRDOM_VECS4             (1 << 16)
 #define  GEN11_GRDOM_SFC0              (1 << 17)
 #define  GEN11_GRDOM_SFC1              (1 << 18)
+#define  GEN11_GRDOM_SFC2              (1 << 19)
+#define  GEN11_GRDOM_SFC3              (1 << 20)
 
 #define  GEN11_VCS_SFC_RESET_BIT(instance)     (GEN11_GRDOM_SFC0 << ((instance) >> 1))
 #define  GEN11_VECS_SFC_RESET_BIT(instance)    (GEN11_GRDOM_SFC0 << (instance))
@@ -422,7 +430,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   GEN12_HCP_SFC_LOCK_ACK_BIT           REG_BIT(1)
 #define   GEN12_HCP_SFC_USAGE_BIT                      REG_BIT(0)
 
-#define GEN12_SFC_DONE(n)              _MMIO(0x1cc00 + (n) * 0x100)
+#define GEN12_SFC_DONE(n)              _MMIO(0x1cc000 + (n) * 0x1000)
 #define GEN12_SFC_DONE_MAX             4
 
 #define RING_PP_DIR_BASE(base)         _MMIO((base) + 0x228)
@@ -1877,7 +1885,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define BXT_PORT_CL1CM_DW30(phy)       _BXT_PHY((phy), _PORT_CL1CM_DW30_BC)
 
 /*
- * CNL/ICL Port/COMBO-PHY Registers
+ * ICL Port/COMBO-PHY Registers
  */
 #define _ICL_COMBOPHY_A                        0x162000
 #define _ICL_COMBOPHY_B                        0x6C000
@@ -1891,11 +1899,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
                                              _RKL_COMBOPHY_D, \
                                              _ADL_COMBOPHY_E)
 
-/* CNL/ICL Port CL_DW registers */
+/* ICL Port CL_DW registers */
 #define _ICL_PORT_CL_DW(dw, phy)       (_ICL_COMBOPHY(phy) + \
                                         4 * (dw))
 
-#define CNL_PORT_CL1CM_DW5             _MMIO(0x162014)
 #define ICL_PORT_CL_DW5(phy)           _MMIO(_ICL_PORT_CL_DW(5, phy))
 #define   CL_POWER_DOWN_ENABLE         (1 << 4)
 #define   SUS_CLOCK_CONFIG             (3 << 0)
@@ -1920,19 +1927,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define ICL_PORT_CL_DW12(phy)          _MMIO(_ICL_PORT_CL_DW(12, phy))
 #define   ICL_LANE_ENABLE_AUX          (1 << 0)
 
-/* CNL/ICL Port COMP_DW registers */
+/* ICL Port COMP_DW registers */
 #define _ICL_PORT_COMP                 0x100
 #define _ICL_PORT_COMP_DW(dw, phy)     (_ICL_COMBOPHY(phy) + \
                                         _ICL_PORT_COMP + 4 * (dw))
 
-#define CNL_PORT_COMP_DW0              _MMIO(0x162100)
 #define ICL_PORT_COMP_DW0(phy)         _MMIO(_ICL_PORT_COMP_DW(0, phy))
 #define   COMP_INIT                    (1 << 31)
 
-#define CNL_PORT_COMP_DW1              _MMIO(0x162104)
 #define ICL_PORT_COMP_DW1(phy)         _MMIO(_ICL_PORT_COMP_DW(1, phy))
 
-#define CNL_PORT_COMP_DW3              _MMIO(0x16210c)
 #define ICL_PORT_COMP_DW3(phy)         _MMIO(_ICL_PORT_COMP_DW(3, phy))
 #define   PROCESS_INFO_DOT_0           (0 << 26)
 #define   PROCESS_INFO_DOT_1           (1 << 26)
@@ -1948,38 +1952,11 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define ICL_PORT_COMP_DW8(phy)         _MMIO(_ICL_PORT_COMP_DW(8, phy))
 #define   IREFGEN                      (1 << 24)
 
-#define CNL_PORT_COMP_DW9              _MMIO(0x162124)
 #define ICL_PORT_COMP_DW9(phy)         _MMIO(_ICL_PORT_COMP_DW(9, phy))
 
-#define CNL_PORT_COMP_DW10             _MMIO(0x162128)
 #define ICL_PORT_COMP_DW10(phy)                _MMIO(_ICL_PORT_COMP_DW(10, phy))
 
-/* CNL/ICL Port PCS registers */
-#define _CNL_PORT_PCS_DW1_GRP_AE       0x162304
-#define _CNL_PORT_PCS_DW1_GRP_B                0x162384
-#define _CNL_PORT_PCS_DW1_GRP_C                0x162B04
-#define _CNL_PORT_PCS_DW1_GRP_D                0x162B84
-#define _CNL_PORT_PCS_DW1_GRP_F                0x162A04
-#define _CNL_PORT_PCS_DW1_LN0_AE       0x162404
-#define _CNL_PORT_PCS_DW1_LN0_B                0x162604
-#define _CNL_PORT_PCS_DW1_LN0_C                0x162C04
-#define _CNL_PORT_PCS_DW1_LN0_D                0x162E04
-#define _CNL_PORT_PCS_DW1_LN0_F                0x162804
-#define CNL_PORT_PCS_DW1_GRP(phy)      _MMIO(_PICK(phy, \
-                                                   _CNL_PORT_PCS_DW1_GRP_AE, \
-                                                   _CNL_PORT_PCS_DW1_GRP_B, \
-                                                   _CNL_PORT_PCS_DW1_GRP_C, \
-                                                   _CNL_PORT_PCS_DW1_GRP_D, \
-                                                   _CNL_PORT_PCS_DW1_GRP_AE, \
-                                                   _CNL_PORT_PCS_DW1_GRP_F))
-#define CNL_PORT_PCS_DW1_LN0(phy)      _MMIO(_PICK(phy, \
-                                                   _CNL_PORT_PCS_DW1_LN0_AE, \
-                                                   _CNL_PORT_PCS_DW1_LN0_B, \
-                                                   _CNL_PORT_PCS_DW1_LN0_C, \
-                                                   _CNL_PORT_PCS_DW1_LN0_D, \
-                                                   _CNL_PORT_PCS_DW1_LN0_AE, \
-                                                   _CNL_PORT_PCS_DW1_LN0_F))
-
+/* ICL Port PCS registers */
 #define _ICL_PORT_PCS_AUX              0x300
 #define _ICL_PORT_PCS_GRP              0x600
 #define _ICL_PORT_PCS_LN(ln)           (0x800 + (ln) * 0x100)
@@ -1998,34 +1975,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   LATENCY_OPTIM_MASK           (0x3 << 2)
 #define   LATENCY_OPTIM_VAL(x)         ((x) << 2)
 
-/* CNL/ICL Port TX registers */
-#define _CNL_PORT_TX_AE_GRP_OFFSET             0x162340
-#define _CNL_PORT_TX_B_GRP_OFFSET              0x1623C0
-#define _CNL_PORT_TX_C_GRP_OFFSET              0x162B40
-#define _CNL_PORT_TX_D_GRP_OFFSET              0x162BC0
-#define _CNL_PORT_TX_F_GRP_OFFSET              0x162A40
-#define _CNL_PORT_TX_AE_LN0_OFFSET             0x162440
-#define _CNL_PORT_TX_B_LN0_OFFSET              0x162640
-#define _CNL_PORT_TX_C_LN0_OFFSET              0x162C40
-#define _CNL_PORT_TX_D_LN0_OFFSET              0x162E40
-#define _CNL_PORT_TX_F_LN0_OFFSET              0x162840
-#define _CNL_PORT_TX_DW_GRP(dw, port)  (_PICK((port), \
-                                              _CNL_PORT_TX_AE_GRP_OFFSET, \
-                                              _CNL_PORT_TX_B_GRP_OFFSET, \
-                                              _CNL_PORT_TX_B_GRP_OFFSET, \
-                                              _CNL_PORT_TX_D_GRP_OFFSET, \
-                                              _CNL_PORT_TX_AE_GRP_OFFSET, \
-                                              _CNL_PORT_TX_F_GRP_OFFSET) + \
-                                              4 * (dw))
-#define _CNL_PORT_TX_DW_LN0(dw, port)  (_PICK((port), \
-                                              _CNL_PORT_TX_AE_LN0_OFFSET, \
-                                              _CNL_PORT_TX_B_LN0_OFFSET, \
-                                              _CNL_PORT_TX_B_LN0_OFFSET, \
-                                              _CNL_PORT_TX_D_LN0_OFFSET, \
-                                              _CNL_PORT_TX_AE_LN0_OFFSET, \
-                                              _CNL_PORT_TX_F_LN0_OFFSET) + \
-                                              4 * (dw))
-
+/* ICL Port TX registers */
 #define _ICL_PORT_TX_AUX               0x380
 #define _ICL_PORT_TX_GRP               0x680
 #define _ICL_PORT_TX_LN(ln)            (0x880 + (ln) * 0x100)
@@ -2037,8 +1987,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define _ICL_PORT_TX_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \
                                          _ICL_PORT_TX_LN(ln) + 4 * (dw))
 
-#define CNL_PORT_TX_DW2_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP(2, port))
-#define CNL_PORT_TX_DW2_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0(2, port))
 #define ICL_PORT_TX_DW2_AUX(phy)       _MMIO(_ICL_PORT_TX_DW_AUX(2, phy))
 #define ICL_PORT_TX_DW2_GRP(phy)       _MMIO(_ICL_PORT_TX_DW_GRP(2, phy))
 #define ICL_PORT_TX_DW2_LN0(phy)       _MMIO(_ICL_PORT_TX_DW_LN(2, 0, phy))
@@ -2051,13 +1999,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   RCOMP_SCALAR(x)              ((x) << 0)
 #define   RCOMP_SCALAR_MASK            (0xFF << 0)
 
-#define _CNL_PORT_TX_DW4_LN0_AE                0x162450
-#define _CNL_PORT_TX_DW4_LN1_AE                0x1624D0
-#define CNL_PORT_TX_DW4_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
-#define CNL_PORT_TX_DW4_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
-#define CNL_PORT_TX_DW4_LN(ln, port)   _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
-                                          ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
-                                                   _CNL_PORT_TX_DW4_LN0_AE)))
 #define ICL_PORT_TX_DW4_AUX(phy)       _MMIO(_ICL_PORT_TX_DW_AUX(4, phy))
 #define ICL_PORT_TX_DW4_GRP(phy)       _MMIO(_ICL_PORT_TX_DW_GRP(4, phy))
 #define ICL_PORT_TX_DW4_LN0(phy)       _MMIO(_ICL_PORT_TX_DW_LN(4, 0, phy))
@@ -2070,8 +2011,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   CURSOR_COEFF(x)              ((x) << 0)
 #define   CURSOR_COEFF_MASK            (0x3F << 0)
 
-#define CNL_PORT_TX_DW5_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP(5, port))
-#define CNL_PORT_TX_DW5_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0(5, port))
 #define ICL_PORT_TX_DW5_AUX(phy)       _MMIO(_ICL_PORT_TX_DW_AUX(5, phy))
 #define ICL_PORT_TX_DW5_GRP(phy)       _MMIO(_ICL_PORT_TX_DW_GRP(5, phy))
 #define ICL_PORT_TX_DW5_LN0(phy)       _MMIO(_ICL_PORT_TX_DW_LN(5, 0, phy))
@@ -2083,8 +2022,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   RTERM_SELECT(x)              ((x) << 3)
 #define   RTERM_SELECT_MASK            (0x7 << 3)
 
-#define CNL_PORT_TX_DW7_GRP(port)      _MMIO(_CNL_PORT_TX_DW_GRP(7, (port)))
-#define CNL_PORT_TX_DW7_LN0(port)      _MMIO(_CNL_PORT_TX_DW_LN0(7, (port)))
 #define ICL_PORT_TX_DW7_AUX(phy)       _MMIO(_ICL_PORT_TX_DW_AUX(7, phy))
 #define ICL_PORT_TX_DW7_GRP(phy)       _MMIO(_ICL_PORT_TX_DW_GRP(7, phy))
 #define ICL_PORT_TX_DW7_LN0(phy)       _MMIO(_ICL_PORT_TX_DW_LN(7, 0, phy))
@@ -2278,6 +2215,68 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   MG_DP_MODE_CFG_DP_X2_MODE                    (1 << 7)
 #define   MG_DP_MODE_CFG_DP_X1_MODE                    (1 << 6)
 
+/*
+ * DG2 SNPS PHY registers (TC1 = PHY_E)
+ */
+#define _SNPS_PHY_A_BASE                       0x168000
+#define _SNPS_PHY_B_BASE                       0x169000
+#define _SNPS_PHY(phy)                         _PHY(phy, \
+                                                    _SNPS_PHY_A_BASE, \
+                                                    _SNPS_PHY_B_BASE)
+#define _SNPS2(phy, reg)                       (_SNPS_PHY(phy) - \
+                                                _SNPS_PHY_A_BASE + (reg))
+#define _MMIO_SNPS(phy, reg)                   _MMIO(_SNPS2(phy, reg))
+#define _MMIO_SNPS_LN(ln, phy, reg)            _MMIO(_SNPS2(phy, \
+                                                            (reg) + (ln) * 0x10))
+
+#define SNPS_PHY_MPLLB_CP(phy)                 _MMIO_SNPS(phy, 0x168000)
+#define   SNPS_PHY_MPLLB_CP_INT                        REG_GENMASK(31, 25)
+#define   SNPS_PHY_MPLLB_CP_INT_GS             REG_GENMASK(23, 17)
+#define   SNPS_PHY_MPLLB_CP_PROP               REG_GENMASK(15, 9)
+#define   SNPS_PHY_MPLLB_CP_PROP_GS            REG_GENMASK(7, 1)
+
+#define SNPS_PHY_MPLLB_DIV(phy)                        _MMIO_SNPS(phy, 0x168004)
+#define   SNPS_PHY_MPLLB_FORCE_EN              REG_BIT(31)
+#define   SNPS_PHY_MPLLB_DIV5_CLK_EN           REG_BIT(29)
+#define   SNPS_PHY_MPLLB_V2I                   REG_GENMASK(27, 26)
+#define   SNPS_PHY_MPLLB_FREQ_VCO              REG_GENMASK(25, 24)
+#define   SNPS_PHY_MPLLB_PMIX_EN               REG_BIT(10)
+#define   SNPS_PHY_MPLLB_TX_CLK_DIV            REG_GENMASK(7, 5)
+
+#define SNPS_PHY_MPLLB_FRACN1(phy)             _MMIO_SNPS(phy, 0x168008)
+#define   SNPS_PHY_MPLLB_FRACN_EN              REG_BIT(31)
+#define   SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN   REG_BIT(30)
+#define   SNPS_PHY_MPLLB_FRACN_DEN             REG_GENMASK(15, 0)
+
+#define SNPS_PHY_MPLLB_FRACN2(phy)             _MMIO_SNPS(phy, 0x16800C)
+#define   SNPS_PHY_MPLLB_FRACN_REM             REG_GENMASK(31, 16)
+#define   SNPS_PHY_MPLLB_FRACN_QUOT            REG_GENMASK(15, 0)
+
+#define SNPS_PHY_MPLLB_SSCEN(phy)              _MMIO_SNPS(phy, 0x168014)
+#define   SNPS_PHY_MPLLB_SSC_EN                        REG_BIT(31)
+#define   SNPS_PHY_MPLLB_SSC_UP_SPREAD         REG_BIT(30)
+#define   SNPS_PHY_MPLLB_SSC_PEAK              REG_GENMASK(29, 10)
+
+#define SNPS_PHY_MPLLB_SSCSTEP(phy)            _MMIO_SNPS(phy, 0x168018)
+#define   SNPS_PHY_MPLLB_SSC_STEPSIZE          REG_GENMASK(31, 11)
+
+#define SNPS_PHY_MPLLB_DIV2(phy)               _MMIO_SNPS(phy, 0x16801C)
+#define   SNPS_PHY_MPLLB_HDMI_PIXEL_CLK_DIV    REG_GENMASK(19, 18)
+#define   SNPS_PHY_MPLLB_HDMI_DIV              REG_GENMASK(17, 15)
+#define   SNPS_PHY_MPLLB_REF_CLK_DIV           REG_GENMASK(14, 12)
+#define   SNPS_PHY_MPLLB_MULTIPLIER            REG_GENMASK(11, 0)
+
+#define SNPS_PHY_REF_CONTROL(phy)              _MMIO_SNPS(phy, 0x168188)
+#define   SNPS_PHY_REF_CONTROL_REF_RANGE       REG_GENMASK(31, 27)
+
+#define SNPS_PHY_TX_REQ(phy)                   _MMIO_SNPS(phy, 0x168200)
+#define   SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR REG_GENMASK(31, 30)
+
+#define SNPS_PHY_TX_EQ(ln, phy)                        _MMIO_SNPS_LN(ln, phy, 0x168300)
+#define   SNPS_PHY_TX_EQ_MAIN                  REG_GENMASK(23, 18)
+#define   SNPS_PHY_TX_EQ_POST                  REG_GENMASK(15, 10)
+#define   SNPS_PHY_TX_EQ_PRE                   REG_GENMASK(7, 2)
+
 /* The spec defines this only for BXT PHY0, but lets assume that this
  * would exist for PHY1 too if it had a second channel.
  */
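
/*
 * Illustrative sketch, not part of the patch: the SNPS PHY register
 * offsets above are written as absolute PHY A addresses and _SNPS2()
 * rebases them onto whichever PHY is selected, with per-lane registers
 * stepping by 0x10.  The helper below reproduces that address math in
 * plain C (names and sample values are illustrative only).
 */
#include <stdio.h>
#include <stdint.h>

#define SNPS_PHY_A_BASE 0x168000u
#define SNPS_PHY_B_BASE 0x169000u

static uint32_t snps_reg(uint32_t phy_base, uint32_t reg, unsigned int lane)
{
        /* rebase the PHY A offset onto the chosen PHY, then add the lane stride */
        return phy_base - SNPS_PHY_A_BASE + reg + lane * 0x10;
}

int main(void)
{
        /* SNPS_PHY_MPLLB_DIV (0x168004) on PHY B resolves to 0x169004 */
        printf("0x%x\n", snps_reg(SNPS_PHY_B_BASE, 0x168004u, 0));
        /* SNPS_PHY_TX_EQ lane 2 on PHY A resolves to 0x168320 */
        printf("0x%x\n", snps_reg(SNPS_PHY_A_BASE, 0x168300u, 2));
        return 0;
}
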
@@ -2516,9 +2515,15 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GEN11_BSD2_RING_BASE   0x1c4000
 #define GEN11_BSD3_RING_BASE   0x1d0000
 #define GEN11_BSD4_RING_BASE   0x1d4000
+#define XEHP_BSD5_RING_BASE    0x1e0000
+#define XEHP_BSD6_RING_BASE    0x1e4000
+#define XEHP_BSD7_RING_BASE    0x1f0000
+#define XEHP_BSD8_RING_BASE    0x1f4000
 #define VEBOX_RING_BASE                0x1a000
 #define GEN11_VEBOX_RING_BASE          0x1c8000
 #define GEN11_VEBOX2_RING_BASE         0x1d8000
+#define XEHP_VEBOX3_RING_BASE          0x1e8000
+#define XEHP_VEBOX4_RING_BASE          0x1f8000
 #define BLT_RING_BASE          0x22000
 #define RING_TAIL(base)                _MMIO((base) + 0x30)
 #define RING_HEAD(base)                _MMIO((base) + 0x34)
@@ -2572,7 +2577,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   ARB_MODE_BWGTLB_DISABLE (1 << 9)
 #define   ARB_MODE_SWIZZLE_BDW (1 << 1)
 #define RENDER_HWS_PGA_GEN7    _MMIO(0x04080)
-#define RING_FAULT_REG(engine) _MMIO(0x4094 + 0x100 * (engine)->hw_id)
+
+#define _RING_FAULT_REG_RCS        0x4094
+#define _RING_FAULT_REG_VCS        0x4194
+#define _RING_FAULT_REG_BCS        0x4294
+#define _RING_FAULT_REG_VECS       0x4394
+#define RING_FAULT_REG(engine)     _MMIO(_PICK((engine)->class, \
+                                              _RING_FAULT_REG_RCS, \
+                                              _RING_FAULT_REG_VCS, \
+                                              _RING_FAULT_REG_VECS, \
+                                              _RING_FAULT_REG_BCS))
 #define GEN8_RING_FAULT_REG    _MMIO(0x4094)
 #define GEN12_RING_FAULT_REG   _MMIO(0xcec4)
 #define   GEN8_RING_FAULT_ENGINE_ID(x) (((x) >> 12) & 0x7)
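
/*
 * Illustrative sketch, not part of the patch: RING_FAULT_REG() now picks
 * the fault register by engine class instead of hw_id, and _PICK() is an
 * array lookup, so the change is equivalent to the table below.  The
 * class numbering is an assumption for illustration.
 */
#include <stdio.h>

enum { RENDER_CLASS, VIDEO_DECODE_CLASS, VIDEO_ENHANCEMENT_CLASS, COPY_ENGINE_CLASS };

static unsigned int ring_fault_reg(int engine_class)
{
        /* order matches the _PICK() arguments: RCS, VCS, VECS, BCS */
        static const unsigned int regs[] = { 0x4094, 0x4194, 0x4394, 0x4294 };

        return regs[engine_class];
}

int main(void)
{
        printf("0x%x\n", ring_fault_reg(COPY_ENGINE_CLASS));    /* 0x4294, the BCS fault register */
        return 0;
}
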
@@ -2672,6 +2686,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GEN12_SC_INSTDONE_EXTRA2       _MMIO(0x7108)
 #define GEN7_SAMPLER_INSTDONE  _MMIO(0xe160)
 #define GEN7_ROW_INSTDONE      _MMIO(0xe164)
+#define MCFG_MCR_SELECTOR              _MMIO(0xfd0)
+#define SF_MCR_SELECTOR                        _MMIO(0xfd8)
 #define GEN8_MCR_SELECTOR              _MMIO(0xfdc)
 #define   GEN8_MCR_SLICE(slice)                (((slice) & 3) << 26)
 #define   GEN8_MCR_SLICE_MASK          GEN8_MCR_SLICE(3)
@@ -3099,6 +3115,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define        GEN10_MIRROR_FUSE3              _MMIO(0x9118)
 #define GEN10_L3BANK_PAIR_COUNT     4
 #define GEN10_L3BANK_MASK   0x0F
+/* on Xe_HP the same fuses indicate mslices instead of L3 banks */
+#define GEN12_MAX_MSLICES 4
+#define GEN12_MEML3_EN_MASK 0x0F
 
 #define GEN8_EU_DISABLE0               _MMIO(0x9134)
 #define   GEN8_EU_DIS0_S0_MASK         0xffffff
@@ -3133,6 +3152,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 #define GEN12_GT_DSS_ENABLE _MMIO(0x913C)
 
+#define XEHP_EU_ENABLE                 _MMIO(0x9134)
+#define XEHP_EU_ENA_MASK               0xFF
+
 #define GEN6_BSD_SLEEP_PSMI_CONTROL    _MMIO(0x12050)
 #define   GEN6_BSD_SLEEP_MSG_DISABLE   (1 << 0)
 #define   GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2)
@@ -4086,6 +4108,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define BXT_GT_PERF_STATUS      _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7070)
 #define GEN6_RP_STATE_LIMITS   _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5994)
 #define GEN6_RP_STATE_CAP      _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
+#define   RP0_CAP_MASK         REG_GENMASK(7, 0)
+#define   RP1_CAP_MASK         REG_GENMASK(15, 8)
+#define   RPN_CAP_MASK         REG_GENMASK(23, 16)
 #define BXT_RP_STATE_CAP        _MMIO(0x138170)
 #define GEN9_RP_STATE_LIMITS   _MMIO(0x138148)
 
@@ -4142,6 +4167,7 @@ enum {
        FAULT_AND_CONTINUE /* Unsupported */
 };
 
+#define CTX_GTT_ADDRESS_MASK GENMASK(31, 12)
 #define GEN8_CTX_VALID (1 << 0)
 #define GEN8_CTX_FORCE_PD_RESTORE (1 << 1)
 #define GEN8_CTX_FORCE_RESTORE (1 << 2)
@@ -4158,6 +4184,11 @@ enum {
 #define GEN11_ENGINE_INSTANCE_SHIFT 48
 #define GEN11_ENGINE_INSTANCE_WIDTH 6
 
+#define XEHP_SW_CTX_ID_SHIFT 39
+#define XEHP_SW_CTX_ID_WIDTH 16
+#define XEHP_SW_COUNTER_SHIFT 58
+#define XEHP_SW_COUNTER_WIDTH 6
+
 #define CHV_CLK_CTL1                   _MMIO(0x101100)
 #define VLV_CLK_CTL2                   _MMIO(0x101104)
 #define   CLK_CTL2_CZCOUNT_30NS_SHIFT  28
@@ -4586,7 +4617,7 @@ enum {
 #define _PSR2_CTL_EDP                          0x6f900
 #define EDP_PSR2_CTL(tran)                     _MMIO_TRANS2(tran, _PSR2_CTL_A)
 #define   EDP_PSR2_ENABLE                      (1 << 31)
-#define   EDP_SU_TRACK_ENABLE                  (1 << 30)
+#define   EDP_SU_TRACK_ENABLE                  (1 << 30) /* up to adl-p */
 #define   TGL_EDP_PSR2_BLOCK_COUNT_NUM_2       (0 << 28)
 #define   TGL_EDP_PSR2_BLOCK_COUNT_NUM_3       (1 << 28)
 #define   EDP_Y_COORDINATE_ENABLE              REG_BIT(25) /* display 10, 11 and 12 */
@@ -4655,17 +4686,23 @@ enum {
 #define PSR2_SU_STATUS_MASK(frame)     (0x3ff << PSR2_SU_STATUS_SHIFT(frame))
 #define PSR2_SU_STATUS_FRAMES          8
 
-#define _PSR2_MAN_TRK_CTL_A                            0x60910
-#define _PSR2_MAN_TRK_CTL_EDP                          0x6f910
-#define PSR2_MAN_TRK_CTL(tran)                         _MMIO_TRANS2(tran, _PSR2_MAN_TRK_CTL_A)
-#define  PSR2_MAN_TRK_CTL_ENABLE                       REG_BIT(31)
-#define  PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK    REG_GENMASK(30, 21)
-#define  PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val)    REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
+#define _PSR2_MAN_TRK_CTL_A                                    0x60910
+#define _PSR2_MAN_TRK_CTL_EDP                                  0x6f910
+#define PSR2_MAN_TRK_CTL(tran)                                 _MMIO_TRANS2(tran, _PSR2_MAN_TRK_CTL_A)
+#define  PSR2_MAN_TRK_CTL_ENABLE                               REG_BIT(31)
+#define  PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK            REG_GENMASK(30, 21)
+#define  PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val)            REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
 #define  PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK              REG_GENMASK(20, 11)
 #define  PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val)              REG_FIELD_PREP(PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val)
-#define  PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME         REG_BIT(3)
-#define  PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME      REG_BIT(2)
-#define  PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE      REG_BIT(1)
+#define  PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME                 REG_BIT(3)
+#define  PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME              REG_BIT(2)
+#define  PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE              REG_BIT(1)
+#define  ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK       REG_GENMASK(28, 16)
+#define  ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(val)       REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR_MASK, val)
+#define  ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK         REG_GENMASK(12, 0)
+#define  ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(val)         REG_FIELD_PREP(ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR_MASK, val)
+#define  ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME            REG_BIT(14)
+#define  ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME         REG_BIT(13)
 
 /* Icelake DSC Rate Control Range Parameter Registers */
 #define DSCA_RC_RANGE_PARAMETERS_0             _MMIO(0x6B240)
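
/*
 * Illustrative sketch, not part of the patch: on ADL-P the PSR2
 * selective-update region moved into new bitfields (start at 28:16,
 * end at 12:0), which the driver fills with REG_FIELD_PREP() on the
 * ADLP_PSR2_MAN_TRK_CTL_* masks above.  The plain-C packing below shows
 * the resulting layout; the region bounds are placeholder values.
 */
#include <stdio.h>
#include <stdint.h>

#define MAN_TRK_ENABLE          (1u << 31)      /* PSR2_MAN_TRK_CTL_ENABLE */
#define ADLP_SU_START_SHIFT     16              /* bits 28:16 */
#define ADLP_SU_END_SHIFT       0               /* bits 12:0  */

int main(void)
{
        uint32_t val = MAN_TRK_ENABLE |
                       (64u  << ADLP_SU_START_SHIFT) |  /* start of the dirty region */
                       (128u << ADLP_SU_END_SHIFT);     /* end of the dirty region   */

        printf("PSR2_MAN_TRK_CTL = 0x%08x\n", val);     /* 0x80400080 */
        return 0;
}
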
@@ -7714,11 +7751,11 @@ enum {
 #define SKL_PS_ECC_STAT(pipe, id)  _MMIO_PIPE(pipe,     \
                        _ID(id, _PS_ECC_STAT_1A, _PS_ECC_STAT_2A),   \
                        _ID(id, _PS_ECC_STAT_1B, _PS_ECC_STAT_2B))
-#define CNL_PS_COEF_INDEX_SET(pipe, id, set)  _MMIO_PIPE(pipe,    \
+#define GLK_PS_COEF_INDEX_SET(pipe, id, set)  _MMIO_PIPE(pipe,    \
                        _ID(id, _PS_COEF_SET0_INDEX_1A, _PS_COEF_SET0_INDEX_2A) + (set) * 8, \
                        _ID(id, _PS_COEF_SET0_INDEX_1B, _PS_COEF_SET0_INDEX_2B) + (set) * 8)
 
-#define CNL_PS_COEF_DATA_SET(pipe, id, set)  _MMIO_PIPE(pipe,     \
+#define GLK_PS_COEF_DATA_SET(pipe, id, set)  _MMIO_PIPE(pipe,     \
                        _ID(id, _PS_COEF_SET0_DATA_1A, _PS_COEF_SET0_DATA_2A) + (set) * 8, \
                        _ID(id, _PS_COEF_SET0_DATA_1B, _PS_COEF_SET0_DATA_2B) + (set) * 8)
 /* legacy palette */
@@ -7933,7 +7970,7 @@ enum {
 #define  DSI1_NON_TE                   (1 << 31)
 #define  DSI0_NON_TE                   (1 << 30)
 #define  ICL_AUX_CHANNEL_E             (1 << 29)
-#define  CNL_AUX_CHANNEL_F             (1 << 28)
+#define  ICL_AUX_CHANNEL_F             (1 << 28)
 #define  GEN9_AUX_CHANNEL_D            (1 << 27)
 #define  GEN9_AUX_CHANNEL_C            (1 << 26)
 #define  GEN9_AUX_CHANNEL_B            (1 << 25)
@@ -7985,9 +8022,9 @@ enum {
 #define  GEN11_GT_DW1_IRQ              (1 << 1)
 #define  GEN11_GT_DW0_IRQ              (1 << 0)
 
-#define DG1_MSTR_UNIT_INTR             _MMIO(0x190008)
+#define DG1_MSTR_TILE_INTR             _MMIO(0x190008)
 #define   DG1_MSTR_IRQ                 REG_BIT(31)
-#define   DG1_MSTR_UNIT(u)             REG_BIT(u)
+#define   DG1_MSTR_TILE(t)             REG_BIT(t)
 
 #define GEN11_DISPLAY_INT_CTL          _MMIO(0x44200)
 #define  GEN11_DISPLAY_IRQ_ENABLE      (1 << 31)
@@ -8070,7 +8107,10 @@ enum {
 #define GEN11_BCS_RSVD_INTR_MASK       _MMIO(0x1900a0)
 #define GEN11_VCS0_VCS1_INTR_MASK      _MMIO(0x1900a8)
 #define GEN11_VCS2_VCS3_INTR_MASK      _MMIO(0x1900ac)
+#define GEN12_VCS4_VCS5_INTR_MASK      _MMIO(0x1900b0)
+#define GEN12_VCS6_VCS7_INTR_MASK      _MMIO(0x1900b4)
 #define GEN11_VECS0_VECS1_INTR_MASK    _MMIO(0x1900d0)
+#define GEN12_VECS2_VECS3_INTR_MASK    _MMIO(0x1900d4)
 #define GEN11_GUC_SG_INTR_MASK         _MMIO(0x1900e8)
 #define GEN11_GPM_WGBOXPERF_INTR_MASK  _MMIO(0x1900ec)
 #define GEN11_CRYPTO_RSVD_INTR_MASK    _MMIO(0x1900f0)
@@ -8123,7 +8163,6 @@ enum {
 #define  KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14)
 
 #define CHICKEN_MISC_2         _MMIO(0x42084)
-#define  CNL_COMP_PWR_DOWN     (1 << 23)
 #define  KBL_ARB_FILL_SPARE_14 REG_BIT(14)
 #define  KBL_ARB_FILL_SPARE_13 REG_BIT(13)
 #define  GLK_CL2_PWR_DOWN      (1 << 12)
@@ -8161,15 +8200,16 @@ enum {
                                            [TRANSCODER_B] = _CHICKEN_TRANS_B, \
                                            [TRANSCODER_C] = _CHICKEN_TRANS_C, \
                                            [TRANSCODER_D] = _CHICKEN_TRANS_D))
-#define  HSW_FRAME_START_DELAY_MASK    (3 << 27)
-#define  HSW_FRAME_START_DELAY(x)      ((x) << 27) /* 0-3 */
-#define  VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */
-#define  DDI_TRAINING_OVERRIDE_ENABLE  (1 << 19)
-#define  DDI_TRAINING_OVERRIDE_VALUE   (1 << 18)
-#define  DDIE_TRAINING_OVERRIDE_ENABLE (1 << 17) /* CHICKEN_TRANS_A only */
-#define  DDIE_TRAINING_OVERRIDE_VALUE  (1 << 16) /* CHICKEN_TRANS_A only */
-#define  PSR2_ADD_VERTICAL_LINE_COUNT   (1 << 15)
-#define  PSR2_VSC_ENABLE_PROG_HEADER    (1 << 12)
+#define  HSW_FRAME_START_DELAY_MASK    REG_GENMASK(28, 27)
+#define  HSW_FRAME_START_DELAY(x)      REG_FIELD_PREP(HSW_FRAME_START_DELAY_MASK, x)
+#define  VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
+#define  FECSTALL_DIS_DPTSTREAM_DPTTG  REG_BIT(23)
+#define  DDI_TRAINING_OVERRIDE_ENABLE  REG_BIT(19)
+#define  DDI_TRAINING_OVERRIDE_VALUE   REG_BIT(18)
+#define  DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
+#define  DDIE_TRAINING_OVERRIDE_VALUE  REG_BIT(16) /* CHICKEN_TRANS_A only */
+#define  PSR2_ADD_VERTICAL_LINE_COUNT  REG_BIT(15)
+#define  PSR2_VSC_ENABLE_PROG_HEADER   REG_BIT(12)
 
 #define DISP_ARB_CTL   _MMIO(0x45000)
 #define  DISP_FBC_MEMORY_WAKE          (1 << 31)
@@ -8227,9 +8267,8 @@ enum {
 
 #define GEN8_CHICKEN_DCPR_1            _MMIO(0x46430)
 #define   SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
-#define   CNL_DELAY_PMRSP              (1 << 22)
+#define   ICL_DELAY_PMRSP              (1 << 22)
 #define   MASK_WAKEMEM                 (1 << 13)
-#define   CNL_DDI_CLOCK_REG_ACCESS_ON  (1 << 7)
 
 #define GEN11_CHICKEN_DCPR_2                   _MMIO(0x46434)
 #define   DCPR_MASK_MAXLATENCY_MEMUP_CLR       REG_BIT(27)
@@ -8250,10 +8289,9 @@ enum {
 #define   SKL_DFSM_PIPE_B_DISABLE      (1 << 21)
 #define   SKL_DFSM_PIPE_C_DISABLE      (1 << 28)
 #define   TGL_DFSM_PIPE_D_DISABLE      (1 << 22)
-#define   CNL_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
+#define   GLK_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
 
 #define SKL_DSSM                               _MMIO(0x51004)
-#define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz                (1 << 31)
 #define ICL_DSSM_CDCLK_PLL_REFCLK_MASK         (7 << 29)
 #define ICL_DSSM_CDCLK_PLL_REFCLK_24MHz                (0 << 29)
 #define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz      (1 << 29)
@@ -8352,7 +8390,6 @@ enum {
 
 /* GEN8 chicken */
 #define HDC_CHICKEN0                           _MMIO(0x7300)
-#define CNL_HDC_CHICKEN0                       _MMIO(0xE5F0)
 #define ICL_HDC_MODE                           _MMIO(0xE5F4)
 #define  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE        (1 << 15)
 #define  HDC_FENCE_DEST_SLM_DISABLE            (1 << 14)
@@ -8386,7 +8423,8 @@ enum {
 #define _PIPEC_CHICKEN                         0x72038
 #define PIPE_CHICKEN(pipe)                     _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
                                                           _PIPEB_CHICKEN)
-#define   UNDERRUN_RECOVERY_DISABLE            REG_BIT(30)
+#define   UNDERRUN_RECOVERY_DISABLE_ADLP       REG_BIT(30)
+#define   UNDERRUN_RECOVERY_ENABLE_DG2         REG_BIT(30)
 #define   PIXEL_ROUNDING_TRUNC_FB_PASSTHRU     (1 << 15)
 #define   PER_PIXEL_ALPHA_BYPASS_EN            (1 << 7)
 
@@ -9198,6 +9236,8 @@ enum {
 #define   GEN9_FREQUENCY(x)                    ((x) << 23)
 #define   GEN6_OFFSET(x)                       ((x) << 19)
 #define   GEN6_AGGRESSIVE_TURBO                        (0 << 15)
+#define   GEN9_SW_REQ_UNSLICE_RATIO_SHIFT      23
+
 #define GEN6_RC_VIDEO_FREQ                     _MMIO(0xA00C)
 #define GEN6_RC_CONTROL                                _MMIO(0xA090)
 #define   GEN6_RC_CTL_RC6pp_ENABLE             (1 << 16)
@@ -9366,9 +9406,13 @@ enum {
 #define   ICL_PCODE_MEM_SUBSYSYSTEM_INFO       0xd
 #define     ICL_PCODE_MEM_SS_READ_GLOBAL_INFO  (0x0 << 8)
 #define     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point)        (((point) << 16) | (0x1 << 8))
+#define     ADL_PCODE_MEM_SS_READ_PSF_GV_INFO  ((0) | (0x2 << 8))
 #define   ICL_PCODE_SAGV_DE_MEM_SS_CONFIG      0xe
 #define     ICL_PCODE_POINTS_RESTRICTED                0x0
-#define     ICL_PCODE_POINTS_RESTRICTED_MASK   0x1
+#define     ICL_PCODE_POINTS_RESTRICTED_MASK   0xf
+#define   ADLS_PSF_PT_SHIFT                    8
+#define   ADLS_QGV_PT_MASK                     REG_GENMASK(7, 0)
+#define   ADLS_PSF_PT_MASK                     REG_GENMASK(10, 8)
 #define   GEN6_PCODE_READ_D_COMP               0x10
 #define   GEN6_PCODE_WRITE_D_COMP              0x11
 #define   ICL_PCODE_EXIT_TCCOLD                        0x12
@@ -9528,7 +9572,6 @@ enum {
 #define   HSW_SAMPLE_C_PERFORMANCE     (1 << 9)
 #define   GEN8_CENTROID_PIXEL_OPT_DIS  (1 << 8)
 #define   GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC  (1 << 5)
-#define   CNL_FAST_ANISO_L1_BANKING_FIX        (1 << 4)
 #define   GEN8_SAMPLER_POWER_BYPASS_DIS        (1 << 1)
 
 #define GEN9_HALF_SLICE_CHICKEN7       _MMIO(0xe194)
@@ -9709,15 +9752,12 @@ enum {
 /* HSW/BDW power well */
 #define   HSW_PW_CTL_IDX_GLOBAL                        15
 
-/* SKL/BXT/GLK/CNL power wells */
+/* SKL/BXT/GLK power wells */
 #define   SKL_PW_CTL_IDX_PW_2                  15
 #define   SKL_PW_CTL_IDX_PW_1                  14
-#define   CNL_PW_CTL_IDX_AUX_F                 12
-#define   CNL_PW_CTL_IDX_AUX_D                 11
 #define   GLK_PW_CTL_IDX_AUX_C                 10
 #define   GLK_PW_CTL_IDX_AUX_B                 9
 #define   GLK_PW_CTL_IDX_AUX_A                 8
-#define   CNL_PW_CTL_IDX_DDI_F                 6
 #define   SKL_PW_CTL_IDX_DDI_D                 4
 #define   SKL_PW_CTL_IDX_DDI_C                 3
 #define   SKL_PW_CTL_IDX_DDI_B                 2
@@ -9816,19 +9856,6 @@ enum skl_power_gate {
        ((pw_idx) - ICL_PW_CTL_IDX_PW_1 + SKL_PG1)
 #define  SKL_FUSE_PG_DIST_STATUS(pg)           (1 << (27 - (pg)))
 
-#define _CNL_AUX_REG_IDX(pw_idx)       ((pw_idx) - GLK_PW_CTL_IDX_AUX_B)
-#define _CNL_AUX_ANAOVRD1_B            0x162250
-#define _CNL_AUX_ANAOVRD1_C            0x162210
-#define _CNL_AUX_ANAOVRD1_D            0x1622D0
-#define _CNL_AUX_ANAOVRD1_F            0x162A90
-#define CNL_AUX_ANAOVRD1(pw_idx)       _MMIO(_PICK(_CNL_AUX_REG_IDX(pw_idx), \
-                                                   _CNL_AUX_ANAOVRD1_B, \
-                                                   _CNL_AUX_ANAOVRD1_C, \
-                                                   _CNL_AUX_ANAOVRD1_D, \
-                                                   _CNL_AUX_ANAOVRD1_F))
-#define   CNL_AUX_ANAOVRD1_ENABLE      (1 << 16)
-#define   CNL_AUX_ANAOVRD1_LDO_BYPASS  (1 << 23)
-
 #define _ICL_AUX_REG_IDX(pw_idx)       ((pw_idx) - ICL_PW_CTL_IDX_AUX_A)
 #define _ICL_AUX_ANAOVRD1_A            0x162398
 #define _ICL_AUX_ANAOVRD1_B            0x6C398
@@ -10128,11 +10155,11 @@ enum skl_power_gate {
 #define  TRANS_DDI_BPC_10              (1 << 20)
 #define  TRANS_DDI_BPC_6               (2 << 20)
 #define  TRANS_DDI_BPC_12              (3 << 20)
-#define  TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK        REG_GENMASK(19, 18) /* bdw-cnl */
+#define  TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK        REG_GENMASK(19, 18)
 #define  TRANS_DDI_PORT_SYNC_MASTER_SELECT(x)  REG_FIELD_PREP(TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK, (x))
 #define  TRANS_DDI_PVSYNC              (1 << 17)
 #define  TRANS_DDI_PHSYNC              (1 << 16)
-#define  TRANS_DDI_PORT_SYNC_ENABLE    REG_BIT(15) /* bdw-cnl */
+#define  TRANS_DDI_PORT_SYNC_ENABLE    REG_BIT(15)
 #define  TRANS_DDI_EDP_INPUT_MASK      (7 << 12)
 #define  TRANS_DDI_EDP_INPUT_A_ON      (0 << 12)
 #define  TRANS_DDI_EDP_INPUT_A_ONOFF   (4 << 12)
@@ -10165,6 +10192,9 @@ enum skl_power_gate {
 #define  PORT_SYNC_MODE_MASTER_SELECT_MASK     REG_GENMASK(2, 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT(x)       REG_FIELD_PREP(PORT_SYNC_MODE_MASTER_SELECT_MASK, (x))
 
+#define TRANS_CMTG_CHICKEN             _MMIO(0x6fa90)
+#define  DISABLE_DPT_CLK_GATING                REG_BIT(1)
+
 /* DisplayPort Transport Control */
 #define _DP_TP_CTL_A                   0x64040
 #define _DP_TP_CTL_B                   0x64140
@@ -10488,17 +10518,6 @@ enum skl_power_gate {
 #define DPLL_CFGCR1(id)        _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
 #define DPLL_CFGCR2(id)        _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
 
-/*
- * CNL Clocks
- */
-#define DPCLKA_CFGCR0                          _MMIO(0x6C200)
-#define  DPCLKA_CFGCR0_DDI_CLK_OFF(port)       (1 << ((port) ==  PORT_F ? 23 : \
-                                                     (port) + 10))
-#define  DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port) ((port) == PORT_F ? 21 : \
-                                               (port) * 2)
-#define  DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port)  (3 << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
-#define  DPCLKA_CFGCR0_DDI_CLK_SEL(pll, port)  ((pll) << DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port))
-
 /* ICL Clocks */
 #define ICL_DPCLKA_CFGCR0                      _MMIO(0x164280)
 #define  ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy)    (1 << _PICK(phy, 10, 11, 24, 4, 5))
@@ -10554,7 +10573,7 @@ enum skl_power_gate {
                                                        ADLS_DPCLKA_DDIJ_SEL_MASK, \
                                                        ADLS_DPCLKA_DDIK_SEL_MASK)
 
-/* CNL PLL */
+/* ICL PLL */
 #define DPLL0_ENABLE           0x46010
 #define DPLL1_ENABLE           0x46014
 #define _ADLS_DPLL2_ENABLE     0x46018
@@ -10563,9 +10582,14 @@ enum skl_power_gate {
 #define  PLL_LOCK              (1 << 30)
 #define  PLL_POWER_ENABLE      (1 << 27)
 #define  PLL_POWER_STATE       (1 << 26)
-#define CNL_DPLL_ENABLE(pll)   _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
+#define ICL_DPLL_ENABLE(pll)   _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
                                           _ADLS_DPLL2_ENABLE, _ADLS_DPLL3_ENABLE)
 
+#define _DG2_PLL3_ENABLE       0x4601C
+
+#define DG2_PLL_ENABLE(pll) _MMIO_PLL3(pll, DPLL0_ENABLE, DPLL1_ENABLE, \
+                                      _ADLS_DPLL2_ENABLE, _DG2_PLL3_ENABLE)
+
 #define TBT_PLL_ENABLE         _MMIO(0x46020)
 
 #define _MG_PLL1_ENABLE                0x46030
@@ -10731,60 +10755,52 @@ enum skl_power_gate {
                                                   _MG_PLL_TDC_COLDST_BIAS_PORT1, \
                                                   _MG_PLL_TDC_COLDST_BIAS_PORT2)
 
-#define _CNL_DPLL0_CFGCR0              0x6C000
-#define _CNL_DPLL1_CFGCR0              0x6C080
-#define  DPLL_CFGCR0_HDMI_MODE         (1 << 30)
-#define  DPLL_CFGCR0_SSC_ENABLE                (1 << 29)
-#define  DPLL_CFGCR0_SSC_ENABLE_ICL    (1 << 25)
-#define  DPLL_CFGCR0_LINK_RATE_MASK    (0xf << 25)
-#define  DPLL_CFGCR0_LINK_RATE_2700    (0 << 25)
-#define  DPLL_CFGCR0_LINK_RATE_1350    (1 << 25)
-#define  DPLL_CFGCR0_LINK_RATE_810     (2 << 25)
-#define  DPLL_CFGCR0_LINK_RATE_1620    (3 << 25)
-#define  DPLL_CFGCR0_LINK_RATE_1080    (4 << 25)
-#define  DPLL_CFGCR0_LINK_RATE_2160    (5 << 25)
-#define  DPLL_CFGCR0_LINK_RATE_3240    (6 << 25)
-#define  DPLL_CFGCR0_LINK_RATE_4050    (7 << 25)
-#define  DPLL_CFGCR0_DCO_FRACTION_MASK (0x7fff << 10)
-#define  DPLL_CFGCR0_DCO_FRACTION_SHIFT        (10)
-#define  DPLL_CFGCR0_DCO_FRACTION(x)   ((x) << 10)
-#define  DPLL_CFGCR0_DCO_INTEGER_MASK  (0x3ff)
-#define CNL_DPLL_CFGCR0(pll)           _MMIO_PLL(pll, _CNL_DPLL0_CFGCR0, _CNL_DPLL1_CFGCR0)
-
-#define _CNL_DPLL0_CFGCR1              0x6C004
-#define _CNL_DPLL1_CFGCR1              0x6C084
-#define  DPLL_CFGCR1_QDIV_RATIO_MASK   (0xff << 10)
-#define  DPLL_CFGCR1_QDIV_RATIO_SHIFT  (10)
-#define  DPLL_CFGCR1_QDIV_RATIO(x)     ((x) << 10)
-#define  DPLL_CFGCR1_QDIV_MODE_SHIFT   (9)
-#define  DPLL_CFGCR1_QDIV_MODE(x)      ((x) << 9)
-#define  DPLL_CFGCR1_KDIV_MASK         (7 << 6)
-#define  DPLL_CFGCR1_KDIV_SHIFT                (6)
-#define  DPLL_CFGCR1_KDIV(x)           ((x) << 6)
-#define  DPLL_CFGCR1_KDIV_1            (1 << 6)
-#define  DPLL_CFGCR1_KDIV_2            (2 << 6)
-#define  DPLL_CFGCR1_KDIV_3            (4 << 6)
-#define  DPLL_CFGCR1_PDIV_MASK         (0xf << 2)
-#define  DPLL_CFGCR1_PDIV_SHIFT                (2)
-#define  DPLL_CFGCR1_PDIV(x)           ((x) << 2)
-#define  DPLL_CFGCR1_PDIV_2            (1 << 2)
-#define  DPLL_CFGCR1_PDIV_3            (2 << 2)
-#define  DPLL_CFGCR1_PDIV_5            (4 << 2)
-#define  DPLL_CFGCR1_PDIV_7            (8 << 2)
-#define  DPLL_CFGCR1_CENTRAL_FREQ      (3 << 0)
-#define  DPLL_CFGCR1_CENTRAL_FREQ_8400 (3 << 0)
-#define  TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL (0 << 0)
-#define CNL_DPLL_CFGCR1(pll)           _MMIO_PLL(pll, _CNL_DPLL0_CFGCR1, _CNL_DPLL1_CFGCR1)
-
 #define _ICL_DPLL0_CFGCR0              0x164000
 #define _ICL_DPLL1_CFGCR0              0x164080
 #define ICL_DPLL_CFGCR0(pll)           _MMIO_PLL(pll, _ICL_DPLL0_CFGCR0, \
                                                  _ICL_DPLL1_CFGCR0)
+#define   DPLL_CFGCR0_HDMI_MODE                (1 << 30)
+#define   DPLL_CFGCR0_SSC_ENABLE       (1 << 29)
+#define   DPLL_CFGCR0_SSC_ENABLE_ICL   (1 << 25)
+#define   DPLL_CFGCR0_LINK_RATE_MASK   (0xf << 25)
+#define   DPLL_CFGCR0_LINK_RATE_2700   (0 << 25)
+#define   DPLL_CFGCR0_LINK_RATE_1350   (1 << 25)
+#define   DPLL_CFGCR0_LINK_RATE_810    (2 << 25)
+#define   DPLL_CFGCR0_LINK_RATE_1620   (3 << 25)
+#define   DPLL_CFGCR0_LINK_RATE_1080   (4 << 25)
+#define   DPLL_CFGCR0_LINK_RATE_2160   (5 << 25)
+#define   DPLL_CFGCR0_LINK_RATE_3240   (6 << 25)
+#define   DPLL_CFGCR0_LINK_RATE_4050   (7 << 25)
+#define   DPLL_CFGCR0_DCO_FRACTION_MASK        (0x7fff << 10)
+#define   DPLL_CFGCR0_DCO_FRACTION_SHIFT       (10)
+#define   DPLL_CFGCR0_DCO_FRACTION(x)  ((x) << 10)
+#define   DPLL_CFGCR0_DCO_INTEGER_MASK (0x3ff)
 
 #define _ICL_DPLL0_CFGCR1              0x164004
 #define _ICL_DPLL1_CFGCR1              0x164084
 #define ICL_DPLL_CFGCR1(pll)           _MMIO_PLL(pll, _ICL_DPLL0_CFGCR1, \
                                                  _ICL_DPLL1_CFGCR1)
+#define   DPLL_CFGCR1_QDIV_RATIO_MASK  (0xff << 10)
+#define   DPLL_CFGCR1_QDIV_RATIO_SHIFT (10)
+#define   DPLL_CFGCR1_QDIV_RATIO(x)    ((x) << 10)
+#define   DPLL_CFGCR1_QDIV_MODE_SHIFT  (9)
+#define   DPLL_CFGCR1_QDIV_MODE(x)     ((x) << 9)
+#define   DPLL_CFGCR1_KDIV_MASK                (7 << 6)
+#define   DPLL_CFGCR1_KDIV_SHIFT               (6)
+#define   DPLL_CFGCR1_KDIV(x)          ((x) << 6)
+#define   DPLL_CFGCR1_KDIV_1           (1 << 6)
+#define   DPLL_CFGCR1_KDIV_2           (2 << 6)
+#define   DPLL_CFGCR1_KDIV_3           (4 << 6)
+#define   DPLL_CFGCR1_PDIV_MASK                (0xf << 2)
+#define   DPLL_CFGCR1_PDIV_SHIFT               (2)
+#define   DPLL_CFGCR1_PDIV(x)          ((x) << 2)
+#define   DPLL_CFGCR1_PDIV_2           (1 << 2)
+#define   DPLL_CFGCR1_PDIV_3           (2 << 2)
+#define   DPLL_CFGCR1_PDIV_5           (4 << 2)
+#define   DPLL_CFGCR1_PDIV_7           (8 << 2)
+#define   DPLL_CFGCR1_CENTRAL_FREQ     (3 << 0)
+#define   DPLL_CFGCR1_CENTRAL_FREQ_8400        (3 << 0)
+#define   TGL_DPLL_CFGCR1_CFSELOVRD_NORMAL_XTAL        (0 << 0)
 
 #define _TGL_DPLL0_CFGCR0              0x164284
 #define _TGL_DPLL1_CFGCR0              0x16428C
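
/*
 * Illustrative sketch, not part of the patch: the DPLL_CFGCR0 field
 * macros that moved next to the ICL definitions above split the DCO
 * setting into a 10-bit integer and a 15-bit fraction of the reference
 * clock.  The standalone C below recombines them; the 38400 kHz
 * reference and the sample register value are assumptions.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t cfgcr0   = 0x01e000d2;                 /* hypothetical CFGCR0 readout */
        uint32_t integer  = cfgcr0 & 0x3ff;             /* DPLL_CFGCR0_DCO_INTEGER_MASK  */
        uint32_t fraction = (cfgcr0 >> 10) & 0x7fff;    /* DPLL_CFGCR0_DCO_FRACTION_MASK */
        uint32_t ref_khz  = 38400;

        uint64_t dco_khz = (uint64_t)ref_khz * integer +
                           (((uint64_t)ref_khz * fraction) >> 15);

        printf("DCO = %llu kHz\n", (unsigned long long)dco_khz);        /* 8100000 kHz */
        return 0;
}
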
@@ -11004,8 +11020,8 @@ enum skl_power_gate {
 #define   BXT_DE_PLL_LOCK              (1 << 30)
 #define   BXT_DE_PLL_FREQ_REQ          (1 << 23)
 #define   BXT_DE_PLL_FREQ_REQ_ACK      (1 << 22)
-#define   CNL_CDCLK_PLL_RATIO(x)       (x)
-#define   CNL_CDCLK_PLL_RATIO_MASK     0xff
+#define   ICL_CDCLK_PLL_RATIO(x)       (x)
+#define   ICL_CDCLK_PLL_RATIO_MASK     0xff
 
 /* GEN9 DC */
 #define DC_STATE_EN                    _MMIO(0x45504)
@@ -11060,6 +11076,7 @@ enum skl_power_gate {
 #define SKL_MEMORY_FREQ_MULTIPLIER_HZ          266666666
 #define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU      _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
 #define  SKL_REQ_DATA_MASK                     (0xF << 0)
+#define  DG1_GEAR_TYPE                         REG_BIT(16)
 
 #define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
 #define  SKL_DRAM_DDR_TYPE_MASK                        (0x3 << 0)
@@ -11082,18 +11099,29 @@ enum skl_power_gate {
 #define  SKL_DRAM_RANK_1                       (0x0 << 10)
 #define  SKL_DRAM_RANK_2                       (0x1 << 10)
 #define  SKL_DRAM_RANK_MASK                    (0x1 << 10)
-#define  CNL_DRAM_SIZE_MASK                    0x7F
-#define  CNL_DRAM_WIDTH_MASK                   (0x3 << 7)
-#define  CNL_DRAM_WIDTH_SHIFT                  7
-#define  CNL_DRAM_WIDTH_X8                     (0x0 << 7)
-#define  CNL_DRAM_WIDTH_X16                    (0x1 << 7)
-#define  CNL_DRAM_WIDTH_X32                    (0x2 << 7)
-#define  CNL_DRAM_RANK_MASK                    (0x3 << 9)
-#define  CNL_DRAM_RANK_SHIFT                   9
-#define  CNL_DRAM_RANK_1                       (0x0 << 9)
-#define  CNL_DRAM_RANK_2                       (0x1 << 9)
-#define  CNL_DRAM_RANK_3                       (0x2 << 9)
-#define  CNL_DRAM_RANK_4                       (0x3 << 9)
+#define  ICL_DRAM_SIZE_MASK                    0x7F
+#define  ICL_DRAM_WIDTH_MASK                   (0x3 << 7)
+#define  ICL_DRAM_WIDTH_SHIFT                  7
+#define  ICL_DRAM_WIDTH_X8                     (0x0 << 7)
+#define  ICL_DRAM_WIDTH_X16                    (0x1 << 7)
+#define  ICL_DRAM_WIDTH_X32                    (0x2 << 7)
+#define  ICL_DRAM_RANK_MASK                    (0x3 << 9)
+#define  ICL_DRAM_RANK_SHIFT                   9
+#define  ICL_DRAM_RANK_1                       (0x0 << 9)
+#define  ICL_DRAM_RANK_2                       (0x1 << 9)
+#define  ICL_DRAM_RANK_3                       (0x2 << 9)
+#define  ICL_DRAM_RANK_4                       (0x3 << 9)
+
+#define SA_PERF_STATUS_0_0_0_MCHBAR_PC         _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5918)
+#define  DG1_QCLK_RATIO_MASK                   REG_GENMASK(9, 2)
+#define  DG1_QCLK_REFERENCE                    REG_BIT(10)
+
+#define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR      _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
+#define   DG1_DRAM_T_RDPRE_MASK                        REG_GENMASK(16, 11)
+#define   DG1_DRAM_T_RP_MASK                   REG_GENMASK(6, 0)
+#define MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4004)
+#define   DG1_DRAM_T_RCD_MASK                  REG_GENMASK(15, 9)
+#define   DG1_DRAM_T_RAS_MASK                  REG_GENMASK(8, 1)
 
 /*
  * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
@@ -12296,6 +12324,7 @@ enum skl_power_gate {
 
 /* MOCS (Memory Object Control State) registers */
 #define GEN9_LNCFCMOCS(i)      _MMIO(0xb020 + (i) * 4) /* L3 Cache Control */
+#define GEN9_LNCFCMOCS_REG_COUNT       32
 
 #define __GEN9_RCS0_MOCS0      0xc800
 #define GEN9_GFX_MOCS(i)       _MMIO(__GEN9_RCS0_MOCS0 + (i) * 4)
@@ -12343,6 +12372,7 @@ enum skl_power_gate {
                                                 _ICL_PHY_MISC_B)
 #define  ICL_PHY_MISC_MUX_DDID                 (1 << 28)
 #define  ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN      (1 << 23)
+#define  DG2_PHY_DP_TX_ACK_MASK                        REG_GENMASK(23, 20)
 
 /* Icelake Display Stream Compression Registers */
 #define DSCA_PICTURE_PARAMETER_SET_0           _MMIO(0x6B200)
index 37aef13..ce44671 100644 (file)
 
 #include "i915_active.h"
 #include "i915_drv.h"
-#include "i915_globals.h"
 #include "i915_trace.h"
 #include "intel_pm.h"
 
 struct execute_cb {
        struct irq_work work;
        struct i915_sw_fence *fence;
-       void (*hook)(struct i915_request *rq, struct dma_fence *signal);
        struct i915_request *signal;
 };
 
-static struct i915_global_request {
-       struct i915_global base;
-       struct kmem_cache *slab_requests;
-       struct kmem_cache *slab_execute_cbs;
-} global;
+static struct kmem_cache *slab_requests;
+static struct kmem_cache *slab_execute_cbs;
 
 static const char *i915_fence_get_driver_name(struct dma_fence *fence)
 {
@@ -108,13 +103,16 @@ static signed long i915_fence_wait(struct dma_fence *fence,
 
 struct kmem_cache *i915_request_slab_cache(void)
 {
-       return global.slab_requests;
+       return slab_requests;
 }
 
 static void i915_fence_release(struct dma_fence *fence)
 {
        struct i915_request *rq = to_request(fence);
 
+       GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&
+                  rq->guc_prio != GUC_PRIO_FINI);
+
        /*
         * The request is put onto a RCU freelist (i.e. the address
         * is immediately reused), mark the fences as being freed now.
@@ -126,41 +124,19 @@ static void i915_fence_release(struct dma_fence *fence)
        i915_sw_fence_fini(&rq->semaphore);
 
        /*
-        * Keep one request on each engine for reserved use under mempressure
-        *
-        * We do not hold a reference to the engine here and so have to be
-        * very careful in what rq->engine we poke. The virtual engine is
-        * referenced via the rq->context and we released that ref during
-        * i915_request_retire(), ergo we must not dereference a virtual
-        * engine here. Not that we would want to, as the only consumer of
-        * the reserved engine->request_pool is the power management parking,
-        * which must-not-fail, and that is only run on the physical engines.
-        *
-        * Since the request must have been executed to be have completed,
-        * we know that it will have been processed by the HW and will
-        * not be unsubmitted again, so rq->engine and rq->execution_mask
-        * at this point is stable. rq->execution_mask will be a single
-        * bit if the last and _only_ engine it could execution on was a
-        * physical engine, if it's multiple bits then it started on and
-        * could still be on a virtual engine. Thus if the mask is not a
-        * power-of-two we assume that rq->engine may still be a virtual
-        * engine and so a dangling invalid pointer that we cannot dereference
-        *
-        * For example, consider the flow of a bonded request through a virtual
-        * engine. The request is created with a wide engine mask (all engines
-        * that we might execute on). On processing the bond, the request mask
-        * is reduced to one or more engines. If the request is subsequently
-        * bound to a single engine, it will then be constrained to only
-        * execute on that engine and never returned to the virtual engine
-        * after timeslicing away, see __unwind_incomplete_requests(). Thus we
-        * know that if the rq->execution_mask is a single bit, rq->engine
-        * can be a physical engine with the exact corresponding mask.
+        * Keep one request on each engine for reserved use under mempressure;
+        * do not use with virtual engines as this really is only needed for
+        * kernel contexts.
         */
-       if (is_power_of_2(rq->execution_mask) &&
-           !cmpxchg(&rq->engine->request_pool, NULL, rq))
+       if (!intel_engine_is_virtual(rq->engine) &&
+           !cmpxchg(&rq->engine->request_pool, NULL, rq)) {
+               intel_context_put(rq->context);
                return;
+       }
+
+       intel_context_put(rq->context);
 
-       kmem_cache_free(global.slab_requests, rq);
+       kmem_cache_free(slab_requests, rq);
 }
 
 const struct dma_fence_ops i915_fence_ops = {
@@ -177,18 +153,7 @@ static void irq_execute_cb(struct irq_work *wrk)
        struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
 
        i915_sw_fence_complete(cb->fence);
-       kmem_cache_free(global.slab_execute_cbs, cb);
-}
-
-static void irq_execute_cb_hook(struct irq_work *wrk)
-{
-       struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
-
-       cb->hook(container_of(cb->fence, struct i915_request, submit),
-                &cb->signal->fence);
-       i915_request_put(cb->signal);
-
-       irq_execute_cb(wrk);
+       kmem_cache_free(slab_execute_cbs, cb);
 }
 
 static __always_inline void
@@ -216,7 +181,7 @@ static bool irq_work_imm(struct irq_work *wrk)
        return false;
 }
 
-static void __notify_execute_cb_imm(struct i915_request *rq)
+void i915_request_notify_execute_cb_imm(struct i915_request *rq)
 {
        __notify_execute_cb(rq, irq_work_imm);
 }
@@ -272,11 +237,11 @@ i915_request_active_engine(struct i915_request *rq,
         * check that we have acquired the lock on the final engine.
         */
        locked = READ_ONCE(rq->engine);
-       spin_lock_irq(&locked->active.lock);
+       spin_lock_irq(&locked->sched_engine->lock);
        while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
-               spin_unlock(&locked->active.lock);
+               spin_unlock(&locked->sched_engine->lock);
                locked = engine;
-               spin_lock(&locked->active.lock);
+               spin_lock(&locked->sched_engine->lock);
        }
 
        if (i915_request_is_active(rq)) {
@@ -285,42 +250,11 @@ i915_request_active_engine(struct i915_request *rq,
                ret = true;
        }
 
-       spin_unlock_irq(&locked->active.lock);
+       spin_unlock_irq(&locked->sched_engine->lock);
 
        return ret;
 }
 
-
-static void remove_from_engine(struct i915_request *rq)
-{
-       struct intel_engine_cs *engine, *locked;
-
-       /*
-        * Virtual engines complicate acquiring the engine timeline lock,
-        * as their rq->engine pointer is not stable until under that
-        * engine lock. The simple ploy we use is to take the lock then
-        * check that the rq still belongs to the newly locked engine.
-        */
-       locked = READ_ONCE(rq->engine);
-       spin_lock_irq(&locked->active.lock);
-       while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
-               spin_unlock(&locked->active.lock);
-               spin_lock(&engine->active.lock);
-               locked = engine;
-       }
-       list_del_init(&rq->sched.link);
-
-       clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-       clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
-
-       /* Prevent further __await_execution() registering a cb, then flush */
-       set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
-
-       spin_unlock_irq(&locked->active.lock);
-
-       __notify_execute_cb_imm(rq);
-}
-
 static void __rq_init_watchdog(struct i915_request *rq)
 {
        rq->watchdog.timer.function = NULL;
@@ -417,8 +351,7 @@ bool i915_request_retire(struct i915_request *rq)
         * after removing the breadcrumb and signaling it, so that we do not
         * inadvertently attach the breadcrumb to a completed request.
         */
-       if (!list_empty(&rq->sched.link))
-               remove_from_engine(rq);
+       rq->engine->remove_active_request(rq);
        GEM_BUG_ON(!llist_empty(&rq->execute_cb));
 
        __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
@@ -443,6 +376,7 @@ void i915_request_retire_upto(struct i915_request *rq)
 
        do {
                tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
+               GEM_BUG_ON(!i915_request_completed(tmp));
        } while (i915_request_retire(tmp) && tmp != rq);
 }
 
@@ -517,19 +451,14 @@ static bool __request_in_flight(const struct i915_request *signal)
 static int
 __await_execution(struct i915_request *rq,
                  struct i915_request *signal,
-                 void (*hook)(struct i915_request *rq,
-                              struct dma_fence *signal),
                  gfp_t gfp)
 {
        struct execute_cb *cb;
 
-       if (i915_request_is_active(signal)) {
-               if (hook)
-                       hook(rq, &signal->fence);
+       if (i915_request_is_active(signal))
                return 0;
-       }
 
-       cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
+       cb = kmem_cache_alloc(slab_execute_cbs, gfp);
        if (!cb)
                return -ENOMEM;
 
@@ -537,12 +466,6 @@ __await_execution(struct i915_request *rq,
        i915_sw_fence_await(cb->fence);
        init_irq_work(&cb->work, irq_execute_cb);
 
-       if (hook) {
-               cb->hook = hook;
-               cb->signal = i915_request_get(signal);
-               cb->work.func = irq_execute_cb_hook;
-       }
-
        /*
         * Register the callback first, then see if the signaler is already
         * active. This ensures that if we race with the
@@ -559,7 +482,7 @@ __await_execution(struct i915_request *rq,
        if (llist_add(&cb->work.node.llist, &signal->execute_cb)) {
                if (i915_request_is_active(signal) ||
                    __request_in_flight(signal))
-                       __notify_execute_cb_imm(signal);
+                       i915_request_notify_execute_cb_imm(signal);
        }
 
        return 0;
@@ -637,7 +560,7 @@ bool __i915_request_submit(struct i915_request *request)
        RQ_TRACE(request, "\n");
 
        GEM_BUG_ON(!irqs_disabled());
-       lockdep_assert_held(&engine->active.lock);
+       lockdep_assert_held(&engine->sched_engine->lock);
 
        /*
         * With the advent of preempt-to-busy, we frequently encounter
@@ -649,7 +572,7 @@ bool __i915_request_submit(struct i915_request *request)
         *
         * We must remove the request from the caller's priority queue,
         * and the caller must only call us when the request is in their
-        * priority queue, under the active.lock. This ensures that the
+        * priority queue, under the sched_engine->lock. This ensures that the
         * request has *not* yet been retired and we can safely move
         * the request into the engine->active.list where it will be
         * dropped upon retiring. (Otherwise if resubmit a *retired*
@@ -690,11 +613,15 @@ bool __i915_request_submit(struct i915_request *request)
                                     request->ring->vaddr + request->postfix);
 
        trace_i915_request_execute(request);
-       engine->serial++;
+       if (engine->bump_serial)
+               engine->bump_serial(engine);
+       else
+               engine->serial++;
+
        result = true;
 
        GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
-       list_move_tail(&request->sched.link, &engine->active.requests);
+       engine->add_active_request(request);
 active:
        clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
        set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
@@ -724,11 +651,11 @@ void i915_request_submit(struct i915_request *request)
        unsigned long flags;
 
        /* Will be called from irq-context when using foreign fences. */
-       spin_lock_irqsave(&engine->active.lock, flags);
+       spin_lock_irqsave(&engine->sched_engine->lock, flags);
 
        __i915_request_submit(request);
 
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 }
 
 void __i915_request_unsubmit(struct i915_request *request)
@@ -742,7 +669,7 @@ void __i915_request_unsubmit(struct i915_request *request)
        RQ_TRACE(request, "\n");
 
        GEM_BUG_ON(!irqs_disabled());
-       lockdep_assert_held(&engine->active.lock);
+       lockdep_assert_held(&engine->sched_engine->lock);
 
        /*
         * Before we remove this breadcrumb from the signal list, we have
@@ -775,23 +702,11 @@ void i915_request_unsubmit(struct i915_request *request)
        unsigned long flags;
 
        /* Will be called from irq-context when using foreign fences. */
-       spin_lock_irqsave(&engine->active.lock, flags);
+       spin_lock_irqsave(&engine->sched_engine->lock, flags);
 
        __i915_request_unsubmit(request);
 
-       spin_unlock_irqrestore(&engine->active.lock, flags);
-}
-
-static void __cancel_request(struct i915_request *rq)
-{
-       struct intel_engine_cs *engine = NULL;
-
-       i915_request_active_engine(rq, &engine);
-
-       if (engine && intel_engine_pulse(engine))
-               intel_gt_handle_error(engine->gt, engine->mask, 0,
-                                     "request cancellation by %s",
-                                     current->comm);
+       spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
 }
 
 void i915_request_cancel(struct i915_request *rq, int error)
@@ -801,7 +716,7 @@ void i915_request_cancel(struct i915_request *rq, int error)
 
        set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
 
-       __cancel_request(rq);
+       intel_context_cancel_request(rq->context, rq);
 }
 
 static int __i915_sw_fence_call
@@ -889,7 +804,7 @@ request_alloc_slow(struct intel_timeline *tl,
        rq = list_first_entry(&tl->requests, typeof(*rq), link);
        i915_request_retire(rq);
 
-       rq = kmem_cache_alloc(global.slab_requests,
+       rq = kmem_cache_alloc(slab_requests,
                              gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        if (rq)
                return rq;
@@ -902,7 +817,7 @@ request_alloc_slow(struct intel_timeline *tl,
        retire_requests(tl);
 
 out:
-       return kmem_cache_alloc(global.slab_requests, gfp);
+       return kmem_cache_alloc(slab_requests, gfp);
 }
 
 static void __i915_request_ctor(void *arg)
@@ -963,7 +878,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
         *
         * Do not use kmem_cache_zalloc() here!
         */
-       rq = kmem_cache_alloc(global.slab_requests,
+       rq = kmem_cache_alloc(slab_requests,
                              gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
        if (unlikely(!rq)) {
                rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
@@ -973,7 +888,19 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
                }
        }
 
-       rq->context = ce;
+       /*
+        * Hold a reference to the intel_context over life of an i915_request.
+        * Without this an i915_request can exist after the context has been
+        * destroyed (e.g. request retired, context closed, but user space holds
+        * a reference to the request from an out fence). In the case of GuC
+        * submission + virtual engine, the engine that the request references
+        * is also destroyed, which can trigger a bad pointer deref in fence ops
+        * (e.g. i915_fence_get_driver_name). We could likely change these
+        * functions to avoid touching the engine but let's just be safe and
+        * hold the intel_context reference. In execlist mode the request always
+        * eventually points to a physical engine so this isn't an issue.
+        */
+       rq->context = intel_context_get(ce);
        rq->engine = ce->engine;
        rq->ring = ce->ring;
        rq->execution_mask = ce->engine->mask;
@@ -996,6 +923,8 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 
        rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
 
+       rq->guc_prio = GUC_PRIO_INIT;
+
        /* We bump the ref for the fence chain */
        i915_sw_fence_reinit(&i915_request_get(rq)->submit);
        i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
@@ -1050,7 +979,8 @@ err_unwind:
        GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
 
 err_free:
-       kmem_cache_free(global.slab_requests, rq);
+       intel_context_put(ce);
+       kmem_cache_free(slab_requests, rq);
 err_unreserve:
        intel_context_unpin(ce);
        return ERR_PTR(ret);
@@ -1253,7 +1183,7 @@ emit_semaphore_wait(struct i915_request *to,
                goto await_fence;
 
        /* Only submit our spinner after the signaler is running! */
-       if (__await_execution(to, from, NULL, gfp))
+       if (__await_execution(to, from, gfp))
                goto await_fence;
 
        if (__emit_semaphore_wait(to, from, from->fence.seqno))
@@ -1284,16 +1214,14 @@ static int intel_timeline_sync_set_start(struct intel_timeline *tl,
 
 static int
 __i915_request_await_execution(struct i915_request *to,
-                              struct i915_request *from,
-                              void (*hook)(struct i915_request *rq,
-                                           struct dma_fence *signal))
+                              struct i915_request *from)
 {
        int err;
 
        GEM_BUG_ON(intel_context_is_barrier(from->context));
 
        /* Submit both requests at the same time */
-       err = __await_execution(to, from, hook, I915_FENCE_GFP);
+       err = __await_execution(to, from, I915_FENCE_GFP);
        if (err)
                return err;
 
@@ -1343,7 +1271,7 @@ __i915_request_await_execution(struct i915_request *to,
        }
 
        /* Couple the dependency tree for PI on this exposed to->fence */
-       if (to->engine->schedule) {
+       if (to->engine->sched_engine->schedule) {
                err = i915_sched_node_add_dependency(&to->sched,
                                                     &from->sched,
                                                     I915_DEPENDENCY_WEAK);
@@ -1406,9 +1334,7 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
 
 int
 i915_request_await_execution(struct i915_request *rq,
-                            struct dma_fence *fence,
-                            void (*hook)(struct i915_request *rq,
-                                         struct dma_fence *signal))
+                            struct dma_fence *fence)
 {
        struct dma_fence **child = &fence;
        unsigned int nchild = 1;
@@ -1439,8 +1365,7 @@ i915_request_await_execution(struct i915_request *rq,
 
                if (dma_fence_is_i915(fence))
                        ret = __i915_request_await_execution(rq,
-                                                            to_request(fence),
-                                                            hook);
+                                                            to_request(fence));
                else
                        ret = i915_request_await_external(rq, fence);
                if (ret < 0)
@@ -1466,7 +1391,7 @@ await_request_submit(struct i915_request *to, struct i915_request *from)
                                                        &from->submit,
                                                        I915_FENCE_GFP);
        else
-               return __i915_request_await_execution(to, from, NULL);
+               return __i915_request_await_execution(to, from);
 }
 
 static int
@@ -1482,7 +1407,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
                return 0;
        }
 
-       if (to->engine->schedule) {
+       if (to->engine->sched_engine->schedule) {
                ret = i915_sched_node_add_dependency(&to->sched,
                                                     &from->sched,
                                                     I915_DEPENDENCY_EXTERNAL);
@@ -1490,7 +1415,8 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
                        return ret;
        }
 
-       if (is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
+       if (!intel_engine_uses_guc(to->engine) &&
+           is_power_of_2(to->execution_mask | READ_ONCE(from->execution_mask)))
                ret = await_request_submit(to, from);
        else
                ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
@@ -1649,6 +1575,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
        prev = to_request(__i915_active_fence_set(&timeline->last_request,
                                                  &rq->fence));
        if (prev && !__i915_request_is_complete(prev)) {
+               bool uses_guc = intel_engine_uses_guc(rq->engine);
+
                /*
                 * The requests are supposed to be kept in order. However,
                 * we need to be wary in case the timeline->last_request
@@ -1659,7 +1587,9 @@ __i915_request_add_to_timeline(struct i915_request *rq)
                           i915_seqno_passed(prev->fence.seqno,
                                             rq->fence.seqno));
 
-               if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
+               if ((!uses_guc &&
+                    is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) ||
+                   (uses_guc && prev->context == rq->context))
                        i915_sw_fence_await_sw_fence(&rq->submit,
                                                     &prev->submit,
                                                     &rq->submitq);
@@ -1667,7 +1597,7 @@ __i915_request_add_to_timeline(struct i915_request *rq)
                        __i915_sw_fence_await_dma_fence(&rq->submit,
                                                        &prev->fence,
                                                        &rq->dmaq);
-               if (rq->engine->schedule)
+               if (rq->engine->sched_engine->schedule)
                        __i915_sched_node_add_dependency(&rq->sched,
                                                         &prev->sched,
                                                         &rq->dep,
@@ -1739,8 +1669,8 @@ void __i915_request_queue(struct i915_request *rq,
         * decide whether to preempt the entire chain so that it is ready to
         * run at the earliest possible convenience.
         */
-       if (attr && rq->engine->schedule)
-               rq->engine->schedule(rq, attr);
+       if (attr && rq->engine->sched_engine->schedule)
+               rq->engine->sched_engine->schedule(rq, attr);
 
        local_bh_disable();
        __i915_request_queue_bh(rq);
@@ -2100,31 +2030,61 @@ void i915_request_show(struct drm_printer *m,
                   name);
 }
 
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftests/mock_request.c"
-#include "selftests/i915_request.c"
-#endif
+static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
+{
+       u32 ring = ENGINE_READ(engine, RING_START);
+
+       return ring == i915_ggtt_offset(rq->ring->vma);
+}
 
-static void i915_global_request_shrink(void)
+static bool match_ring(struct i915_request *rq)
 {
-       kmem_cache_shrink(global.slab_execute_cbs);
-       kmem_cache_shrink(global.slab_requests);
+       struct intel_engine_cs *engine;
+       bool found;
+       int i;
+
+       if (!intel_engine_is_virtual(rq->engine))
+               return engine_match_ring(rq->engine, rq);
+
+       found = false;
+       i = 0;
+       while ((engine = intel_engine_get_sibling(rq->engine, i++))) {
+               found = engine_match_ring(engine, rq);
+               if (found)
+                       break;
+       }
+
+       return found;
 }
 
-static void i915_global_request_exit(void)
+enum i915_request_state i915_test_request_state(struct i915_request *rq)
 {
-       kmem_cache_destroy(global.slab_execute_cbs);
-       kmem_cache_destroy(global.slab_requests);
+       if (i915_request_completed(rq))
+               return I915_REQUEST_COMPLETE;
+
+       if (!i915_request_started(rq))
+               return I915_REQUEST_PENDING;
+
+       if (match_ring(rq))
+               return I915_REQUEST_ACTIVE;
+
+       return I915_REQUEST_QUEUED;
 }
 
-static struct i915_global_request global = { {
-       .shrink = i915_global_request_shrink,
-       .exit = i915_global_request_exit,
-} };
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_request.c"
+#include "selftests/i915_request.c"
+#endif
+
+void i915_request_module_exit(void)
+{
+       kmem_cache_destroy(slab_execute_cbs);
+       kmem_cache_destroy(slab_requests);
+}
 
-int __init i915_global_request_init(void)
+int __init i915_request_module_init(void)
 {
-       global.slab_requests =
+       slab_requests =
                kmem_cache_create("i915_request",
                                  sizeof(struct i915_request),
                                  __alignof__(struct i915_request),
@@ -2132,20 +2092,19 @@ int __init i915_global_request_init(void)
                                  SLAB_RECLAIM_ACCOUNT |
                                  SLAB_TYPESAFE_BY_RCU,
                                  __i915_request_ctor);
-       if (!global.slab_requests)
+       if (!slab_requests)
                return -ENOMEM;
 
-       global.slab_execute_cbs = KMEM_CACHE(execute_cb,
+       slab_execute_cbs = KMEM_CACHE(execute_cb,
                                             SLAB_HWCACHE_ALIGN |
                                             SLAB_RECLAIM_ACCOUNT |
                                             SLAB_TYPESAFE_BY_RCU);
-       if (!global.slab_execute_cbs)
+       if (!slab_execute_cbs)
                goto err_requests;
 
-       i915_global_register(&global.base);
        return 0;
 
 err_requests:
-       kmem_cache_destroy(global.slab_requests);
+       kmem_cache_destroy(slab_requests);
        return -ENOMEM;
 }
index 270f6cd..1bc1349 100644 (file)
@@ -285,6 +285,23 @@ struct i915_request {
                struct hrtimer timer;
        } watchdog;
 
+       /*
+        * Requests may need to be stalled when using GuC submission, waiting
+        * for certain GuC operations to complete. If that is the case, the
+        * request is added to a per-context list of stalled requests. The
+        * list_head below is the link in that list.
+        */
+       struct list_head guc_fence_link;
+
+       /**
+        * Priority level while the request is inflight. Differs from i915
+        * scheduler priority. See comment above
+        * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details.
+        */
+#define        GUC_PRIO_INIT   0xff
+#define        GUC_PRIO_FINI   0xfe
+       u8 guc_prio;
+
        I915_SELFTEST_DECLARE(struct {
                struct list_head link;
                unsigned long delay;
@@ -352,9 +369,7 @@ int i915_request_await_object(struct i915_request *to,
 int i915_request_await_dma_fence(struct i915_request *rq,
                                 struct dma_fence *fence);
 int i915_request_await_execution(struct i915_request *rq,
-                                struct dma_fence *fence,
-                                void (*hook)(struct i915_request *rq,
-                                             struct dma_fence *signal));
+                                struct dma_fence *fence);
 
 void i915_request_add(struct i915_request *rq);
 
@@ -613,7 +628,7 @@ i915_request_active_timeline(const struct i915_request *rq)
         * this submission.
         */
        return rcu_dereference_protected(rq->timeline,
-                                        lockdep_is_held(&rq->engine->active.lock));
+                                        lockdep_is_held(&rq->engine->sched_engine->lock));
 }
 
 static inline u32
@@ -641,4 +656,19 @@ bool
 i915_request_active_engine(struct i915_request *rq,
                           struct intel_engine_cs **active);
 
+void i915_request_notify_execute_cb_imm(struct i915_request *rq);
+
+enum i915_request_state {
+       I915_REQUEST_UNKNOWN = 0,
+       I915_REQUEST_COMPLETE,
+       I915_REQUEST_PENDING,
+       I915_REQUEST_QUEUED,
+       I915_REQUEST_ACTIVE,
+};
+
+enum i915_request_state i915_test_request_state(struct i915_request *rq);
+
+void i915_request_module_exit(void);
+int i915_request_module_init(void);
+
 #endif /* I915_REQUEST_H */
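
A small usage sketch for the i915_test_request_state() query and the i915_request_state
enum declared above; the helper name sketch_request_state_name() is hypothetical and only
shows how a debug path might consume the enum.

/* Hypothetical consumer of i915_test_request_state(), e.g. for debug output. */
static const char *sketch_request_state_name(struct i915_request *rq)
{
	switch (i915_test_request_state(rq)) {
	case I915_REQUEST_COMPLETE:
		return "complete";
	case I915_REQUEST_PENDING:
		return "pending";
	case I915_REQUEST_QUEUED:
		return "queued";
	case I915_REQUEST_ACTIVE:
		return "active";
	case I915_REQUEST_UNKNOWN:
	default:
		return "unknown";
	}
}
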
index 69e9e6c..4a6712d 100644 (file)
@@ -6,6 +6,9 @@
 
 #include "i915_scatterlist.h"
 
+#include "i915_buddy.h"
+#include "i915_ttm_buddy_manager.h"
+
 #include <drm/drm_mm.h>
 
 #include <linux/slab.h>
@@ -104,6 +107,83 @@ struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
        return st;
 }
 
+/**
+ * i915_sg_from_buddy_resource - Create an sg_table from a struct
+ * i915_buddy_block list
+ * @res: The struct i915_ttm_buddy_resource.
+ * @region_start: An offset to add to the dma addresses of the sg list.
+ *
+ * Create a struct sg_table, initializing it from a struct i915_buddy_block
+ * list, taking the maximum segment length into account, splitting into
+ * segments if necessary.
+ *
+ * Return: A pointer to a kmalloced struct sg_table on success, negative
+ * error code cast to an error pointer on failure.
+ */
+struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res,
+                                            u64 region_start)
+{
+       struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
+       const u64 size = res->num_pages << PAGE_SHIFT;
+       const u64 max_segment = rounddown(UINT_MAX, PAGE_SIZE);
+       struct i915_buddy_mm *mm = bman_res->mm;
+       struct list_head *blocks = &bman_res->blocks;
+       struct i915_buddy_block *block;
+       struct scatterlist *sg;
+       struct sg_table *st;
+       resource_size_t prev_end;
+
+       GEM_BUG_ON(list_empty(blocks));
+
+       st = kmalloc(sizeof(*st), GFP_KERNEL);
+       if (!st)
+               return ERR_PTR(-ENOMEM);
+
+       if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) {
+               kfree(st);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       sg = st->sgl;
+       st->nents = 0;
+       prev_end = (resource_size_t)-1;
+
+       list_for_each_entry(block, blocks, link) {
+               u64 block_size, offset;
+
+               block_size = min_t(u64, size, i915_buddy_block_size(mm, block));
+               offset = i915_buddy_block_offset(block);
+
+               while (block_size) {
+                       u64 len;
+
+                       if (offset != prev_end || sg->length >= max_segment) {
+                               if (st->nents)
+                                       sg = __sg_next(sg);
+
+                               sg_dma_address(sg) = region_start + offset;
+                               sg_dma_len(sg) = 0;
+                               sg->length = 0;
+                               st->nents++;
+                       }
+
+                       len = min(block_size, max_segment - sg->length);
+                       sg->length += len;
+                       sg_dma_len(sg) += len;
+
+                       offset += len;
+                       block_size -= len;
+
+                       prev_end = offset;
+               }
+       }
+
+       sg_mark_end(sg);
+       i915_sg_trim(st);
+
+       return st;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/scatterlist.c"
 #endif
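
The coalesce/split loop above can be illustrated with a standalone userspace
approximation: contiguous blocks merge into one segment, and a segment is split when it
would exceed the maximum segment length. Everything here (the seg struct, the
MAX_SEGMENT value, the example offsets) is hypothetical; it mirrors the arithmetic only,
not the sg_table plumbing.

/* Standalone approximation of the coalesce/split arithmetic used above. */
#include <stdint.h>
#include <stdio.h>

#define MAX_SEGMENT (1u << 20)	/* hypothetical 1 MiB segment limit */

struct seg { uint64_t start; uint64_t len; };

/*
 * Append [offset, offset + size) to segs[], merging with the previous segment
 * when contiguous and splitting at MAX_SEGMENT boundaries. Returns the new
 * segment count.
 */
static unsigned int add_block(struct seg *segs, unsigned int nsegs,
			      uint64_t offset, uint64_t size, uint64_t *prev_end)
{
	while (size) {
		uint64_t len;

		/* start a new segment on a discontinuity or when the last one is full */
		if (!nsegs || offset != *prev_end ||
		    segs[nsegs - 1].len >= MAX_SEGMENT) {
			segs[nsegs].start = offset;
			segs[nsegs].len = 0;
			nsegs++;
		}

		len = MAX_SEGMENT - segs[nsegs - 1].len;
		if (len > size)
			len = size;

		segs[nsegs - 1].len += len;
		offset += len;
		size -= len;
		*prev_end = offset;
	}

	return nsegs;
}

int main(void)
{
	struct seg segs[16];
	uint64_t prev_end = 0;
	unsigned int n = 0;

	n = add_block(segs, n, 0x0000, 0x1000, &prev_end);   /* new segment    */
	n = add_block(segs, n, 0x1000, 0x1000, &prev_end);   /* merged         */
	n = add_block(segs, n, 0x8000, 0x200000, &prev_end); /* split at 1 MiB */

	for (unsigned int i = 0; i < n; i++)
		printf("seg %u: start=%#llx len=%#llx\n", i,
		       (unsigned long long)segs[i].start,
		       (unsigned long long)segs[i].len);
	return 0;
}
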
index 5acca45..b8bd592 100644 (file)
@@ -14,6 +14,7 @@
 #include "i915_gem.h"
 
 struct drm_mm_node;
+struct ttm_resource;
 
 /*
  * Optimised SGL iterator for GEM objects
@@ -145,4 +146,8 @@ bool i915_sg_trim(struct sg_table *orig_st);
 
 struct sg_table *i915_sg_from_mm_node(const struct drm_mm_node *node,
                                      u64 region_start);
+
+struct sg_table *i915_sg_from_buddy_resource(struct ttm_resource *res,
+                                            u64 region_start);
+
 #endif
index efa638c..762127d 100644 (file)
@@ -7,15 +7,11 @@
 #include <linux/mutex.h>
 
 #include "i915_drv.h"
-#include "i915_globals.h"
 #include "i915_request.h"
 #include "i915_scheduler.h"
 
-static struct i915_global_scheduler {
-       struct i915_global base;
-       struct kmem_cache *slab_dependencies;
-       struct kmem_cache *slab_priorities;
-} global;
+static struct kmem_cache *slab_dependencies;
+static struct kmem_cache *slab_priorities;
 
 static DEFINE_SPINLOCK(schedule_lock);
 
@@ -40,7 +36,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
        return rb_entry(rb, struct i915_priolist, node);
 }
 
-static void assert_priolists(struct intel_engine_execlists * const execlists)
+static void assert_priolists(struct i915_sched_engine * const sched_engine)
 {
        struct rb_node *rb;
        long last_prio;
@@ -48,11 +44,11 @@ static void assert_priolists(struct intel_engine_execlists * const execlists)
        if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
                return;
 
-       GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
-                  rb_first(&execlists->queue.rb_root));
+       GEM_BUG_ON(rb_first_cached(&sched_engine->queue) !=
+                  rb_first(&sched_engine->queue.rb_root));
 
        last_prio = INT_MAX;
-       for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+       for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) {
                const struct i915_priolist *p = to_priolist(rb);
 
                GEM_BUG_ON(p->priority > last_prio);
@@ -61,23 +57,22 @@ static void assert_priolists(struct intel_engine_execlists * const execlists)
 }
 
 struct list_head *
-i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio)
+i915_sched_lookup_priolist(struct i915_sched_engine *sched_engine, int prio)
 {
-       struct intel_engine_execlists * const execlists = &engine->execlists;
        struct i915_priolist *p;
        struct rb_node **parent, *rb;
        bool first = true;
 
-       lockdep_assert_held(&engine->active.lock);
-       assert_priolists(execlists);
+       lockdep_assert_held(&sched_engine->lock);
+       assert_priolists(sched_engine);
 
-       if (unlikely(execlists->no_priolist))
+       if (unlikely(sched_engine->no_priolist))
                prio = I915_PRIORITY_NORMAL;
 
 find_priolist:
        /* most positive priority is scheduled first, equal priorities fifo */
        rb = NULL;
-       parent = &execlists->queue.rb_root.rb_node;
+       parent = &sched_engine->queue.rb_root.rb_node;
        while (*parent) {
                rb = *parent;
                p = to_priolist(rb);
@@ -92,9 +87,9 @@ find_priolist:
        }
 
        if (prio == I915_PRIORITY_NORMAL) {
-               p = &execlists->default_priolist;
+               p = &sched_engine->default_priolist;
        } else {
-               p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
+               p = kmem_cache_alloc(slab_priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
                if (unlikely(!p)) {
                        prio = I915_PRIORITY_NORMAL; /* recurses just once */
@@ -107,7 +102,7 @@ find_priolist:
                         * requests, so if userspace lied about their
                         * dependencies that reordering may be visible.
                         */
-                       execlists->no_priolist = true;
+                       sched_engine->no_priolist = true;
                        goto find_priolist;
                }
        }
@@ -116,27 +111,27 @@ find_priolist:
        INIT_LIST_HEAD(&p->requests);
 
        rb_link_node(&p->node, rb, parent);
-       rb_insert_color_cached(&p->node, &execlists->queue, first);
+       rb_insert_color_cached(&p->node, &sched_engine->queue, first);
 
        return &p->requests;
 }
 
 void __i915_priolist_free(struct i915_priolist *p)
 {
-       kmem_cache_free(global.slab_priorities, p);
+       kmem_cache_free(slab_priorities, p);
 }
 
 struct sched_cache {
        struct list_head *priolist;
 };
 
-static struct intel_engine_cs *
-sched_lock_engine(const struct i915_sched_node *node,
-                 struct intel_engine_cs *locked,
+static struct i915_sched_engine *
+lock_sched_engine(struct i915_sched_node *node,
+                 struct i915_sched_engine *locked,
                  struct sched_cache *cache)
 {
        const struct i915_request *rq = node_to_request(node);
-       struct intel_engine_cs *engine;
+       struct i915_sched_engine *sched_engine;
 
        GEM_BUG_ON(!locked);
 
@@ -146,81 +141,22 @@ sched_lock_engine(const struct i915_sched_node *node,
         * engine lock. The simple ploy we use is to take the lock then
         * check that the rq still belongs to the newly locked engine.
         */
-       while (locked != (engine = READ_ONCE(rq->engine))) {
-               spin_unlock(&locked->active.lock);
+       while (locked != (sched_engine = READ_ONCE(rq->engine)->sched_engine)) {
+               spin_unlock(&locked->lock);
                memset(cache, 0, sizeof(*cache));
-               spin_lock(&engine->active.lock);
-               locked = engine;
+               spin_lock(&sched_engine->lock);
+               locked = sched_engine;
        }
 
-       GEM_BUG_ON(locked != engine);
+       GEM_BUG_ON(locked != sched_engine);
        return locked;
 }
 
-static inline int rq_prio(const struct i915_request *rq)
-{
-       return rq->sched.attr.priority;
-}
-
-static inline bool need_preempt(int prio, int active)
-{
-       /*
-        * Allow preemption of low -> normal -> high, but we do
-        * not allow low priority tasks to preempt other low priority
-        * tasks under the impression that latency for low priority
-        * tasks does not matter (as much as background throughput),
-        * so kiss.
-        */
-       return prio >= max(I915_PRIORITY_NORMAL, active);
-}
-
-static void kick_submission(struct intel_engine_cs *engine,
-                           const struct i915_request *rq,
-                           int prio)
-{
-       const struct i915_request *inflight;
-
-       /*
-        * We only need to kick the tasklet once for the high priority
-        * new context we add into the queue.
-        */
-       if (prio <= engine->execlists.queue_priority_hint)
-               return;
-
-       rcu_read_lock();
-
-       /* Nothing currently active? We're overdue for a submission! */
-       inflight = execlists_active(&engine->execlists);
-       if (!inflight)
-               goto unlock;
-
-       /*
-        * If we are already the currently executing context, don't
-        * bother evaluating if we should preempt ourselves.
-        */
-       if (inflight->context == rq->context)
-               goto unlock;
-
-       ENGINE_TRACE(engine,
-                    "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
-                    prio,
-                    rq->fence.context, rq->fence.seqno,
-                    inflight->fence.context, inflight->fence.seqno,
-                    inflight->sched.attr.priority);
-
-       engine->execlists.queue_priority_hint = prio;
-       if (need_preempt(prio, rq_prio(inflight)))
-               tasklet_hi_schedule(&engine->execlists.tasklet);
-
-unlock:
-       rcu_read_unlock();
-}
-
 static void __i915_schedule(struct i915_sched_node *node,
                            const struct i915_sched_attr *attr)
 {
        const int prio = max(attr->priority, node->attr.priority);
-       struct intel_engine_cs *engine;
+       struct i915_sched_engine *sched_engine;
        struct i915_dependency *dep, *p;
        struct i915_dependency stack;
        struct sched_cache cache;
@@ -295,23 +231,31 @@ static void __i915_schedule(struct i915_sched_node *node,
        }
 
        memset(&cache, 0, sizeof(cache));
-       engine = node_to_request(node)->engine;
-       spin_lock(&engine->active.lock);
+       sched_engine = node_to_request(node)->engine->sched_engine;
+       spin_lock(&sched_engine->lock);
 
        /* Fifo and depth-first replacement ensure our deps execute before us */
-       engine = sched_lock_engine(node, engine, &cache);
+       sched_engine = lock_sched_engine(node, sched_engine, &cache);
        list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
+               struct i915_request *from = container_of(dep->signaler,
+                                                        struct i915_request,
+                                                        sched);
                INIT_LIST_HEAD(&dep->dfs_link);
 
                node = dep->signaler;
-               engine = sched_lock_engine(node, engine, &cache);
-               lockdep_assert_held(&engine->active.lock);
+               sched_engine = lock_sched_engine(node, sched_engine, &cache);
+               lockdep_assert_held(&sched_engine->lock);
 
                /* Recheck after acquiring the engine->timeline.lock */
                if (prio <= node->attr.priority || node_signaled(node))
                        continue;
 
-               GEM_BUG_ON(node_to_request(node)->engine != engine);
+               GEM_BUG_ON(node_to_request(node)->engine->sched_engine !=
+                          sched_engine);
+
+               /* Must be called before changing the node's priority */
+               if (sched_engine->bump_inflight_request_prio)
+                       sched_engine->bump_inflight_request_prio(from, prio);
 
                WRITE_ONCE(node->attr.priority, prio);
 
@@ -329,16 +273,17 @@ static void __i915_schedule(struct i915_sched_node *node,
                if (i915_request_in_priority_queue(node_to_request(node))) {
                        if (!cache.priolist)
                                cache.priolist =
-                                       i915_sched_lookup_priolist(engine,
+                                       i915_sched_lookup_priolist(sched_engine,
                                                                   prio);
                        list_move_tail(&node->link, cache.priolist);
                }
 
                /* Defer (tasklet) submission until after all of our updates. */
-               kick_submission(engine, node_to_request(node), prio);
+               if (sched_engine->kick_backend)
+                       sched_engine->kick_backend(node_to_request(node), prio);
        }
 
-       spin_unlock(&engine->active.lock);
+       spin_unlock(&sched_engine->lock);
 }
 
 void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
@@ -371,13 +316,13 @@ void i915_sched_node_reinit(struct i915_sched_node *node)
 static struct i915_dependency *
 i915_dependency_alloc(void)
 {
-       return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
+       return kmem_cache_alloc(slab_dependencies, GFP_KERNEL);
 }
 
 static void
 i915_dependency_free(struct i915_dependency *dep)
 {
-       kmem_cache_free(global.slab_dependencies, dep);
+       kmem_cache_free(slab_dependencies, dep);
 }
 
 bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
@@ -489,39 +434,78 @@ void i915_request_show_with_schedule(struct drm_printer *m,
        rcu_read_unlock();
 }
 
-static void i915_global_scheduler_shrink(void)
+static void default_destroy(struct kref *kref)
+{
+       struct i915_sched_engine *sched_engine =
+               container_of(kref, typeof(*sched_engine), ref);
+
+       tasklet_kill(&sched_engine->tasklet); /* flush the callback */
+       kfree(sched_engine);
+}
+
+static bool default_disabled(struct i915_sched_engine *sched_engine)
 {
-       kmem_cache_shrink(global.slab_dependencies);
-       kmem_cache_shrink(global.slab_priorities);
+       return false;
 }
 
-static void i915_global_scheduler_exit(void)
+struct i915_sched_engine *
+i915_sched_engine_create(unsigned int subclass)
 {
-       kmem_cache_destroy(global.slab_dependencies);
-       kmem_cache_destroy(global.slab_priorities);
+       struct i915_sched_engine *sched_engine;
+
+       sched_engine = kzalloc(sizeof(*sched_engine), GFP_KERNEL);
+       if (!sched_engine)
+               return NULL;
+
+       kref_init(&sched_engine->ref);
+
+       sched_engine->queue = RB_ROOT_CACHED;
+       sched_engine->queue_priority_hint = INT_MIN;
+       sched_engine->destroy = default_destroy;
+       sched_engine->disabled = default_disabled;
+
+       INIT_LIST_HEAD(&sched_engine->requests);
+       INIT_LIST_HEAD(&sched_engine->hold);
+
+       spin_lock_init(&sched_engine->lock);
+       lockdep_set_subclass(&sched_engine->lock, subclass);
+
+       /*
+        * Due to an interesting quirk in lockdep's internal debug tracking,
+        * after setting a subclass we must ensure the lock is used. Otherwise,
+        * nr_unused_locks is incremented once too often.
+        */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       local_irq_disable();
+       lock_map_acquire(&sched_engine->lock.dep_map);
+       lock_map_release(&sched_engine->lock.dep_map);
+       local_irq_enable();
+#endif
+
+       return sched_engine;
 }
 
-static struct i915_global_scheduler global = { {
-       .shrink = i915_global_scheduler_shrink,
-       .exit = i915_global_scheduler_exit,
-} };
+void i915_scheduler_module_exit(void)
+{
+       kmem_cache_destroy(slab_dependencies);
+       kmem_cache_destroy(slab_priorities);
+}
 
-int __init i915_global_scheduler_init(void)
+int __init i915_scheduler_module_init(void)
 {
-       global.slab_dependencies = KMEM_CACHE(i915_dependency,
+       slab_dependencies = KMEM_CACHE(i915_dependency,
                                              SLAB_HWCACHE_ALIGN |
                                              SLAB_TYPESAFE_BY_RCU);
-       if (!global.slab_dependencies)
+       if (!slab_dependencies)
                return -ENOMEM;
 
-       global.slab_priorities = KMEM_CACHE(i915_priolist, 0);
-       if (!global.slab_priorities)
+       slab_priorities = KMEM_CACHE(i915_priolist, 0);
+       if (!slab_priorities)
                goto err_priorities;
 
-       i915_global_register(&global.base);
        return 0;
 
 err_priorities:
-       kmem_cache_destroy(global.slab_priorities);
+       kmem_cache_destroy(slab_priorities);
        return -ENOMEM;
 }
index 858a093..0b9b86a 100644 (file)
@@ -39,7 +39,7 @@ void i915_schedule(struct i915_request *request,
                   const struct i915_sched_attr *attr);
 
 struct list_head *
-i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
+i915_sched_lookup_priolist(struct i915_sched_engine *sched_engine, int prio);
 
 void __i915_priolist_free(struct i915_priolist *p);
 static inline void i915_priolist_free(struct i915_priolist *p)
@@ -48,9 +48,61 @@ static inline void i915_priolist_free(struct i915_priolist *p)
                __i915_priolist_free(p);
 }
 
+struct i915_sched_engine *
+i915_sched_engine_create(unsigned int subclass);
+
+static inline struct i915_sched_engine *
+i915_sched_engine_get(struct i915_sched_engine *sched_engine)
+{
+       kref_get(&sched_engine->ref);
+       return sched_engine;
+}
+
+static inline void
+i915_sched_engine_put(struct i915_sched_engine *sched_engine)
+{
+       kref_put(&sched_engine->ref, sched_engine->destroy);
+}
+
+static inline bool
+i915_sched_engine_is_empty(struct i915_sched_engine *sched_engine)
+{
+       return RB_EMPTY_ROOT(&sched_engine->queue.rb_root);
+}
+
+static inline void
+i915_sched_engine_reset_on_empty(struct i915_sched_engine *sched_engine)
+{
+       if (i915_sched_engine_is_empty(sched_engine))
+               sched_engine->no_priolist = false;
+}
+
+static inline void
+i915_sched_engine_active_lock_bh(struct i915_sched_engine *sched_engine)
+{
+       local_bh_disable(); /* prevent local softirq and lock recursion */
+       tasklet_lock(&sched_engine->tasklet);
+}
+
+static inline void
+i915_sched_engine_active_unlock_bh(struct i915_sched_engine *sched_engine)
+{
+       tasklet_unlock(&sched_engine->tasklet);
+       local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
+}
+
 void i915_request_show_with_schedule(struct drm_printer *m,
                                     const struct i915_request *rq,
                                     const char *prefix,
                                     int indent);
 
+static inline bool
+i915_sched_engine_disabled(struct i915_sched_engine *sched_engine)
+{
+       return sched_engine->disabled(sched_engine);
+}
+
+void i915_scheduler_module_exit(void);
+int i915_scheduler_module_init(void);
+
 #endif /* _I915_SCHEDULER_H_ */
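
A hedged sketch of how a submission backend might use the lifecycle helpers declared
above. The sketch_backend structure and the subclass value passed to
i915_sched_engine_create() are assumptions made for illustration.

/* Hypothetical backend wiring for the scheduler-engine helpers above. */
struct sketch_backend {
	struct i915_sched_engine *sched_engine;
	/* ... backend-specific state ... */
};

static int sketch_backend_init(struct sketch_backend *be)
{
	struct i915_sched_engine *se;

	se = i915_sched_engine_create(0); /* 0: hypothetical lockdep subclass */
	if (!se)
		return -ENOMEM;

	se->private_data = be;	/* backend hangs its own state here */
	be->sched_engine = se;
	return 0;
}

static void sketch_backend_fini(struct sketch_backend *be)
{
	/* drop the initial reference; ->destroy() runs when it reaches zero */
	i915_sched_engine_put(be->sched_engine);
	be->sched_engine = NULL;
}
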
index 343ed44..b0a1b58 100644 (file)
@@ -91,4 +91,115 @@ struct i915_dependency {
                                &(rq__)->sched.signalers_list, \
                                signal_link)
 
+/**
+ * struct i915_sched_engine - scheduler engine
+ *
+ * A scheduler engine represents a submission queue with different priority
+ * bands. It contains all the common state (relative to the backend) needed to
+ * queue, track, and submit requests.
+ *
+ * At the moment this object is quite i915-specific, but it will transition
+ * into a container for the drm_gpu_scheduler plus a few other variables once
+ * i915 is integrated with the DRM scheduler.
+ */
+struct i915_sched_engine {
+       /**
+        * @ref: reference count of the scheduler engine object
+        */
+       struct kref ref;
+
+       /**
+        * @lock: protects requests in the priority lists, the requests and
+        * hold lists, and the tasklet while it is running
+        */
+       spinlock_t lock;
+
+       /**
+        * @requests: list of requests inflight on this scheduler engine
+        */
+       struct list_head requests;
+
+       /**
+        * @hold: list of ready requests, but on hold
+        */
+       struct list_head hold;
+
+       /**
+        * @tasklet: softirq tasklet for submission
+        */
+       struct tasklet_struct tasklet;
+
+       /**
+        * @default_priolist: priority list for I915_PRIORITY_NORMAL
+        */
+       struct i915_priolist default_priolist;
+
+       /**
+        * @queue_priority_hint: Highest pending priority.
+        *
+        * When we add requests into the queue, or adjust the priority of
+        * executing requests, we compute the maximum priority of those
+        * pending requests. We can then use this value to determine if
+        * we need to preempt the executing requests to service the queue.
+        * However, since we may have recorded the priority of an inflight
+        * request that we wanted to preempt but has since completed, at the
+        * time of dequeuing the priority hint may no longer match the highest
+        * available request priority.
+        */
+       int queue_priority_hint;
+
+       /**
+        * @queue: queue of requests, in priority lists
+        */
+       struct rb_root_cached queue;
+
+       /**
+        * @no_priolist: priority lists disabled
+        */
+       bool no_priolist;
+
+       /**
+        * @private_data: private data of the submission backend
+        */
+       void *private_data;
+
+       /**
+        * @destroy: destroy the scheduler engine / clean up in the backend
+        */
+       void    (*destroy)(struct kref *kref);
+
+       /**
+        * @disabled: check if backend has disabled submission
+        */
+       bool    (*disabled)(struct i915_sched_engine *sched_engine);
+
+       /**
+        * @kick_backend: kick backend after a request's priority has changed
+        */
+       void    (*kick_backend)(const struct i915_request *rq,
+                               int prio);
+
+       /**
+        * @bump_inflight_request_prio: update priority of an inflight request
+        */
+       void    (*bump_inflight_request_prio)(struct i915_request *rq,
+                                             int prio);
+
+       /**
+        * @retire_inflight_request_prio: indicate to the priority tracking
+        * that a request has been retired
+        */
+       void    (*retire_inflight_request_prio)(struct i915_request *rq);
+
+       /**
+        * @schedule: adjust priority of request
+        *
+        * Call when the priority on a request has changed and it and its
+        * dependencies may need rescheduling. Note the request itself may
+        * not be ready to run!
+        */
+       void    (*schedule)(struct i915_request *request,
+                           const struct i915_sched_attr *attr);
+};
+
 #endif /* _I915_SCHEDULER_TYPES_H_ */
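
To make the callback table above concrete, here is a hedged sketch of a backend wiring
its hooks after creating a sched_engine. The callback bodies are placeholders and the
sketch_* names are hypothetical; only the field names and i915_schedule() come from this
patch.

/* Hypothetical backend callbacks plugged into struct i915_sched_engine. */
static bool sketch_disabled(struct i915_sched_engine *sched_engine)
{
	return false;	/* this backend never turns submission off */
}

static void sketch_kick_backend(const struct i915_request *rq, int prio)
{
	/* e.g. schedule the submission tasklet if @prio beats the hint */
}

static void sketch_schedule(struct i915_request *rq,
			    const struct i915_sched_attr *attr)
{
	i915_schedule(rq, attr);	/* default PI walk from i915_scheduler.c */
}

static void sketch_setup(struct i915_sched_engine *se)
{
	se->disabled = sketch_disabled;
	se->kick_backend = sketch_kick_backend;
	se->schedule = sketch_schedule;
	/* ->destroy stays at default_destroy unless the backend overrides it */
}
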
index a3a81bb..5b33ef2 100644 (file)
@@ -16,11 +16,8 @@ static void fence_complete(struct dma_fence_work *f)
 static void fence_work(struct work_struct *work)
 {
        struct dma_fence_work *f = container_of(work, typeof(*f), work);
-       int err;
 
-       err = f->ops->work(f);
-       if (err)
-               dma_fence_set_error(&f->dma, err);
+       f->ops->work(f);
 
        fence_complete(f);
        dma_fence_put(&f->dma);
index 2c409f1..d568069 100644 (file)
@@ -17,7 +17,7 @@ struct dma_fence_work;
 
 struct dma_fence_work_ops {
        const char *name;
-       int (*work)(struct dma_fence_work *f);
+       void (*work)(struct dma_fence_work *f);
        void (*release)(struct dma_fence_work *f);
 };
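
With ->work() now returning void, a work callback that can fail is expected to record
the error itself before the fence completes. A minimal sketch, assuming a hypothetical
ops table:

/* Hypothetical ops table for the void ->work() signature above. */
static void sketch_fence_work(struct dma_fence_work *f)
{
	int err = 0;

	/* ... do the actual work, setting err on failure ... */

	if (err)
		dma_fence_set_error(&f->dma, err);	/* report before signalling */
}

static const struct dma_fence_work_ops sketch_ops = {
	.name = "sketch",
	.work = sketch_fence_work,
};
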
 
index 873bf99..cdf0e9c 100644 (file)
@@ -272,7 +272,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
        struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
        struct intel_rps *rps = &i915->gt.rps;
 
-       return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->cur_freq));
+       return sysfs_emit(buf, "%d\n", intel_rps_get_requested_frequency(rps));
 }
 
 static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -326,9 +326,10 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
        struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-       struct intel_rps *rps = &dev_priv->gt.rps;
+       struct intel_gt *gt = &dev_priv->gt;
+       struct intel_rps *rps = &gt->rps;
 
-       return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->max_freq_softlimit));
+       return sysfs_emit(buf, "%d\n", intel_rps_get_max_frequency(rps));
 }
 
 static ssize_t gt_max_freq_mhz_store(struct device *kdev,
@@ -336,7 +337,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                                     const char *buf, size_t count)
 {
        struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-       struct intel_rps *rps = &dev_priv->gt.rps;
+       struct intel_gt *gt = &dev_priv->gt;
+       struct intel_rps *rps = &gt->rps;
        ssize_t ret;
        u32 val;
 
@@ -344,53 +346,26 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
        if (ret)
                return ret;
 
-       mutex_lock(&rps->lock);
-
-       val = intel_freq_opcode(rps, val);
-       if (val < rps->min_freq ||
-           val > rps->max_freq ||
-           val < rps->min_freq_softlimit) {
-               ret = -EINVAL;
-               goto unlock;
-       }
-
-       if (val > rps->rp0_freq)
-               DRM_DEBUG("User requested overclocking to %d\n",
-                         intel_gpu_freq(rps, val));
-
-       rps->max_freq_softlimit = val;
-
-       val = clamp_t(int, rps->cur_freq,
-                     rps->min_freq_softlimit,
-                     rps->max_freq_softlimit);
-
-       /*
-        * We still need *_set_rps to process the new max_delay and
-        * update the interrupt limits and PMINTRMSK even though
-        * frequency request may be unchanged.
-        */
-       intel_rps_set(rps, val);
-
-unlock:
-       mutex_unlock(&rps->lock);
+       ret = intel_rps_set_max_frequency(rps, val);
 
        return ret ?: count;
 }
 
 static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
 {
-       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-       struct intel_rps *rps = &dev_priv->gt.rps;
+       struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+       struct intel_gt *gt = &i915->gt;
+       struct intel_rps *rps = &gt->rps;
 
-       return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->min_freq_softlimit));
+       return sysfs_emit(buf, "%d\n", intel_rps_get_min_frequency(rps));
 }
 
 static ssize_t gt_min_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
 {
-       struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-       struct intel_rps *rps = &dev_priv->gt.rps;
+       struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+       struct intel_rps *rps = &i915->gt.rps;
        ssize_t ret;
        u32 val;
 
@@ -398,31 +373,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
        if (ret)
                return ret;
 
-       mutex_lock(&rps->lock);
-
-       val = intel_freq_opcode(rps, val);
-       if (val < rps->min_freq ||
-           val > rps->max_freq ||
-           val > rps->max_freq_softlimit) {
-               ret = -EINVAL;
-               goto unlock;
-       }
-
-       rps->min_freq_softlimit = val;
-
-       val = clamp_t(int, rps->cur_freq,
-                     rps->min_freq_softlimit,
-                     rps->max_freq_softlimit);
-
-       /*
-        * We still need *_set_rps to process the new min_delay and
-        * update the interrupt limits and PMINTRMSK even though
-        * frequency request may be unchanged.
-        */
-       intel_rps_set(rps, val);
-
-unlock:
-       mutex_unlock(&rps->lock);
+       ret = intel_rps_set_min_frequency(rps, val);
 
        return ret ?: count;
 }
@@ -448,11 +399,11 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
        u32 val;
 
        if (attr == &dev_attr_gt_RP0_freq_mhz)
-               val = intel_gpu_freq(rps, rps->rp0_freq);
+               val = intel_rps_get_rp0_frequency(rps);
        else if (attr == &dev_attr_gt_RP1_freq_mhz)
-               val = intel_gpu_freq(rps, rps->rp1_freq);
+               val = intel_rps_get_rp1_frequency(rps);
        else if (attr == &dev_attr_gt_RPn_freq_mhz)
-               val = intel_gpu_freq(rps, rps->min_freq);
+               val = intel_rps_get_rpn_frequency(rps);
        else
                BUG();
 
index 6778ad2..806ad68 100644 (file)
@@ -794,30 +794,40 @@ DECLARE_EVENT_CLASS(i915_request,
            TP_STRUCT__entry(
                             __field(u32, dev)
                             __field(u64, ctx)
+                            __field(u32, guc_id)
                             __field(u16, class)
                             __field(u16, instance)
                             __field(u32, seqno)
+                            __field(u32, tail)
                             ),
 
            TP_fast_assign(
                           __entry->dev = rq->engine->i915->drm.primary->index;
                           __entry->class = rq->engine->uabi_class;
                           __entry->instance = rq->engine->uabi_instance;
+                          __entry->guc_id = rq->context->guc_id;
                           __entry->ctx = rq->fence.context;
                           __entry->seqno = rq->fence.seqno;
+                          __entry->tail = rq->tail;
                           ),
 
-           TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u",
+           TP_printk("dev=%u, engine=%u:%u, guc_id=%u, ctx=%llu, seqno=%u, tail=%u",
                      __entry->dev, __entry->class, __entry->instance,
-                     __entry->ctx, __entry->seqno)
+                     __entry->guc_id, __entry->ctx, __entry->seqno,
+                     __entry->tail)
 );
 
 DEFINE_EVENT(i915_request, i915_request_add,
-           TP_PROTO(struct i915_request *rq),
-           TP_ARGS(rq)
+            TP_PROTO(struct i915_request *rq),
+            TP_ARGS(rq)
 );
 
 #if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
+DEFINE_EVENT(i915_request, i915_request_guc_submit,
+            TP_PROTO(struct i915_request *rq),
+            TP_ARGS(rq)
+);
+
 DEFINE_EVENT(i915_request, i915_request_submit,
             TP_PROTO(struct i915_request *rq),
             TP_ARGS(rq)
@@ -885,8 +895,116 @@ TRACE_EVENT(i915_request_out,
                              __entry->ctx, __entry->seqno, __entry->completed)
 );
 
+DECLARE_EVENT_CLASS(intel_context,
+                   TP_PROTO(struct intel_context *ce),
+                   TP_ARGS(ce),
+
+                   TP_STRUCT__entry(
+                            __field(u32, guc_id)
+                            __field(int, pin_count)
+                            __field(u32, sched_state)
+                            __field(u32, guc_sched_state_no_lock)
+                            __field(u8, guc_prio)
+                            ),
+
+                   TP_fast_assign(
+                          __entry->guc_id = ce->guc_id;
+                          __entry->pin_count = atomic_read(&ce->pin_count);
+                          __entry->sched_state = ce->guc_state.sched_state;
+                          __entry->guc_sched_state_no_lock =
+                          atomic_read(&ce->guc_sched_state_no_lock);
+                          __entry->guc_prio = ce->guc_prio;
+                          ),
+
+                   TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x,0x%x, guc_prio=%u",
+                             __entry->guc_id, __entry->pin_count,
+                             __entry->sched_state,
+                             __entry->guc_sched_state_no_lock,
+                             __entry->guc_prio)
+);
+
+DEFINE_EVENT(intel_context, intel_context_set_prio,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_reset,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_ban,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_register,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_deregister,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_deregister_done,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_sched_enable,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_sched_disable,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_sched_done,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_create,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_fence_release,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_free,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_steal_guc_id,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_do_pin,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
+DEFINE_EVENT(intel_context, intel_context_do_unpin,
+            TP_PROTO(struct intel_context *ce),
+            TP_ARGS(ce)
+);
+
 #else
 #if !defined(TRACE_HEADER_MULTI_READ)
+static inline void
+trace_i915_request_guc_submit(struct i915_request *rq)
+{
+}
+
 static inline void
 trace_i915_request_submit(struct i915_request *rq)
 {
@@ -906,6 +1024,81 @@ static inline void
 trace_i915_request_out(struct i915_request *rq)
 {
 }
+
+static inline void
+trace_intel_context_set_prio(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_reset(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_ban(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_register(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_deregister(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_deregister_done(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_sched_enable(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_sched_disable(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_sched_done(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_create(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_fence_release(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_free(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_steal_guc_id(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_do_pin(struct intel_context *ce)
+{
+}
+
+static inline void
+trace_intel_context_do_unpin(struct intel_context *ce)
+{
+}
 #endif
 #endif
 
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
new file mode 100644 (file)
index 0000000..6877362
--- /dev/null
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/slab.h>
+
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "i915_ttm_buddy_manager.h"
+
+#include "i915_buddy.h"
+#include "i915_gem.h"
+
+struct i915_ttm_buddy_manager {
+       struct ttm_resource_manager manager;
+       struct i915_buddy_mm mm;
+       struct list_head reserved;
+       struct mutex lock;
+       u64 default_page_size;
+};
+
+static struct i915_ttm_buddy_manager *
+to_buddy_manager(struct ttm_resource_manager *man)
+{
+       return container_of(man, struct i915_ttm_buddy_manager, manager);
+}
+
+static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
+                                   struct ttm_buffer_object *bo,
+                                   const struct ttm_place *place,
+                                   struct ttm_resource **res)
+{
+       struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
+       struct i915_ttm_buddy_resource *bman_res;
+       struct i915_buddy_mm *mm = &bman->mm;
+       unsigned long n_pages;
+       unsigned int min_order;
+       u64 min_page_size;
+       u64 size;
+       int err;
+
+       GEM_BUG_ON(place->fpfn || place->lpfn);
+
+       bman_res = kzalloc(sizeof(*bman_res), GFP_KERNEL);
+       if (!bman_res)
+               return -ENOMEM;
+
+       ttm_resource_init(bo, place, &bman_res->base);
+       INIT_LIST_HEAD(&bman_res->blocks);
+       bman_res->mm = mm;
+
+       GEM_BUG_ON(!bman_res->base.num_pages);
+       size = bman_res->base.num_pages << PAGE_SHIFT;
+
+       min_page_size = bman->default_page_size;
+       if (bo->page_alignment)
+               min_page_size = bo->page_alignment << PAGE_SHIFT;
+
+       GEM_BUG_ON(min_page_size < mm->chunk_size);
+       min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
+       if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
+               size = roundup_pow_of_two(size);
+               min_order = ilog2(size) - ilog2(mm->chunk_size);
+       }
+
+       if (size > mm->size) {
+               err = -E2BIG;
+               goto err_free_res;
+       }
+
+       n_pages = size >> ilog2(mm->chunk_size);
+
+       do {
+               struct i915_buddy_block *block;
+               unsigned int order;
+
+               order = fls(n_pages) - 1;
+               GEM_BUG_ON(order > mm->max_order);
+               GEM_BUG_ON(order < min_order);
+
+               do {
+                       mutex_lock(&bman->lock);
+                       block = i915_buddy_alloc(mm, order);
+                       mutex_unlock(&bman->lock);
+                       if (!IS_ERR(block))
+                               break;
+
+                       if (order-- == min_order) {
+                               err = -ENOSPC;
+                               goto err_free_blocks;
+                       }
+               } while (1);
+
+               n_pages -= BIT(order);
+
+               list_add_tail(&block->link, &bman_res->blocks);
+
+               if (!n_pages)
+                       break;
+       } while (1);
+
+       *res = &bman_res->base;
+       return 0;
+
+err_free_blocks:
+       mutex_lock(&bman->lock);
+       i915_buddy_free_list(mm, &bman_res->blocks);
+       mutex_unlock(&bman->lock);
+err_free_res:
+       kfree(bman_res);
+       return err;
+}
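
The allocation loop above walks n_pages from the largest power-of-two order that still
fits, retrying at smaller orders when an allocation fails. A tiny standalone sketch of
just that order arithmetic (no real allocator behind it, purely illustrative):

/* Standalone illustration of the order walk used by the alloc loop above. */
#include <stdio.h>

static unsigned int fls_ul(unsigned long x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long n_pages = 11;	/* hypothetical: 11 chunks requested */

	while (n_pages) {
		unsigned int order = fls_ul(n_pages) - 1;

		/* a real allocator may fail here and retry at order - 1 */
		printf("allocate order %u (%lu chunks)\n", order, 1ul << order);
		n_pages -= 1ul << order;
	}
	return 0;
}
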
+
+static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
+                                   struct ttm_resource *res)
+{
+       struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
+       struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
+
+       mutex_lock(&bman->lock);
+       i915_buddy_free_list(&bman->mm, &bman_res->blocks);
+       mutex_unlock(&bman->lock);
+
+       kfree(bman_res);
+}
+
+static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
+       .alloc = i915_ttm_buddy_man_alloc,
+       .free = i915_ttm_buddy_man_free,
+};
+
+
+/**
+ * i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager
+ * @bdev: The ttm device
+ * @type: Memory type we want to manage
+ * @use_tt: Set use_tt for the manager
+ * @size: The size in bytes to manage
+ * @default_page_size: The default minimum page size in bytes for allocations.
+ * This must be at least as large as @chunk_size, and can be overridden by
+ * setting the BO page_alignment to be larger or smaller as needed.
+ * @chunk_size: The minimum page size in bytes for our allocations i.e
+ * order-zero
+ *
+ * Note that the starting address is assumed to be zero here, since this
+ * simplifies keeping the property that allocated blocks have natural
+ * power-of-two alignment. So long as the real starting address is some large
+ * power-of-two, or naturally starts from zero, this should be fine. Also,
+ * the &i915_ttm_buddy_man_reserve interface can be used to preserve alignment
+ * if, say, there is some unusable range at the start of the region. We can
+ * revisit this in the future and make the interface accept an actual starting
+ * offset and let it take care of the rest.
+ *
+ * Note that if the @size is not aligned to the @chunk_size then we perform the
+ * required rounding to get the usable size. The final size in pages can be
+ * taken from &ttm_resource_manager.size.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int i915_ttm_buddy_man_init(struct ttm_device *bdev,
+                           unsigned int type, bool use_tt,
+                           u64 size, u64 default_page_size,
+                           u64 chunk_size)
+{
+       struct ttm_resource_manager *man;
+       struct i915_ttm_buddy_manager *bman;
+       int err;
+
+       bman = kzalloc(sizeof(*bman), GFP_KERNEL);
+       if (!bman)
+               return -ENOMEM;
+
+       err = i915_buddy_init(&bman->mm, size, chunk_size);
+       if (err)
+               goto err_free_bman;
+
+       mutex_init(&bman->lock);
+       INIT_LIST_HEAD(&bman->reserved);
+       GEM_BUG_ON(default_page_size < chunk_size);
+       bman->default_page_size = default_page_size;
+
+       man = &bman->manager;
+       man->use_tt = use_tt;
+       man->func = &i915_ttm_buddy_manager_func;
+       ttm_resource_manager_init(man, bman->mm.size >> PAGE_SHIFT);
+
+       ttm_resource_manager_set_used(man, true);
+       ttm_set_driver_manager(bdev, type, man);
+
+       return 0;
+
+err_free_bman:
+       kfree(bman);
+       return err;
+}
+
+/**
+ * i915_ttm_buddy_man_fini - Destroy the buddy allocator ttm manager
+ * @bdev: The ttm device
+ * @type: Memory type we want to manage
+ *
+ * Note that if we reserved anything with &i915_ttm_buddy_man_reserve, this will
+ * also be freed for us here.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int i915_ttm_buddy_man_fini(struct ttm_device *bdev, unsigned int type)
+{
+       struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
+       struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
+       struct i915_buddy_mm *mm = &bman->mm;
+       int ret;
+
+       ttm_resource_manager_set_used(man, false);
+
+       ret = ttm_resource_manager_evict_all(bdev, man);
+       if (ret)
+               return ret;
+
+       ttm_set_driver_manager(bdev, type, NULL);
+
+       mutex_lock(&bman->lock);
+       i915_buddy_free_list(mm, &bman->reserved);
+       i915_buddy_fini(mm);
+       mutex_unlock(&bman->lock);
+
+       ttm_resource_manager_cleanup(man);
+       kfree(bman);
+
+       return 0;
+}
+
+/**
+ * i915_ttm_buddy_man_reserve - Reserve address range
+ * @man: The buddy allocator ttm manager
+ * @start: The offset in bytes, where the region start is assumed to be zero
+ * @size: The size in bytes
+ *
+ * Note that the starting address for the region is always assumed to be zero.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
+                              u64 start, u64 size)
+{
+       struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
+       struct i915_buddy_mm *mm = &bman->mm;
+       int ret;
+
+       mutex_lock(&bman->lock);
+       ret = i915_buddy_alloc_range(mm, &bman->reserved, start, size);
+       mutex_unlock(&bman->lock);
+
+       return ret;
+}
+
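
For illustration, a minimal sketch of how a caller could bring up such a buddy-managed region, carve out an unusable head range as described above, and tear it down again on error. The memory type slot, sizes and reserved range below are hypothetical; only the i915_ttm_buddy_man_*() calls come from the code above.

    #include <linux/sizes.h>
    #include <drm/ttm/ttm_device.h>
    #include <drm/ttm/ttm_placement.h>

    #include "i915_ttm_buddy_manager.h"

    static int example_region_setup(struct ttm_device *bdev)
    {
            const unsigned int mem_type = TTM_PL_PRIV; /* hypothetical slot */
            int err;

            /* 1 GiB region, 64 KiB default page size, 4 KiB order-zero chunks */
            err = i915_ttm_buddy_man_init(bdev, mem_type, false,
                                          SZ_1G, SZ_64K, PAGE_SIZE);
            if (err)
                    return err;

            /* Keep a (made up) unusable first 1 MiB out of the allocator */
            err = i915_ttm_buddy_man_reserve(ttm_manager_type(bdev, mem_type),
                                             0, SZ_1M);
            if (err)
                    i915_ttm_buddy_man_fini(bdev, mem_type);

            return err;
    }
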
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h
new file mode 100644 (file)
index 0000000..0722d33
--- /dev/null
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __I915_TTM_BUDDY_MANAGER_H__
+#define __I915_TTM_BUDDY_MANAGER_H__
+
+#include <linux/list.h>
+#include <linux/types.h>
+
+#include <drm/ttm/ttm_resource.h>
+
+struct ttm_device;
+struct ttm_resource_manager;
+struct i915_buddy_mm;
+
+/**
+ * struct i915_ttm_buddy_resource
+ *
+ * @base: struct ttm_resource base class we extend
+ * @blocks: the list of struct i915_buddy_block for this resource/allocation
+ * @mm: the struct i915_buddy_mm for this resource
+ *
+ * Extends the struct ttm_resource to manage an address space allocation with
+ * one or more struct i915_buddy_block.
+ */
+struct i915_ttm_buddy_resource {
+       struct ttm_resource base;
+       struct list_head blocks;
+       struct i915_buddy_mm *mm;
+};
+
+/**
+ * to_ttm_buddy_resource
+ *
+ * @res: the resource to upcast
+ *
+ * Upcast the struct ttm_resource object into a struct i915_ttm_buddy_resource.
+ */
+static inline struct i915_ttm_buddy_resource *
+to_ttm_buddy_resource(struct ttm_resource *res)
+{
+       return container_of(res, struct i915_ttm_buddy_resource, base);
+}
+
+int i915_ttm_buddy_man_init(struct ttm_device *bdev,
+                           unsigned int type, bool use_tt,
+                           u64 size, u64 default_page_size, u64 chunk_size);
+int i915_ttm_buddy_man_fini(struct ttm_device *bdev,
+                           unsigned int type);
+
+int i915_ttm_buddy_man_reserve(struct ttm_resource_manager *man,
+                              u64 start, u64 size);
+
+#endif
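
As a usage hint, a backend holding a struct ttm_resource from this manager can get back at the underlying buddy blocks via the helper above. A hedged sketch follows; i915_buddy_block_offset()/i915_buddy_block_size() and the block list member name "link" are assumed from the existing i915 buddy allocator rather than shown in this patch.

    static void example_dump_blocks(struct ttm_resource *res)
    {
            struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res);
            struct i915_buddy_block *block;

            list_for_each_entry(block, &bman_res->blocks, link)
                    pr_info("buddy block: offset=%llx size=%llx\n",
                            i915_buddy_block_offset(block),
                            i915_buddy_block_size(bman_res->mm, block));
    }
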
index 0f227f2..4b7fc46 100644 (file)
 #include "gt/intel_gt_requests.h"
 
 #include "i915_drv.h"
-#include "i915_globals.h"
 #include "i915_sw_fence_work.h"
 #include "i915_trace.h"
 #include "i915_vma.h"
 
-static struct i915_global_vma {
-       struct i915_global base;
-       struct kmem_cache *slab_vmas;
-} global;
+static struct kmem_cache *slab_vmas;
 
 struct i915_vma *i915_vma_alloc(void)
 {
-       return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
+       return kmem_cache_zalloc(slab_vmas, GFP_KERNEL);
 }
 
 void i915_vma_free(struct i915_vma *vma)
 {
-       return kmem_cache_free(global.slab_vmas, vma);
+       return kmem_cache_free(slab_vmas, vma);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
@@ -300,14 +296,13 @@ struct i915_vma_work {
        unsigned int flags;
 };
 
-static int __vma_bind(struct dma_fence_work *work)
+static void __vma_bind(struct dma_fence_work *work)
 {
        struct i915_vma_work *vw = container_of(work, typeof(*vw), base);
        struct i915_vma *vma = vw->vma;
 
        vma->ops->bind_vma(vw->vm, &vw->stash,
                           vma, vw->cache_level, vw->flags);
-       return 0;
 }
 
 static void __vma_release(struct dma_fence_work *work)
@@ -1415,27 +1410,16 @@ void i915_vma_make_purgeable(struct i915_vma *vma)
 #include "selftests/i915_vma.c"
 #endif
 
-static void i915_global_vma_shrink(void)
+void i915_vma_module_exit(void)
 {
-       kmem_cache_shrink(global.slab_vmas);
+       kmem_cache_destroy(slab_vmas);
 }
 
-static void i915_global_vma_exit(void)
-{
-       kmem_cache_destroy(global.slab_vmas);
-}
-
-static struct i915_global_vma global = { {
-       .shrink = i915_global_vma_shrink,
-       .exit = i915_global_vma_exit,
-} };
-
-int __init i915_global_vma_init(void)
+int __init i915_vma_module_init(void)
 {
-       global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
-       if (!global.slab_vmas)
+       slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+       if (!slab_vmas)
                return -ENOMEM;
 
-       i915_global_register(&global.base);
        return 0;
 }
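
A hedged sketch of how a driver-wide module entry point could consume the new per-file hooks; everything except i915_vma_module_init()/i915_vma_module_exit() is made up for illustration.

    static int __init example_module_init(void)
    {
            int err;

            err = i915_vma_module_init(); /* replaces the old i915_global hook */
            if (err)
                    return err;

            /* ... init further subsystems, unwinding on failure ... */
            return 0;
    }

    static void __exit example_module_exit(void)
    {
            i915_vma_module_exit();
    }
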
index eca452a..ed69f66 100644 (file)
@@ -426,4 +426,7 @@ static inline int i915_vma_sync(struct i915_vma *vma)
        return i915_active_wait(&vma->active);
 }
 
+void i915_vma_module_exit(void);
+int i915_vma_module_init(void);
+
 #endif
index 7eaa92f..305face 100644 (file)
@@ -59,7 +59,6 @@ static const char * const platform_names[] = {
        PLATFORM_NAME(GEMINILAKE),
        PLATFORM_NAME(COFFEELAKE),
        PLATFORM_NAME(COMETLAKE),
-       PLATFORM_NAME(CANNONLAKE),
        PLATFORM_NAME(ICELAKE),
        PLATFORM_NAME(ELKHARTLAKE),
        PLATFORM_NAME(JASPERLAKE),
@@ -68,6 +67,8 @@ static const char * const platform_names[] = {
        PLATFORM_NAME(DG1),
        PLATFORM_NAME(ALDERLAKE_S),
        PLATFORM_NAME(ALDERLAKE_P),
+       PLATFORM_NAME(XEHPSDV),
+       PLATFORM_NAME(DG2),
 };
 #undef PLATFORM_NAME
 
@@ -96,9 +97,17 @@ static const char *iommu_name(void)
 void intel_device_info_print_static(const struct intel_device_info *info,
                                    struct drm_printer *p)
 {
-       drm_printf(p, "graphics_ver: %u\n", info->graphics_ver);
-       drm_printf(p, "media_ver: %u\n", info->media_ver);
-       drm_printf(p, "display_ver: %u\n", info->display.ver);
+       if (info->graphics_rel)
+               drm_printf(p, "graphics version: %u.%02u\n", info->graphics_ver, info->graphics_rel);
+       else
+               drm_printf(p, "graphics version: %u\n", info->graphics_ver);
+
+       if (info->media_rel)
+               drm_printf(p, "media version: %u.%02u\n", info->media_ver, info->media_rel);
+       else
+               drm_printf(p, "media version: %u\n", info->media_ver);
+
+       drm_printf(p, "display version: %u\n", info->display.ver);
        drm_printf(p, "gt: %d\n", info->gt);
        drm_printf(p, "iommu: %s\n", iommu_name());
        drm_printf(p, "memory-regions: %x\n", info->memory_regions);
@@ -165,7 +174,6 @@ static const u16 subplatform_ulx_ids[] = {
 };
 
 static const u16 subplatform_portf_ids[] = {
-       INTEL_CNL_PORT_F_IDS(0),
        INTEL_ICL_PORT_F_IDS(0),
 };
 
@@ -253,14 +261,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
        struct intel_runtime_info *runtime = RUNTIME_INFO(dev_priv);
        enum pipe pipe;
 
-       /* Wa_14011765242: adl-s A0 */
-       if (IS_ADLS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A0))
+       /* Wa_14011765242: adl-s A0,A1 */
+       if (IS_ADLS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_A2))
                for_each_pipe(dev_priv, pipe)
                        runtime->num_scalers[pipe] = 0;
-       else if (GRAPHICS_VER(dev_priv) >= 10) {
+       else if (DISPLAY_VER(dev_priv) >= 11) {
                for_each_pipe(dev_priv, pipe)
                        runtime->num_scalers[pipe] = 2;
-       } else if (GRAPHICS_VER(dev_priv) == 9) {
+       } else if (DISPLAY_VER(dev_priv) >= 9) {
                runtime->num_scalers[PIPE_A] = 2;
                runtime->num_scalers[PIPE_B] = 2;
                runtime->num_scalers[PIPE_C] = 1;
@@ -271,10 +279,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
        if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv))
                for_each_pipe(dev_priv, pipe)
                        runtime->num_sprites[pipe] = 4;
-       else if (GRAPHICS_VER(dev_priv) >= 11)
+       else if (DISPLAY_VER(dev_priv) >= 11)
                for_each_pipe(dev_priv, pipe)
                        runtime->num_sprites[pipe] = 6;
-       else if (GRAPHICS_VER(dev_priv) == 10 || IS_GEMINILAKE(dev_priv))
+       else if (DISPLAY_VER(dev_priv) == 10)
                for_each_pipe(dev_priv, pipe)
                        runtime->num_sprites[pipe] = 3;
        else if (IS_BROXTON(dev_priv)) {
@@ -293,7 +301,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
        } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                for_each_pipe(dev_priv, pipe)
                        runtime->num_sprites[pipe] = 2;
-       } else if (GRAPHICS_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) {
+       } else if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) {
                for_each_pipe(dev_priv, pipe)
                        runtime->num_sprites[pipe] = 1;
        }
@@ -325,7 +333,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
                        info->pipe_mask &= ~BIT(PIPE_C);
                        info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
                }
-       } else if (HAS_DISPLAY(dev_priv) && GRAPHICS_VER(dev_priv) >= 9) {
+       } else if (HAS_DISPLAY(dev_priv) && DISPLAY_VER(dev_priv) >= 9) {
                u32 dfsm = intel_de_read(dev_priv, SKL_DFSM);
 
                if (dfsm & SKL_DFSM_PIPE_A_DISABLE) {
@@ -340,7 +348,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
                        info->pipe_mask &= ~BIT(PIPE_C);
                        info->cpu_transcoder_mask &= ~BIT(TRANSCODER_C);
                }
-               if (GRAPHICS_VER(dev_priv) >= 12 &&
+
+               if (DISPLAY_VER(dev_priv) >= 12 &&
                    (dfsm & TGL_DFSM_PIPE_D_DISABLE)) {
                        info->pipe_mask &= ~BIT(PIPE_D);
                        info->cpu_transcoder_mask &= ~BIT(TRANSCODER_D);
@@ -352,11 +361,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
                if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE)
                        info->display.has_fbc = 0;
 
-               if (GRAPHICS_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
+               if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE))
                        info->display.has_dmc = 0;
 
-               if (GRAPHICS_VER(dev_priv) >= 10 &&
-                   (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE))
+               if (DISPLAY_VER(dev_priv) >= 10 &&
+                   (dfsm & GLK_DFSM_DISPLAY_DSC_DISABLE))
                        info->display.has_dsc = 0;
        }
 
index b326aff..d328bb9 100644 (file)
@@ -76,8 +76,6 @@ enum intel_platform {
        INTEL_GEMINILAKE,
        INTEL_COFFEELAKE,
        INTEL_COMETLAKE,
-       /* gen10 */
-       INTEL_CANNONLAKE,
        /* gen11 */
        INTEL_ICELAKE,
        INTEL_ELKHARTLAKE,
@@ -88,6 +86,8 @@ enum intel_platform {
        INTEL_DG1,
        INTEL_ALDERLAKE_S,
        INTEL_ALDERLAKE_P,
+       INTEL_XEHPSDV,
+       INTEL_DG2,
        INTEL_MAX_PLATFORMS
 };
 
@@ -103,9 +103,13 @@ enum intel_platform {
 #define INTEL_SUBPLATFORM_ULT  (0)
 #define INTEL_SUBPLATFORM_ULX  (1)
 
-/* CNL/ICL */
+/* ICL */
 #define INTEL_SUBPLATFORM_PORTF        (0)
 
+/* DG2 */
+#define INTEL_SUBPLATFORM_G10  0
+#define INTEL_SUBPLATFORM_G11  1
+
 enum intel_ppgtt_type {
        INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
        INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
@@ -127,7 +131,7 @@ enum intel_ppgtt_type {
        func(has_llc); \
        func(has_logical_ring_contexts); \
        func(has_logical_ring_elsq); \
-       func(has_master_unit_irq); \
+       func(has_mslices); \
        func(has_pooled_eu); \
        func(has_rc6); \
        func(has_rc6p); \
@@ -141,6 +145,7 @@ enum intel_ppgtt_type {
 #define DEV_INFO_DISPLAY_FOR_EACH_FLAG(func) \
        /* Keep in alphabetical order */ \
        func(cursor_needs_physical); \
+       func(has_cdclk_crawl); \
        func(has_dmc); \
        func(has_ddi); \
        func(has_dp_mst); \
@@ -162,9 +167,10 @@ enum intel_ppgtt_type {
 
 struct intel_device_info {
        u8 graphics_ver;
+       u8 graphics_rel;
        u8 media_ver;
+       u8 media_rel;
 
-       u8 gt; /* GT number, 0 if undefined */
        intel_engine_mask_t platform_engine_mask; /* Engines supported by the HW */
 
        enum intel_platform platform;
@@ -180,13 +186,13 @@ struct intel_device_info {
 
        u32 display_mmio_offset;
 
+       u8 gt; /* GT number, 0 if undefined */
+
        u8 pipe_mask;
        u8 cpu_transcoder_mask;
 
        u8 abox_mask;
 
-       u8 has_cdclk_crawl;  /* does support CDCLK crawling */
-
 #define DEFINE_FLAG(name) u8 name:1
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG);
 #undef DEFINE_FLAG
index 879b0f0..9186652 100644 (file)
@@ -77,21 +77,21 @@ static int skl_get_dimm_ranks(u16 val)
 }
 
 /* Returns total Gb for the whole DIMM */
-static int cnl_get_dimm_size(u16 val)
+static int icl_get_dimm_size(u16 val)
 {
-       return (val & CNL_DRAM_SIZE_MASK) * 8 / 2;
+       return (val & ICL_DRAM_SIZE_MASK) * 8 / 2;
 }
 
-static int cnl_get_dimm_width(u16 val)
+static int icl_get_dimm_width(u16 val)
 {
-       if (cnl_get_dimm_size(val) == 0)
+       if (icl_get_dimm_size(val) == 0)
                return 0;
 
-       switch (val & CNL_DRAM_WIDTH_MASK) {
-       case CNL_DRAM_WIDTH_X8:
-       case CNL_DRAM_WIDTH_X16:
-       case CNL_DRAM_WIDTH_X32:
-               val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
+       switch (val & ICL_DRAM_WIDTH_MASK) {
+       case ICL_DRAM_WIDTH_X8:
+       case ICL_DRAM_WIDTH_X16:
+       case ICL_DRAM_WIDTH_X32:
+               val = (val & ICL_DRAM_WIDTH_MASK) >> ICL_DRAM_WIDTH_SHIFT;
                return 8 << val;
        default:
                MISSING_CASE(val);
@@ -99,12 +99,12 @@ static int cnl_get_dimm_width(u16 val)
        }
 }
 
-static int cnl_get_dimm_ranks(u16 val)
+static int icl_get_dimm_ranks(u16 val)
 {
-       if (cnl_get_dimm_size(val) == 0)
+       if (icl_get_dimm_size(val) == 0)
                return 0;
 
-       val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
+       val = (val & ICL_DRAM_RANK_MASK) >> ICL_DRAM_RANK_SHIFT;
 
        return val + 1;
 }
@@ -121,10 +121,10 @@ skl_dram_get_dimm_info(struct drm_i915_private *i915,
                       struct dram_dimm_info *dimm,
                       int channel, char dimm_name, u16 val)
 {
-       if (GRAPHICS_VER(i915) >= 10) {
-               dimm->size = cnl_get_dimm_size(val);
-               dimm->width = cnl_get_dimm_width(val);
-               dimm->ranks = cnl_get_dimm_ranks(val);
+       if (GRAPHICS_VER(i915) >= 11) {
+               dimm->size = icl_get_dimm_size(val);
+               dimm->width = icl_get_dimm_width(val);
+               dimm->ranks = icl_get_dimm_ranks(val);
        } else {
                dimm->size = skl_get_dimm_size(val);
                dimm->width = skl_get_dimm_width(val);
@@ -468,6 +468,7 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
 
        dram_info->num_channels = (val & 0xf0) >> 4;
        dram_info->num_qgv_points = (val & 0xf00) >> 8;
+       dram_info->num_psf_gv_points = (val & 0x3000) >> 12;
 
        return 0;
 }
@@ -494,15 +495,15 @@ void intel_dram_detect(struct drm_i915_private *i915)
        struct dram_info *dram_info = &i915->dram_info;
        int ret;
 
+       if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915))
+               return;
+
        /*
         * Assume level 0 watermark latency adjustment is needed until proven
         * otherwise, this w/a is not needed by bxt/glk.
         */
        dram_info->wm_lv_0_adjust_needed = !IS_GEN9_LP(i915);
 
-       if (GRAPHICS_VER(i915) < 9 || !HAS_DISPLAY(i915))
-               return;
-
        if (GRAPHICS_VER(i915) >= 12)
                ret = gen12_get_dram_info(i915);
        else if (GRAPHICS_VER(i915) >= 11)
index 12fb542..779eb2f 100644 (file)
@@ -5,6 +5,7 @@
 
 #include "intel_memory_region.h"
 #include "i915_drv.h"
+#include "i915_ttm_buddy_manager.h"
 
 static const struct {
        u16 class;
@@ -28,11 +29,6 @@ static const struct {
        },
 };
 
-struct intel_region_reserve {
-       struct list_head link;
-       struct ttm_resource *res;
-};
-
 struct intel_memory_region *
 intel_memory_region_lookup(struct drm_i915_private *i915,
                           u16 class, u16 instance)
@@ -63,27 +59,6 @@ intel_memory_region_by_type(struct drm_i915_private *i915,
        return NULL;
 }
 
-/**
- * intel_memory_region_unreserve - Unreserve all previously reserved
- * ranges
- * @mem: The region containing the reserved ranges.
- */
-void intel_memory_region_unreserve(struct intel_memory_region *mem)
-{
-       struct intel_region_reserve *reserve, *next;
-
-       if (!mem->priv_ops || !mem->priv_ops->free)
-               return;
-
-       mutex_lock(&mem->mm_lock);
-       list_for_each_entry_safe(reserve, next, &mem->reserved, link) {
-               list_del(&reserve->link);
-               mem->priv_ops->free(mem, reserve->res);
-               kfree(reserve);
-       }
-       mutex_unlock(&mem->mm_lock);
-}
-
 /**
  * intel_memory_region_reserve - Reserve a memory range
  * @mem: The region for which we want to reserve a range.
@@ -96,28 +71,11 @@ int intel_memory_region_reserve(struct intel_memory_region *mem,
                                resource_size_t offset,
                                resource_size_t size)
 {
-       int ret;
-       struct intel_region_reserve *reserve;
-
-       if (!mem->priv_ops || !mem->priv_ops->reserve)
-               return -EINVAL;
-
-       reserve = kzalloc(sizeof(*reserve), GFP_KERNEL);
-       if (!reserve)
-               return -ENOMEM;
+       struct ttm_resource_manager *man = mem->region_private;
 
-       reserve->res = mem->priv_ops->reserve(mem, offset, size);
-       if (IS_ERR(reserve->res)) {
-               ret = PTR_ERR(reserve->res);
-               kfree(reserve);
-               return ret;
-       }
-
-       mutex_lock(&mem->mm_lock);
-       list_add_tail(&reserve->link, &mem->reserved);
-       mutex_unlock(&mem->mm_lock);
+       GEM_BUG_ON(mem->is_range_manager);
 
-       return 0;
+       return i915_ttm_buddy_man_reserve(man, offset, size);
 }
 
 struct intel_memory_region *
@@ -149,9 +107,6 @@ intel_memory_region_create(struct drm_i915_private *i915,
 
        mutex_init(&mem->objects.lock);
        INIT_LIST_HEAD(&mem->objects.list);
-       INIT_LIST_HEAD(&mem->reserved);
-
-       mutex_init(&mem->mm_lock);
 
        if (ops->init) {
                err = ops->init(mem);
@@ -182,11 +137,9 @@ static void __intel_memory_region_destroy(struct kref *kref)
        struct intel_memory_region *mem =
                container_of(kref, typeof(*mem), kref);
 
-       intel_memory_region_unreserve(mem);
        if (mem->ops->release)
                mem->ops->release(mem);
 
-       mutex_destroy(&mem->mm_lock);
        mutex_destroy(&mem->objects.lock);
        kfree(mem);
 }
@@ -220,7 +173,12 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
                instance = intel_region_map[i].instance;
                switch (type) {
                case INTEL_MEMORY_SYSTEM:
-                       mem = i915_gem_shmem_setup(i915, type, instance);
+                       if (IS_DGFX(i915))
+                               mem = i915_gem_ttm_system_setup(i915, type,
+                                                               instance);
+                       else
+                               mem = i915_gem_shmem_setup(i915, type,
+                                                          instance);
                        break;
                case INTEL_MEMORY_STOLEN_LOCAL:
                        mem = i915_gem_stolen_lmem_setup(i915, type, instance);
index c7e635d..1f2b96e 100644 (file)
@@ -40,8 +40,7 @@ enum intel_region_id {
 #define REGION_STOLEN_SMEM   BIT(INTEL_REGION_STOLEN_SMEM)
 #define REGION_STOLEN_LMEM   BIT(INTEL_REGION_STOLEN_LMEM)
 
-#define I915_ALLOC_MIN_PAGE_SIZE  BIT(0)
-#define I915_ALLOC_CONTIGUOUS     BIT(1)
+#define I915_ALLOC_CONTIGUOUS     BIT(0)
 
 #define for_each_memory_region(mr, i915, id) \
        for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \
@@ -56,22 +55,14 @@ struct intel_memory_region_ops {
        int (*init_object)(struct intel_memory_region *mem,
                           struct drm_i915_gem_object *obj,
                           resource_size_t size,
+                          resource_size_t page_size,
                           unsigned int flags);
 };
 
-struct intel_memory_region_private_ops {
-       struct ttm_resource *(*reserve)(struct intel_memory_region *mem,
-                                       resource_size_t offset,
-                                       resource_size_t size);
-       void (*free)(struct intel_memory_region *mem,
-                    struct ttm_resource *res);
-};
-
 struct intel_memory_region {
        struct drm_i915_private *i915;
 
        const struct intel_memory_region_ops *ops;
-       const struct intel_memory_region_private_ops *priv_ops;
 
        struct io_mapping iomap;
        struct resource region;
@@ -79,8 +70,6 @@ struct intel_memory_region {
        /* For fake LMEM */
        struct drm_mm_node fake_mappable;
 
-       struct mutex mm_lock;
-
        struct kref kref;
 
        resource_size_t io_start;
@@ -94,8 +83,6 @@ struct intel_memory_region {
        char name[16];
        bool private; /* not for userspace */
 
-       struct list_head reserved;
-
        dma_addr_t remap_addr;
 
        struct {
@@ -103,8 +90,6 @@ struct intel_memory_region {
                struct list_head list;
        } objects;
 
-       size_t chunk_size;
-       unsigned int max_order;
        bool is_range_manager;
 
        void *region_private;
@@ -138,9 +123,15 @@ __printf(2, 3) void
 intel_memory_region_set_name(struct intel_memory_region *mem,
                             const char *fmt, ...);
 
-void intel_memory_region_unreserve(struct intel_memory_region *mem);
-
 int intel_memory_region_reserve(struct intel_memory_region *mem,
                                resource_size_t offset,
                                resource_size_t size);
+
+struct intel_memory_region *
+i915_gem_ttm_system_setup(struct drm_i915_private *i915,
+                         u16 type, u16 instance);
+struct intel_memory_region *
+i915_gem_shmem_setup(struct drm_i915_private *i915,
+                    u16 type, u16 instance);
+
 #endif
index 4e92ae1..d1d4b97 100644 (file)
@@ -81,7 +81,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
        case INTEL_PCH_CNP_DEVICE_ID_TYPE:
                drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
                drm_WARN_ON(&dev_priv->drm,
-                           !IS_CANNONLAKE(dev_priv) &&
                            !IS_COFFEELAKE(dev_priv) &&
                            !IS_COMETLAKE(dev_priv));
                return PCH_CNP;
@@ -89,7 +88,6 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
                drm_dbg_kms(&dev_priv->drm,
                            "Found Cannon Lake LP PCH (CNP-LP)\n");
                drm_WARN_ON(&dev_priv->drm,
-                           !IS_CANNONLAKE(dev_priv) &&
                            !IS_COFFEELAKE(dev_priv) &&
                            !IS_COMETLAKE(dev_priv));
                return PCH_CNP;
@@ -171,8 +169,7 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv,
                id = INTEL_PCH_MCC_DEVICE_ID_TYPE;
        else if (IS_ICELAKE(dev_priv))
                id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
-       else if (IS_CANNONLAKE(dev_priv) ||
-                IS_COFFEELAKE(dev_priv) ||
+       else if (IS_COFFEELAKE(dev_priv) ||
                 IS_COMETLAKE(dev_priv))
                id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
        else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
@@ -211,6 +208,9 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
        if (IS_DG1(dev_priv)) {
                dev_priv->pch_type = PCH_DG1;
                return;
+       } else if (IS_DG2(dev_priv)) {
+               dev_priv->pch_type = PCH_DG2;
+               return;
        }
 
        /*
index e2f3f30..7c0d83d 100644 (file)
@@ -30,6 +30,7 @@ enum intel_pch {
 
        /* Fake PCHs, functionality handled on the same PCI dev */
        PCH_DG1 = 1024,
+       PCH_DG2,
 };
 
 #define INTEL_PCH_DEVICE_ID_MASK               0xff80
@@ -62,6 +63,7 @@ enum intel_pch {
 
 #define INTEL_PCH_TYPE(dev_priv)               ((dev_priv)->pch_type)
 #define INTEL_PCH_ID(dev_priv)                 ((dev_priv)->pch_id)
+#define HAS_PCH_DG2(dev_priv)                  (INTEL_PCH_TYPE(dev_priv) == PCH_DG2)
 #define HAS_PCH_ADP(dev_priv)                  (INTEL_PCH_TYPE(dev_priv) == PCH_ADP)
 #define HAS_PCH_DG1(dev_priv)                  (INTEL_PCH_TYPE(dev_priv) == PCH_DG1)
 #define HAS_PCH_JSP(dev_priv)                  (INTEL_PCH_TYPE(dev_priv) == PCH_JSP)
index 74a8863..65bc370 100644 (file)
@@ -4584,6 +4584,117 @@ static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
        {}
 };
 
+static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
+       {
+               .active_pipes = BIT(PIPE_A),
+               .dbuf_mask = {
+                       [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+               },
+       },
+       {
+               .active_pipes = BIT(PIPE_B),
+               .dbuf_mask = {
+                       [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+               },
+       },
+       {
+               .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+               .dbuf_mask = {
+                       [PIPE_A] = BIT(DBUF_S1),
+                       [PIPE_B] = BIT(DBUF_S2),
+               },
+       },
+       {
+               .active_pipes = BIT(PIPE_C),
+               .dbuf_mask = {
+                       [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+               },
+       },
+       {
+               .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+               .dbuf_mask = {
+                       [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+                       [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+               },
+       },
+       {
+               .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+               .dbuf_mask = {
+                       [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+                       [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+               },
+       },
+       {
+               .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+               .dbuf_mask = {
+                       [PIPE_A] = BIT(DBUF_S1),
+                       [PIPE_B] = BIT(DBUF_S2),
+                       [PIPE_C] = BIT(DBUF_S3) | BIT(DBUF_S4),
+               },
+       },
+       {
+               .active_pipes = BIT(PIPE_D),
+               .dbuf_mask = {
+                       [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+               },
+       },
+       {
+               .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+               .dbuf_mask = {
+                       [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+                       [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+               },
+       },
+       {
+               .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+               .dbuf_mask = {
+                       [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+                       [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+               },
+       },
+       {
+               .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+               .dbuf_mask = {
+                       [PIPE_A] = BIT(DBUF_S1),
+                       [PIPE_B] = BIT(DBUF_S2),
+                       [PIPE_D] = BIT(DBUF_S3) | BIT(DBUF_S4),
+               },
+       },
+       {
+               .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+               .dbuf_mask = {
+                       [PIPE_C] = BIT(DBUF_S3),
+                       [PIPE_D] = BIT(DBUF_S4),
+               },
+       },
+       {
+               .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+               .dbuf_mask = {
+                       [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+                       [PIPE_C] = BIT(DBUF_S3),
+                       [PIPE_D] = BIT(DBUF_S4),
+               },
+       },
+       {
+               .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+               .dbuf_mask = {
+                       [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+                       [PIPE_C] = BIT(DBUF_S3),
+                       [PIPE_D] = BIT(DBUF_S4),
+               },
+       },
+       {
+               .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+               .dbuf_mask = {
+                       [PIPE_A] = BIT(DBUF_S1),
+                       [PIPE_B] = BIT(DBUF_S2),
+                       [PIPE_C] = BIT(DBUF_S3),
+                       [PIPE_D] = BIT(DBUF_S4),
+               },
+       },
+       {}
+};
+
 static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
        {
                .active_pipes = BIT(PIPE_A),
@@ -4759,12 +4870,19 @@ static u32 adlp_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
        return compute_dbuf_slices(pipe, active_pipes, adlp_allowed_dbufs);
 }
 
+static u32 dg2_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+{
+       return compute_dbuf_slices(pipe, active_pipes, dg2_allowed_dbufs);
+}
+
 static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
 {
        struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
        enum pipe pipe = crtc->pipe;
 
-       if (IS_ALDERLAKE_P(dev_priv))
+       if (IS_DG2(dev_priv))
+               return dg2_compute_dbuf_slices(pipe, active_pipes);
+       else if (IS_ALDERLAKE_P(dev_priv))
                return adlp_compute_dbuf_slices(pipe, active_pipes);
        else if (DISPLAY_VER(dev_priv) == 12)
                return tgl_compute_dbuf_slices(pipe, active_pipes);
@@ -7345,30 +7463,24 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
        intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
                   ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
 
-       /* This is not an Wa. Enable to reduce Sampler power */
-       intel_uncore_write(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
-                  intel_uncore_read(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
-
        /*Wa_14010594013:icl, ehl */
        intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
-                        0, CNL_DELAY_PMRSP);
+                        0, ICL_DELAY_PMRSP);
 }
 
 static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv)
 {
-       /* Wa_1409120013:tgl,rkl,adl_s,dg1 */
-       intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
-                          ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+       /* Wa_1409120013:tgl,rkl,adl-s,dg1 */
+       if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) ||
+           IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv))
+               intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN,
+                                  ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
 
        /* Wa_1409825376:tgl (pre-prod)*/
-       if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B1))
+       if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0))
                intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
                           TGL_VRH_GATING_DIS);
 
-       /* Wa_14011059788:tgl,rkl,adl_s,dg1 */
-       intel_uncore_rmw(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN,
-                        0, DFR_DISABLE);
-
        /* Wa_14013723622:tgl,rkl,dg1,adl-s */
        if (DISPLAY_VER(dev_priv) == 12)
                intel_uncore_rmw(&dev_priv->uncore, CLKREQ_POLICY,
@@ -7388,7 +7500,7 @@ static void dg1_init_clock_gating(struct drm_i915_private *dev_priv)
        gen12lp_init_clock_gating(dev_priv);
 
        /* Wa_1409836686:dg1[a0] */
-       if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0))
+       if (IS_DG1_GT_STEP(dev_priv, STEP_A0, STEP_B0))
                intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) |
                           DPT_GATING_DIS);
 }
@@ -7403,43 +7515,6 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
                   CNP_PWM_CGE_GATING_DISABLE);
 }
 
-static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
-{
-       u32 val;
-       cnp_init_clock_gating(dev_priv);
-
-       /* This is not an Wa. Enable for better image quality */
-       intel_uncore_write(&dev_priv->uncore, _3D_CHICKEN3,
-                  _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE));
-
-       /* WaEnableChickenDCPR:cnl */
-       intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
-                  intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
-
-       /*
-        * WaFbcWakeMemOn:cnl
-        * Display WA #0859: cnl
-        */
-       intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) |
-                  DISP_FBC_MEMORY_WAKE);
-
-       val = intel_uncore_read(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE);
-       /* ReadHitWriteOnlyDisable:cnl */
-       val |= RCCUNIT_CLKGATE_DIS;
-       intel_uncore_write(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE, val);
-
-       /* Wa_2201832410:cnl */
-       val = intel_uncore_read(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE);
-       val |= GWUNIT_CLKGATE_DIS;
-       intel_uncore_write(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE, val);
-
-       /* WaDisableVFclkgate:cnl */
-       /* WaVFUnitClockGatingDisable:cnl */
-       val = intel_uncore_read(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE);
-       val |= VFUNIT_CLKGATE_DIS;
-       intel_uncore_write(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE, val);
-}
-
 static void cfl_init_clock_gating(struct drm_i915_private *dev_priv)
 {
        cnp_init_clock_gating(dev_priv);
@@ -7473,12 +7548,12 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv)
                   FBC_LLC_FULLY_OPEN);
 
        /* WaDisableSDEUnitClockGating:kbl */
-       if (IS_KBL_GT_STEP(dev_priv, 0, STEP_B0))
+       if (IS_KBL_GT_STEP(dev_priv, 0, STEP_C0))
                intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) |
                           GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
 
        /* WaDisableGamClockGating:kbl */
-       if (IS_KBL_GT_STEP(dev_priv, 0, STEP_B0))
+       if (IS_KBL_GT_STEP(dev_priv, 0, STEP_C0))
                intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) |
                           GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
 
@@ -7868,8 +7943,6 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
                dev_priv->display.init_clock_gating = gen12lp_init_clock_gating;
        else if (GRAPHICS_VER(dev_priv) == 11)
                dev_priv->display.init_clock_gating = icl_init_clock_gating;
-       else if (IS_CANNONLAKE(dev_priv))
-               dev_priv->display.init_clock_gating = cnl_init_clock_gating;
        else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
                dev_priv->display.init_clock_gating = cfl_init_clock_gating;
        else if (IS_SKYLAKE(dev_priv))
index 27fe066..98c7339 100644 (file)
@@ -8,6 +8,7 @@
 
 #include "i915_drv.h"
 #include "i915_scatterlist.h"
+#include "i915_ttm_buddy_manager.h"
 
 #include "intel_region_ttm.h"
 
@@ -50,12 +51,16 @@ void intel_region_ttm_device_fini(struct drm_i915_private *dev_priv)
  * driver-private types for now, reserving TTM_PL_VRAM for stolen
  * memory and TTM_PL_TT for GGTT use if decided to implement this.
  */
-static int intel_region_to_ttm_type(struct intel_memory_region *mem)
+int intel_region_to_ttm_type(const struct intel_memory_region *mem)
 {
        int type;
 
        GEM_BUG_ON(mem->type != INTEL_MEMORY_LOCAL &&
-                  mem->type != INTEL_MEMORY_MOCK);
+                  mem->type != INTEL_MEMORY_MOCK &&
+                  mem->type != INTEL_MEMORY_SYSTEM);
+
+       if (mem->type == INTEL_MEMORY_SYSTEM)
+               return TTM_PL_SYSTEM;
 
        type = mem->instance + TTM_PL_PRIV;
        GEM_BUG_ON(type >= TTM_NUM_MEM_TYPES);
@@ -63,72 +68,29 @@ static int intel_region_to_ttm_type(struct intel_memory_region *mem)
        return type;
 }
 
-static struct ttm_resource *
-intel_region_ttm_node_reserve(struct intel_memory_region *mem,
-                             resource_size_t offset,
-                             resource_size_t size)
-{
-       struct ttm_resource_manager *man = mem->region_private;
-       struct ttm_place place = {};
-       struct ttm_buffer_object mock_bo = {};
-       struct ttm_resource *res;
-       int ret;
-
-       /*
-        * Having to use a mock_bo is unfortunate but stems from some
-        * drivers having private managers that insist to know what the
-        * allocate memory is intended for, using it to send private
-        * data to the manager. Also recently the bo has been used to send
-        * alignment info to the manager. Assume that apart from the latter,
-        * none of the managers we use will ever access the buffer object
-        * members, hoping we can pass the alignment info in the
-        * struct ttm_place in the future.
-        */
-
-       place.fpfn = offset >> PAGE_SHIFT;
-       place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
-       mock_bo.base.size = size;
-       ret = man->func->alloc(man, &mock_bo, &place, &res);
-       if (ret == -ENOSPC)
-               ret = -ENXIO;
-
-       return ret ? ERR_PTR(ret) : res;
-}
-
 /**
- * intel_region_ttm_node_free - Free a node allocated from a resource manager
- * @mem: The region the node was allocated from.
- * @node: The opaque node representing an allocation.
+ * intel_region_ttm_init - Initialize a memory region for TTM.
+ * @mem: The region to initialize.
+ *
+ * This function initializes a suitable TTM resource manager for the
+ * region, and if it's an LMEM region type, attaches it to the TTM
+ * device. MOCK regions are NOT attached to the TTM device, since we don't
+ * have one for the mock selftests.
+ *
+ * Return: 0 on success, negative error code on failure.
  */
-void intel_region_ttm_node_free(struct intel_memory_region *mem,
-                               struct ttm_resource *res)
-{
-       struct ttm_resource_manager *man = mem->region_private;
-
-       man->func->free(man, res);
-}
-
-static const struct intel_memory_region_private_ops priv_ops = {
-       .reserve = intel_region_ttm_node_reserve,
-       .free = intel_region_ttm_node_free,
-};
-
 int intel_region_ttm_init(struct intel_memory_region *mem)
 {
        struct ttm_device *bdev = &mem->i915->bdev;
        int mem_type = intel_region_to_ttm_type(mem);
        int ret;
 
-       ret = ttm_range_man_init(bdev, mem_type, false,
-                                resource_size(&mem->region) >> PAGE_SHIFT);
+       ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
+                                     resource_size(&mem->region),
+                                     mem->min_page_size, PAGE_SIZE);
        if (ret)
                return ret;
 
-       mem->chunk_size = PAGE_SIZE;
-       mem->max_order =
-               get_order(rounddown_pow_of_two(resource_size(&mem->region)));
-       mem->is_range_manager = true;
-       mem->priv_ops = &priv_ops;
        mem->region_private = ttm_manager_type(bdev, mem_type);
 
        return 0;
@@ -146,17 +108,17 @@ void intel_region_ttm_fini(struct intel_memory_region *mem)
 {
        int ret;
 
-       ret = ttm_range_man_fini(&mem->i915->bdev,
-                                intel_region_to_ttm_type(mem));
+       ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
+                                     intel_region_to_ttm_type(mem));
        GEM_WARN_ON(ret);
        mem->region_private = NULL;
 }
 
 /**
- * intel_region_ttm_node_to_st - Convert an opaque TTM resource manager node
+ * intel_region_ttm_resource_to_st - Convert an opaque TTM resource manager resource
  * to an sg_table.
  * @mem: The memory region.
- * @node: The resource manager node obtained from the TTM resource manager.
+ * @res: The resource manager resource obtained from the TTM resource manager.
  *
  * The gem backends typically use sg-tables for operations on the underlying
  * io_memory. So provide a way for the backends to translate the
@@ -164,20 +126,23 @@ void intel_region_ttm_fini(struct intel_memory_region *mem)
  *
  * Return: A malloced sg_table on success, an error pointer on failure.
  */
-struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
-                                            struct ttm_resource *res)
+struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem,
+                                                struct ttm_resource *res)
 {
-       struct ttm_range_mgr_node *range_node =
-               container_of(res, typeof(*range_node), base);
+       if (mem->is_range_manager) {
+               struct ttm_range_mgr_node *range_node =
+                       to_ttm_range_mgr_node(res);
 
-       GEM_WARN_ON(!mem->is_range_manager);
-       return i915_sg_from_mm_node(&range_node->mm_nodes[0],
-                                   mem->region.start);
+               return i915_sg_from_mm_node(&range_node->mm_nodes[0],
+                                           mem->region.start);
+       } else {
+               return i915_sg_from_buddy_resource(res, mem->region.start);
+       }
 }
 
 #ifdef CONFIG_DRM_I915_SELFTEST
 /**
- * intel_region_ttm_node_alloc - Allocate memory resources from a region
+ * intel_region_ttm_resource_alloc - Allocate memory resources from a region
  * @mem: The memory region,
  * @size: The requested size in bytes
  * @flags: Allocation flags
@@ -186,15 +151,15 @@ struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
  * memory from standalone TTM range managers, without the TTM eviction
  * functionality. Don't use if you are not completely sure that's the
  * case. The returned opaque node can be converted to an sg_table using
- * intel_region_ttm_node_to_st(), and can be freed using
- * intel_region_ttm_node_free().
+ * intel_region_ttm_resource_to_st(), and can be freed using
+ * intel_region_ttm_resource_free().
  *
  * Return: A valid pointer on success, an error pointer on failure.
  */
 struct ttm_resource *
-intel_region_ttm_node_alloc(struct intel_memory_region *mem,
-                           resource_size_t size,
-                           unsigned int flags)
+intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
+                               resource_size_t size,
+                               unsigned int flags)
 {
        struct ttm_resource_manager *man = mem->region_private;
        struct ttm_place place = {};
@@ -202,25 +167,26 @@ intel_region_ttm_node_alloc(struct intel_memory_region *mem,
        struct ttm_resource *res;
        int ret;
 
-       /*
-        * We ignore the flags for now since we're using the range
-        * manager and contigous and min page size would be fulfilled
-        * by default if size is min page size aligned.
-        */
        mock_bo.base.size = size;
-
-       if (mem->is_range_manager) {
-               if (size >= SZ_1G)
-                       mock_bo.page_alignment = SZ_1G >> PAGE_SHIFT;
-               else if (size >= SZ_2M)
-                       mock_bo.page_alignment = SZ_2M >> PAGE_SHIFT;
-               else if (size >= SZ_64K)
-                       mock_bo.page_alignment = SZ_64K >> PAGE_SHIFT;
-       }
+       place.flags = flags;
 
        ret = man->func->alloc(man, &mock_bo, &place, &res);
        if (ret == -ENOSPC)
                ret = -ENXIO;
        return ret ? ERR_PTR(ret) : res;
 }
+
 #endif
+
+/**
+ * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
+ * @mem: The region the resource was allocated from.
+ * @res: The opaque resource representing an allocation.
+ */
+void intel_region_ttm_resource_free(struct intel_memory_region *mem,
+                                   struct ttm_resource *res)
+{
+       struct ttm_resource_manager *man = mem->region_private;
+
+       man->func->free(man, res);
+}
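
A hedged, selftest-style sketch tying the renamed helpers together: allocate a resource from a region, translate it to an sg_table for the GEM backend, then release both. It assumes it lives alongside the code above (so the existing includes apply); the size, flags and the sg_table teardown are assumptions for illustration only.

    static int example_alloc_and_map(struct intel_memory_region *mem)
    {
            struct ttm_resource *res;
            struct sg_table *st;

            res = intel_region_ttm_resource_alloc(mem, SZ_2M, 0);
            if (IS_ERR(res))
                    return PTR_ERR(res);

            st = intel_region_ttm_resource_to_st(mem, res);
            if (IS_ERR(st)) {
                    intel_region_ttm_resource_free(mem, res);
                    return PTR_ERR(st);
            }

            /* ... use the sg_table ... */

            sg_free_table(st);
            kfree(st);
            intel_region_ttm_resource_free(mem, res);
            return 0;
    }
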
index e8cf830..6f44075 100644 (file)
@@ -22,18 +22,20 @@ int intel_region_ttm_init(struct intel_memory_region *mem);
 
 void intel_region_ttm_fini(struct intel_memory_region *mem);
 
-struct sg_table *intel_region_ttm_node_to_st(struct intel_memory_region *mem,
-                                            struct ttm_resource *res);
+struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem,
+                                                struct ttm_resource *res);
 
-void intel_region_ttm_node_free(struct intel_memory_region *mem,
-                               struct ttm_resource *node);
+void intel_region_ttm_resource_free(struct intel_memory_region *mem,
+                                   struct ttm_resource *res);
+
+int intel_region_to_ttm_type(const struct intel_memory_region *mem);
 
 struct ttm_device_funcs *i915_ttm_driver(void);
 
 #ifdef CONFIG_DRM_I915_SELFTEST
 struct ttm_resource *
-intel_region_ttm_node_alloc(struct intel_memory_region *mem,
-                           resource_size_t size,
-                           unsigned int flags);
+intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
+                               resource_size_t size,
+                               unsigned int flags);
 #endif
 #endif /* _INTEL_REGION_TTM_H_ */
index f0a82b3..e304bf4 100644 (file)
@@ -556,17 +556,22 @@ out:
 #undef COND
 }
 
-void intel_pcode_init(struct drm_i915_private *i915)
+int intel_pcode_init(struct drm_i915_private *i915)
 {
-       int ret;
+       int ret = 0;
 
        if (!IS_DGFX(i915))
-               return;
+               return ret;
 
        ret = skl_pcode_request(i915, DG1_PCODE_STATUS,
                                DG1_UNCORE_GET_INIT_STATUS,
                                DG1_UNCORE_INIT_STATUS_COMPLETE,
-                               DG1_UNCORE_INIT_STATUS_COMPLETE, 50);
+                               DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
+
+       drm_dbg(&i915->drm, "PCODE init status %d\n", ret);
+
        if (ret)
                drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n");
+
+       return ret;
 }
index 094c7b1..d1d14bc 100644 (file)
@@ -138,6 +138,6 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox,
 int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
                      u32 reply_mask, u32 reply, int timeout_base_ms);
 
-void intel_pcode_init(struct drm_i915_private *i915);
+int intel_pcode_init(struct drm_i915_private *i915);
 
 #endif /* _INTEL_SIDEBAND_H */
index ba9479a..6cf9676 100644 (file)
@@ -7,16 +7,32 @@
 #include "intel_step.h"
 
 /*
- * KBL revision ID ordering is bizarre; higher revision ID's map to lower
- * steppings in some cases.  So rather than test against the revision ID
- * directly, let's map that into our own range of increasing ID's that we
- * can test against in a regular manner.
+ * Some platforms have unusual ways of mapping PCI revision ID to GT/display
+ * steppings.  E.g., in some cases a higher PCI revision may translate to a
+ * lower stepping of the GT and/or display IP.  This file provides lookup
+ * tables to map the PCI revision into a standard set of stepping values that
+ * can be compared numerically.
+ *
+ * Also note that some revisions/steppings may have been set aside as
+ * placeholders but never materialized in real hardware; in those cases there
+ * may be jumps in the revision IDs or stepping values in the tables below.
  */
 
+/*
+ * Some platforms always have the same stepping value for GT and display;
+ * use a macro to define these to make it easier to identify the platforms
+ * where the two steppings can deviate.
+ */
+#define COMMON_STEP(x)  .gt_step = STEP_##x, .display_step = STEP_##x
+
+static const struct intel_step_info skl_revids[] = {
+       [0x6] = { COMMON_STEP(G0) },
+       [0x7] = { COMMON_STEP(H0) },
+       [0x9] = { COMMON_STEP(J0) },
+       [0xA] = { COMMON_STEP(I1) },
+};
 
-/* FIXME: what about REVID_E0 */
 static const struct intel_step_info kbl_revids[] = {
-       [0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
        [1] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
        [2] = { .gt_step = STEP_C0, .display_step = STEP_B0 },
        [3] = { .gt_step = STEP_D0, .display_step = STEP_B0 },
@@ -26,7 +42,27 @@ static const struct intel_step_info kbl_revids[] = {
        [7] = { .gt_step = STEP_G0, .display_step = STEP_C0 },
 };
 
-static const struct intel_step_info tgl_uy_revid_step_tbl[] = {
+static const struct intel_step_info bxt_revids[] = {
+       [0xA] = { COMMON_STEP(C0) },
+       [0xB] = { COMMON_STEP(C0) },
+       [0xC] = { COMMON_STEP(D0) },
+       [0xD] = { COMMON_STEP(E0) },
+};
+
+static const struct intel_step_info glk_revids[] = {
+       [3] = { COMMON_STEP(B0) },
+};
+
+static const struct intel_step_info icl_revids[] = {
+       [7] = { COMMON_STEP(D0) },
+};
+
+static const struct intel_step_info jsl_ehl_revids[] = {
+       [0] = { COMMON_STEP(A0) },
+       [1] = { COMMON_STEP(B0) },
+};
+
+static const struct intel_step_info tgl_uy_revids[] = {
        [0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
        [1] = { .gt_step = STEP_B0, .display_step = STEP_C0 },
        [2] = { .gt_step = STEP_B1, .display_step = STEP_C0 },
@@ -34,12 +70,23 @@ static const struct intel_step_info tgl_uy_revid_step_tbl[] = {
 };
 
 /* Same GT stepping between tgl_uy_revids and tgl_revids don't mean the same HW */
-static const struct intel_step_info tgl_revid_step_tbl[] = {
+static const struct intel_step_info tgl_revids[] = {
        [0] = { .gt_step = STEP_A0, .display_step = STEP_B0 },
        [1] = { .gt_step = STEP_B0, .display_step = STEP_D0 },
 };
 
-static const struct intel_step_info adls_revid_step_tbl[] = {
+static const struct intel_step_info rkl_revids[] = {
+       [0] = { COMMON_STEP(A0) },
+       [1] = { COMMON_STEP(B0) },
+       [4] = { COMMON_STEP(C0) },
+};
+
+static const struct intel_step_info dg1_revids[] = {
+       [0] = { COMMON_STEP(A0) },
+       [1] = { COMMON_STEP(B0) },
+};
+
+static const struct intel_step_info adls_revids[] = {
        [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
        [0x1] = { .gt_step = STEP_A0, .display_step = STEP_A2 },
        [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
@@ -47,13 +94,33 @@ static const struct intel_step_info adls_revid_step_tbl[] = {
        [0xC] = { .gt_step = STEP_D0, .display_step = STEP_C0 },
 };
 
-static const struct intel_step_info adlp_revid_step_tbl[] = {
+static const struct intel_step_info adlp_revids[] = {
        [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
        [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
        [0x8] = { .gt_step = STEP_C0, .display_step = STEP_C0 },
        [0xC] = { .gt_step = STEP_C0, .display_step = STEP_D0 },
 };
 
+static const struct intel_step_info xehpsdv_revids[] = {
+       [0x0] = { .gt_step = STEP_A0 },
+       [0x1] = { .gt_step = STEP_A1 },
+       [0x4] = { .gt_step = STEP_B0 },
+       [0x8] = { .gt_step = STEP_C0 },
+};
+
+static const struct intel_step_info dg2_g10_revid_step_tbl[] = {
+       [0x0] = { .gt_step = STEP_A0, .display_step = STEP_A0 },
+       [0x1] = { .gt_step = STEP_A1, .display_step = STEP_A0 },
+       [0x4] = { .gt_step = STEP_B0, .display_step = STEP_B0 },
+       [0x8] = { .gt_step = STEP_C0, .display_step = STEP_C0 },
+};
+
+static const struct intel_step_info dg2_g11_revid_step_tbl[] = {
+       [0x0] = { .gt_step = STEP_A0, .display_step = STEP_B0 },
+       [0x4] = { .gt_step = STEP_B0, .display_step = STEP_C0 },
+       [0x5] = { .gt_step = STEP_B1, .display_step = STEP_C0 },
+};
+
 void intel_step_init(struct drm_i915_private *i915)
 {
        const struct intel_step_info *revids = NULL;
@@ -61,21 +128,51 @@ void intel_step_init(struct drm_i915_private *i915)
        int revid = INTEL_REVID(i915);
        struct intel_step_info step = {};
 
-       if (IS_ALDERLAKE_P(i915)) {
-               revids = adlp_revid_step_tbl;
-               size = ARRAY_SIZE(adlp_revid_step_tbl);
+       if (IS_DG2_G10(i915)) {
+               revids = dg2_g10_revid_step_tbl;
+               size = ARRAY_SIZE(dg2_g10_revid_step_tbl);
+       } else if (IS_DG2_G11(i915)) {
+               revids = dg2_g11_revid_step_tbl;
+               size = ARRAY_SIZE(dg2_g11_revid_step_tbl);
+       } else if (IS_XEHPSDV(i915)) {
+               revids = xehpsdv_revids;
+               size = ARRAY_SIZE(xehpsdv_revids);
+       } else if (IS_ALDERLAKE_P(i915)) {
+               revids = adlp_revids;
+               size = ARRAY_SIZE(adlp_revids);
        } else if (IS_ALDERLAKE_S(i915)) {
-               revids = adls_revid_step_tbl;
-               size = ARRAY_SIZE(adls_revid_step_tbl);
+               revids = adls_revids;
+               size = ARRAY_SIZE(adls_revids);
+       } else if (IS_DG1(i915)) {
+               revids = dg1_revids;
+               size = ARRAY_SIZE(dg1_revids);
+       } else if (IS_ROCKETLAKE(i915)) {
+               revids = rkl_revids;
+               size = ARRAY_SIZE(rkl_revids);
        } else if (IS_TGL_U(i915) || IS_TGL_Y(i915)) {
-               revids = tgl_uy_revid_step_tbl;
-               size = ARRAY_SIZE(tgl_uy_revid_step_tbl);
+               revids = tgl_uy_revids;
+               size = ARRAY_SIZE(tgl_uy_revids);
        } else if (IS_TIGERLAKE(i915)) {
-               revids = tgl_revid_step_tbl;
-               size = ARRAY_SIZE(tgl_revid_step_tbl);
+               revids = tgl_revids;
+               size = ARRAY_SIZE(tgl_revids);
+       } else if (IS_JSL_EHL(i915)) {
+               revids = jsl_ehl_revids;
+               size = ARRAY_SIZE(jsl_ehl_revids);
+       } else if (IS_ICELAKE(i915)) {
+               revids = icl_revids;
+               size = ARRAY_SIZE(icl_revids);
+       } else if (IS_GEMINILAKE(i915)) {
+               revids = glk_revids;
+               size = ARRAY_SIZE(glk_revids);
+       } else if (IS_BROXTON(i915)) {
+               revids = bxt_revids;
+               size = ARRAY_SIZE(bxt_revids);
        } else if (IS_KABYLAKE(i915)) {
                revids = kbl_revids;
                size = ARRAY_SIZE(kbl_revids);
+       } else if (IS_SKYLAKE(i915)) {
+               revids = skl_revids;
+               size = ARRAY_SIZE(skl_revids);
        }
 
        /* Not using the stepping scheme for the platform yet. */
@@ -114,3 +211,17 @@ void intel_step_init(struct drm_i915_private *i915)
 
        RUNTIME_INFO(i915)->step = step;
 }
+
+#define STEP_NAME_CASE(name)   \
+       case STEP_##name:       \
+               return #name;
+
+const char *intel_step_name(enum intel_step step)
+{
+       switch (step) {
+       STEP_NAME_LIST(STEP_NAME_CASE);
+
+       default:
+               return "**";
+       }
+}
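
For clarity, the X-macro above expands each entry of STEP_NAME_LIST() into a case label, so the switch in intel_step_name() stays in lockstep with the enum generated in intel_step.h from the same list; an abridged view of the expanded form:

    const char *intel_step_name(enum intel_step step)
    {
            switch (step) {
            case STEP_A0: return "A0";
            case STEP_A1: return "A1";
            /* ... one case per entry in STEP_NAME_LIST() ... */
            case STEP_J0: return "J0";
            default:
                    return "**";
            }
    }
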
index 958a8bb..f6641e2 100644 (file)
@@ -15,26 +15,39 @@ struct intel_step_info {
        u8 display_step;
 };
 
+#define STEP_ENUM_VAL(name)  STEP_##name,
+
+#define STEP_NAME_LIST(func)           \
+       func(A0)                        \
+       func(A1)                        \
+       func(A2)                        \
+       func(B0)                        \
+       func(B1)                        \
+       func(B2)                        \
+       func(C0)                        \
+       func(C1)                        \
+       func(D0)                        \
+       func(D1)                        \
+       func(E0)                        \
+       func(F0)                        \
+       func(G0)                        \
+       func(H0)                        \
+       func(I0)                        \
+       func(I1)                        \
+       func(J0)
+
 /*
  * Symbolic steppings that do not match the hardware. These are valid both as gt
  * and display steppings as symbolic names.
  */
 enum intel_step {
        STEP_NONE = 0,
-       STEP_A0,
-       STEP_A2,
-       STEP_B0,
-       STEP_B1,
-       STEP_C0,
-       STEP_D0,
-       STEP_D1,
-       STEP_E0,
-       STEP_F0,
-       STEP_G0,
+       STEP_NAME_LIST(STEP_ENUM_VAL)
        STEP_FUTURE,
        STEP_FOREVER,
 };
 
 void intel_step_init(struct drm_i915_private *i915);
+const char *intel_step_name(enum intel_step step);
 
 #endif /* __INTEL_STEP_H__ */
index 7178bc6..6b38bc2 100644 (file)
@@ -24,6 +24,8 @@
 #include <linux/pm_runtime.h>
 #include <asm/iosf_mbi.h>
 
+#include "gt/intel_lrc_reg.h" /* for shadow reg list */
+
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "i915_vgpu.h"
@@ -68,8 +70,14 @@ static const char * const forcewake_domain_names[] = {
        "vdbox1",
        "vdbox2",
        "vdbox3",
+       "vdbox4",
+       "vdbox5",
+       "vdbox6",
+       "vdbox7",
        "vebox0",
        "vebox1",
+       "vebox2",
+       "vebox3",
 };
 
 const char *
@@ -952,30 +960,80 @@ static const i915_reg_t gen8_shadowed_regs[] = {
 };
 
 static const i915_reg_t gen11_shadowed_regs[] = {
-       RING_TAIL(RENDER_RING_BASE),            /* 0x2000 (base) */
-       GEN6_RPNSWREQ,                          /* 0xA008 */
-       GEN6_RC_VIDEO_FREQ,                     /* 0xA00C */
-       RING_TAIL(BLT_RING_BASE),               /* 0x22000 (base) */
-       RING_TAIL(GEN11_BSD_RING_BASE),         /* 0x1C0000 (base) */
-       RING_TAIL(GEN11_BSD2_RING_BASE),        /* 0x1C4000 (base) */
-       RING_TAIL(GEN11_VEBOX_RING_BASE),       /* 0x1C8000 (base) */
-       RING_TAIL(GEN11_BSD3_RING_BASE),        /* 0x1D0000 (base) */
-       RING_TAIL(GEN11_BSD4_RING_BASE),        /* 0x1D4000 (base) */
-       RING_TAIL(GEN11_VEBOX2_RING_BASE),      /* 0x1D8000 (base) */
+       RING_TAIL(RENDER_RING_BASE),                    /* 0x2000 (base) */
+       RING_EXECLIST_CONTROL(RENDER_RING_BASE),        /* 0x2550 */
+       GEN6_RPNSWREQ,                                  /* 0xA008 */
+       GEN6_RC_VIDEO_FREQ,                             /* 0xA00C */
+       RING_TAIL(BLT_RING_BASE),                       /* 0x22000 (base) */
+       RING_EXECLIST_CONTROL(BLT_RING_BASE),           /* 0x22550 */
+       RING_TAIL(GEN11_BSD_RING_BASE),                 /* 0x1C0000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE),     /* 0x1C0550 */
+       RING_TAIL(GEN11_BSD2_RING_BASE),                /* 0x1C4000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE),    /* 0x1C4550 */
+       RING_TAIL(GEN11_VEBOX_RING_BASE),               /* 0x1C8000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE),   /* 0x1C8550 */
+       RING_TAIL(GEN11_BSD3_RING_BASE),                /* 0x1D0000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE),    /* 0x1D0550 */
+       RING_TAIL(GEN11_BSD4_RING_BASE),                /* 0x1D4000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE),    /* 0x1D4550 */
+       RING_TAIL(GEN11_VEBOX2_RING_BASE),              /* 0x1D8000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE),  /* 0x1D8550 */
        /* TODO: Other registers are not yet used */
 };
 
 static const i915_reg_t gen12_shadowed_regs[] = {
-       RING_TAIL(RENDER_RING_BASE),            /* 0x2000 (base) */
-       GEN6_RPNSWREQ,                          /* 0xA008 */
-       GEN6_RC_VIDEO_FREQ,                     /* 0xA00C */
-       RING_TAIL(BLT_RING_BASE),               /* 0x22000 (base) */
-       RING_TAIL(GEN11_BSD_RING_BASE),         /* 0x1C0000 (base) */
-       RING_TAIL(GEN11_BSD2_RING_BASE),        /* 0x1C4000 (base) */
-       RING_TAIL(GEN11_VEBOX_RING_BASE),       /* 0x1C8000 (base) */
-       RING_TAIL(GEN11_BSD3_RING_BASE),        /* 0x1D0000 (base) */
-       RING_TAIL(GEN11_BSD4_RING_BASE),        /* 0x1D4000 (base) */
-       RING_TAIL(GEN11_VEBOX2_RING_BASE),      /* 0x1D8000 (base) */
+       RING_TAIL(RENDER_RING_BASE),                    /* 0x2000 (base) */
+       RING_EXECLIST_CONTROL(RENDER_RING_BASE),        /* 0x2550 */
+       GEN6_RPNSWREQ,                                  /* 0xA008 */
+       GEN6_RC_VIDEO_FREQ,                             /* 0xA00C */
+       RING_TAIL(BLT_RING_BASE),                       /* 0x22000 (base) */
+       RING_EXECLIST_CONTROL(BLT_RING_BASE),           /* 0x22550 */
+       RING_TAIL(GEN11_BSD_RING_BASE),                 /* 0x1C0000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE),     /* 0x1C0550 */
+       RING_TAIL(GEN11_BSD2_RING_BASE),                /* 0x1C4000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE),    /* 0x1C4550 */
+       RING_TAIL(GEN11_VEBOX_RING_BASE),               /* 0x1C8000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE),   /* 0x1C8550 */
+       RING_TAIL(GEN11_BSD3_RING_BASE),                /* 0x1D0000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE),    /* 0x1D0550 */
+       RING_TAIL(GEN11_BSD4_RING_BASE),                /* 0x1D4000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE),    /* 0x1D4550 */
+       RING_TAIL(GEN11_VEBOX2_RING_BASE),              /* 0x1D8000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE),  /* 0x1D8550 */
+       /* TODO: Other registers are not yet used */
+};
+
+static const i915_reg_t xehp_shadowed_regs[] = {
+       RING_TAIL(RENDER_RING_BASE),                    /* 0x2000 (base) */
+       RING_EXECLIST_CONTROL(RENDER_RING_BASE),        /* 0x2550 */
+       GEN6_RPNSWREQ,                                  /* 0xA008 */
+       GEN6_RC_VIDEO_FREQ,                             /* 0xA00C */
+       RING_TAIL(BLT_RING_BASE),                       /* 0x22000 (base) */
+       RING_EXECLIST_CONTROL(BLT_RING_BASE),           /* 0x22550 */
+       RING_TAIL(GEN11_BSD_RING_BASE),                 /* 0x1C0000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE),     /* 0x1C0550 */
+       RING_TAIL(GEN11_BSD2_RING_BASE),                /* 0x1C4000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE),    /* 0x1C4550 */
+       RING_TAIL(GEN11_VEBOX_RING_BASE),               /* 0x1C8000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE),   /* 0x1C8550 */
+       RING_TAIL(GEN11_BSD3_RING_BASE),                /* 0x1D0000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE),    /* 0x1D0550 */
+       RING_TAIL(GEN11_BSD4_RING_BASE),                /* 0x1D4000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE),    /* 0x1D4550 */
+       RING_TAIL(GEN11_VEBOX2_RING_BASE),              /* 0x1D8000 (base) */
+       RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE),  /* 0x1D8550 */
+       RING_TAIL(XEHP_BSD5_RING_BASE),                 /* 0x1E0000 (base) */
+       RING_EXECLIST_CONTROL(XEHP_BSD5_RING_BASE),     /* 0x1E0550 */
+       RING_TAIL(XEHP_BSD6_RING_BASE),                 /* 0x1E4000 (base) */
+       RING_EXECLIST_CONTROL(XEHP_BSD6_RING_BASE),     /* 0x1E4550 */
+       RING_TAIL(XEHP_VEBOX3_RING_BASE),               /* 0x1E8000 (base) */
+       RING_EXECLIST_CONTROL(XEHP_VEBOX3_RING_BASE),   /* 0x1E8550 */
+       RING_TAIL(XEHP_BSD7_RING_BASE),                 /* 0x1F0000 (base) */
+       RING_EXECLIST_CONTROL(XEHP_BSD7_RING_BASE),     /* 0x1F0550 */
+       RING_TAIL(XEHP_BSD8_RING_BASE),                 /* 0x1F4000 (base) */
+       RING_EXECLIST_CONTROL(XEHP_BSD8_RING_BASE),     /* 0x1F4550 */
+       RING_TAIL(XEHP_VEBOX4_RING_BASE),               /* 0x1F8000 (base) */
+       RING_EXECLIST_CONTROL(XEHP_VEBOX4_RING_BASE),   /* 0x1F8550 */
        /* TODO: Other registers are not yet used */
 };
 
@@ -991,17 +1049,18 @@ static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
                return 0;
 }
 
-#define __is_genX_shadowed(x) \
-static bool is_gen##x##_shadowed(u32 offset) \
+#define __is_X_shadowed(x) \
+static bool is_##x##_shadowed(u32 offset) \
 { \
-       const i915_reg_t *regs = gen##x##_shadowed_regs; \
-       return BSEARCH(offset, regs, ARRAY_SIZE(gen##x##_shadowed_regs), \
+       const i915_reg_t *regs = x##_shadowed_regs; \
+       return BSEARCH(offset, regs, ARRAY_SIZE(x##_shadowed_regs), \
                       mmio_reg_cmp); \
 }
 
-__is_genX_shadowed(8)
-__is_genX_shadowed(11)
-__is_genX_shadowed(12)
+__is_X_shadowed(gen8)
+__is_X_shadowed(gen11)
+__is_X_shadowed(gen12)
+__is_X_shadowed(xehp)
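+
+The __is_X_shadowed() macro stamps out one predicate per table, each doing a
+binary search over its sorted register list. A rough standalone equivalent of
+that idea using the C library bsearch() (table contents and names below are
+made up for illustration, not the kernel's BSEARCH helper):
+
+	#include <stdio.h>
+	#include <stdlib.h>
+	#include <stdbool.h>
+
+	/* Example sorted offsets, a stand-in for a *_shadowed_regs[] table. */
+	static const unsigned int demo_shadowed_regs[] = { 0x2000, 0xa008, 0xa00c, 0x22000 };
+
+	static int offset_cmp(const void *key, const void *elt)
+	{
+		unsigned int offset = *(const unsigned int *)key;
+		unsigned int reg = *(const unsigned int *)elt;
+
+		return (offset > reg) - (offset < reg);
+	}
+
+	static bool is_demo_shadowed(unsigned int offset)
+	{
+		return bsearch(&offset, demo_shadowed_regs,
+			       sizeof(demo_shadowed_regs) / sizeof(demo_shadowed_regs[0]),
+			       sizeof(demo_shadowed_regs[0]), offset_cmp) != NULL;
+	}
+
+	int main(void)
+	{
+		printf("%d %d\n", is_demo_shadowed(0xa008), is_demo_shadowed(0x1234)); /* 1 0 */
+		return 0;
+	}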
 
 static enum forcewake_domains
 gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
@@ -1065,6 +1124,15 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
        __fwd; \
 })
 
+#define __xehp_fwtable_reg_write_fw_domains(uncore, offset) \
+({ \
+       enum forcewake_domains __fwd = 0; \
+       const u32 __offset = (offset); \
+       if (!is_xehp_shadowed(__offset)) \
+               __fwd = find_fw_domain(uncore, __offset); \
+       __fwd; \
+})
+
 /* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
 static const struct intel_forcewake_range __gen9_fw_ranges[] = {
        GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_GT),
@@ -1249,6 +1317,172 @@ static const struct intel_forcewake_range __gen12_fw_ranges[] = {
                0x1d3f00 - 0x1d3fff: VD2 */
 };
 
+/*
+ * Graphics IP version 12.55 brings a slight change to the 0xd800 range,
+ * switching it from the GT domain to the render domain.
+ *
+ * *Must* be sorted by offset ranges! See intel_fw_table_check().
+ */
+#define XEHP_FWRANGES(FW_RANGE_D800)                                   \
+       GEN_FW_RANGE(0x0, 0x1fff, 0), /*                                        \
+                 0x0 -  0xaff: reserved                                        \
+               0xb00 - 0x1fff: always on */                                    \
+       GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER),                         \
+       GEN_FW_RANGE(0x2700, 0x4aff, FORCEWAKE_GT),                             \
+       GEN_FW_RANGE(0x4b00, 0x51ff, 0), /*                                     \
+               0x4b00 - 0x4fff: reserved                                       \
+               0x5000 - 0x51ff: always on */                                   \
+       GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER),                         \
+       GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_GT),                             \
+       GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER),                         \
+       GEN_FW_RANGE(0x8160, 0x81ff, 0), /*                                     \
+               0x8160 - 0x817f: reserved                                       \
+               0x8180 - 0x81ff: always on */                                   \
+       GEN_FW_RANGE(0x8200, 0x82ff, FORCEWAKE_GT),                             \
+       GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER),                         \
+       GEN_FW_RANGE(0x8500, 0x8cff, FORCEWAKE_GT), /*                          \
+               0x8500 - 0x87ff: gt                                             \
+               0x8800 - 0x8c7f: reserved                                       \
+               0x8c80 - 0x8cff: gt (DG2 only) */                               \
+       GEN_FW_RANGE(0x8d00, 0x8fff, FORCEWAKE_RENDER), /*                      \
+               0x8d00 - 0x8dff: render (DG2 only)                              \
+               0x8e00 - 0x8fff: reserved */                                    \
+       GEN_FW_RANGE(0x9000, 0x94cf, FORCEWAKE_GT), /*                          \
+               0x9000 - 0x947f: gt                                             \
+               0x9480 - 0x94cf: reserved */                                    \
+       GEN_FW_RANGE(0x94d0, 0x955f, FORCEWAKE_RENDER),                         \
+       GEN_FW_RANGE(0x9560, 0x967f, 0), /*                                     \
+               0x9560 - 0x95ff: always on                                      \
+               0x9600 - 0x967f: reserved */                                    \
+       GEN_FW_RANGE(0x9680, 0x97ff, FORCEWAKE_RENDER), /*                      \
+               0x9680 - 0x96ff: render (DG2 only)                              \
+               0x9700 - 0x97ff: reserved */                                    \
+       GEN_FW_RANGE(0x9800, 0xcfff, FORCEWAKE_GT), /*                          \
+               0x9800 - 0xb4ff: gt                                             \
+               0xb500 - 0xbfff: reserved                                       \
+               0xc000 - 0xcfff: gt */                                          \
+       GEN_FW_RANGE(0xd000, 0xd7ff, 0),                                        \
+       GEN_FW_RANGE(0xd800, 0xd87f, FW_RANGE_D800),                    \
+       GEN_FW_RANGE(0xd880, 0xdbff, FORCEWAKE_GT),                             \
+       GEN_FW_RANGE(0xdc00, 0xdcff, FORCEWAKE_RENDER),                         \
+       GEN_FW_RANGE(0xdd00, 0xde7f, FORCEWAKE_GT), /*                          \
+               0xdd00 - 0xddff: gt                                             \
+               0xde00 - 0xde7f: reserved */                                    \
+       GEN_FW_RANGE(0xde80, 0xe8ff, FORCEWAKE_RENDER), /*                      \
+               0xde80 - 0xdfff: render                                         \
+               0xe000 - 0xe0ff: reserved                                       \
+               0xe100 - 0xe8ff: render */                                      \
+       GEN_FW_RANGE(0xe900, 0xffff, FORCEWAKE_GT), /*                          \
+               0xe900 - 0xe9ff: gt                                             \
+               0xea00 - 0xefff: reserved                                       \
+               0xf000 - 0xffff: gt */                                          \
+       GEN_FW_RANGE(0x10000, 0x12fff, 0), /*                                   \
+               0x10000 - 0x11fff: reserved                                     \
+               0x12000 - 0x127ff: always on                                    \
+               0x12800 - 0x12fff: reserved */                                  \
+       GEN_FW_RANGE(0x13000, 0x131ff, FORCEWAKE_MEDIA_VDBOX0), /* DG2 only */  \
+       GEN_FW_RANGE(0x13200, 0x13fff, FORCEWAKE_MEDIA_VDBOX2), /*              \
+               0x13200 - 0x133ff: VD2 (DG2 only)                               \
+               0x13400 - 0x13fff: reserved */                                  \
+       GEN_FW_RANGE(0x14000, 0x141ff, FORCEWAKE_MEDIA_VDBOX0), /* XEHPSDV only */      \
+       GEN_FW_RANGE(0x14200, 0x143ff, FORCEWAKE_MEDIA_VDBOX2), /* XEHPSDV only */      \
+       GEN_FW_RANGE(0x14400, 0x145ff, FORCEWAKE_MEDIA_VDBOX4), /* XEHPSDV only */      \
+       GEN_FW_RANGE(0x14600, 0x147ff, FORCEWAKE_MEDIA_VDBOX6), /* XEHPSDV only */      \
+       GEN_FW_RANGE(0x14800, 0x14fff, FORCEWAKE_RENDER),                       \
+       GEN_FW_RANGE(0x15000, 0x16dff, FORCEWAKE_GT), /*                        \
+               0x15000 - 0x15fff: gt (DG2 only)                                \
+               0x16000 - 0x16dff: reserved */                                  \
+       GEN_FW_RANGE(0x16e00, 0x1ffff, FORCEWAKE_RENDER),                       \
+       GEN_FW_RANGE(0x20000, 0x21fff, FORCEWAKE_MEDIA_VDBOX0), /*              \
+               0x20000 - 0x20fff: VD0 (XEHPSDV only)                           \
+               0x21000 - 0x21fff: reserved */                                  \
+       GEN_FW_RANGE(0x22000, 0x23fff, FORCEWAKE_GT),                           \
+       GEN_FW_RANGE(0x24000, 0x2417f, 0), /*                                   \
+               0x24000 - 0x2407f: always on                                    \
+               0x24080 - 0x2417f: reserved */                                  \
+       GEN_FW_RANGE(0x24180, 0x249ff, FORCEWAKE_GT), /*                        \
+               0x24180 - 0x241ff: gt                                           \
+               0x24200 - 0x249ff: reserved */                                  \
+       GEN_FW_RANGE(0x24a00, 0x251ff, FORCEWAKE_RENDER), /*                    \
+               0x24a00 - 0x24a7f: render                                       \
+               0x24a80 - 0x251ff: reserved */                                  \
+       GEN_FW_RANGE(0x25200, 0x25fff, FORCEWAKE_GT), /*                        \
+               0x25200 - 0x252ff: gt                                           \
+               0x25300 - 0x25fff: reserved */                                  \
+       GEN_FW_RANGE(0x26000, 0x2ffff, FORCEWAKE_RENDER), /*                    \
+               0x26000 - 0x27fff: render                                       \
+               0x28000 - 0x29fff: reserved                                     \
+               0x2a000 - 0x2ffff: undocumented */                              \
+       GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_GT),                           \
+       GEN_FW_RANGE(0x40000, 0x1bffff, 0),                                     \
+       GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), /*            \
+               0x1c0000 - 0x1c2bff: VD0                                        \
+               0x1c2c00 - 0x1c2cff: reserved                                   \
+               0x1c2d00 - 0x1c2dff: VD0                                        \
+               0x1c2e00 - 0x1c3eff: VD0 (DG2 only)                             \
+               0x1c3f00 - 0x1c3fff: VD0 */                                     \
+       GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), /*            \
+               0x1c4000 - 0x1c6bff: VD1                                        \
+               0x1c6c00 - 0x1c6cff: reserved                                   \
+               0x1c6d00 - 0x1c6dff: VD1                                        \
+               0x1c6e00 - 0x1c7fff: reserved */                                \
+       GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), /*            \
+               0x1c8000 - 0x1ca0ff: VE0                                        \
+               0x1ca100 - 0x1cbfff: reserved */                                \
+       GEN_FW_RANGE(0x1cc000, 0x1ccfff, FORCEWAKE_MEDIA_VDBOX0),               \
+       GEN_FW_RANGE(0x1cd000, 0x1cdfff, FORCEWAKE_MEDIA_VDBOX2),               \
+       GEN_FW_RANGE(0x1ce000, 0x1cefff, FORCEWAKE_MEDIA_VDBOX4),               \
+       GEN_FW_RANGE(0x1cf000, 0x1cffff, FORCEWAKE_MEDIA_VDBOX6),               \
+       GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), /*            \
+               0x1d0000 - 0x1d2bff: VD2                                        \
+               0x1d2c00 - 0x1d2cff: reserved                                   \
+               0x1d2d00 - 0x1d2dff: VD2                                        \
+               0x1d2e00 - 0x1d3dff: VD2 (DG2 only)                             \
+               0x1d3e00 - 0x1d3eff: reserved                                   \
+               0x1d3f00 - 0x1d3fff: VD2 */                                     \
+       GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), /*            \
+               0x1d4000 - 0x1d6bff: VD3                                        \
+               0x1d6c00 - 0x1d6cff: reserved                                   \
+               0x1d6d00 - 0x1d6dff: VD3                                        \
+               0x1d6e00 - 0x1d7fff: reserved */                                \
+       GEN_FW_RANGE(0x1d8000, 0x1dffff, FORCEWAKE_MEDIA_VEBOX1), /*            \
+               0x1d8000 - 0x1da0ff: VE1                                        \
+               0x1da100 - 0x1dffff: reserved */                                \
+       GEN_FW_RANGE(0x1e0000, 0x1e3fff, FORCEWAKE_MEDIA_VDBOX4), /*            \
+               0x1e0000 - 0x1e2bff: VD4                                        \
+               0x1e2c00 - 0x1e2cff: reserved                                   \
+               0x1e2d00 - 0x1e2dff: VD4                                        \
+               0x1e2e00 - 0x1e3eff: reserved                                   \
+               0x1e3f00 - 0x1e3fff: VD4 */                                     \
+       GEN_FW_RANGE(0x1e4000, 0x1e7fff, FORCEWAKE_MEDIA_VDBOX5), /*            \
+               0x1e4000 - 0x1e6bff: VD5                                        \
+               0x1e6c00 - 0x1e6cff: reserved                                   \
+               0x1e6d00 - 0x1e6dff: VD5                                        \
+               0x1e6e00 - 0x1e7fff: reserved */                                \
+       GEN_FW_RANGE(0x1e8000, 0x1effff, FORCEWAKE_MEDIA_VEBOX2), /*            \
+               0x1e8000 - 0x1ea0ff: VE2                                        \
+               0x1ea100 - 0x1effff: reserved */                                \
+       GEN_FW_RANGE(0x1f0000, 0x1f3fff, FORCEWAKE_MEDIA_VDBOX6), /*            \
+               0x1f0000 - 0x1f2bff: VD6                                        \
+               0x1f2c00 - 0x1f2cff: reserved                                   \
+               0x1f2d00 - 0x1f2dff: VD6                                        \
+               0x1f2e00 - 0x1f3eff: reserved                                   \
+               0x1f3f00 - 0x1f3fff: VD6 */                                     \
+       GEN_FW_RANGE(0x1f4000, 0x1f7fff, FORCEWAKE_MEDIA_VDBOX7), /*            \
+               0x1f4000 - 0x1f6bff: VD7                                        \
+               0x1f6c00 - 0x1f6cff: reserved                                   \
+               0x1f6d00 - 0x1f6dff: VD7                                        \
+               0x1f6e00 - 0x1f7fff: reserved */                                \
+       GEN_FW_RANGE(0x1f8000, 0x1fa0ff, FORCEWAKE_MEDIA_VEBOX3),
+
+static const struct intel_forcewake_range __xehp_fw_ranges[] = {
+       XEHP_FWRANGES(FORCEWAKE_GT)
+};
+
+static const struct intel_forcewake_range __dg2_fw_ranges[] = {
+       XEHP_FWRANGES(FORCEWAKE_RENDER)
+};
+
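
Since XEHPSDV and DG2 share every forcewake range except the 0xd800 block, the table is generated from one macro that takes the domain for that single range as a parameter: __xehp_fw_ranges passes FORCEWAKE_GT, __dg2_fw_ranges passes FORCEWAKE_RENDER. A toy standalone sketch of the same parameterised-table trick (struct and names invented for illustration):

#include <stdio.h>

struct range { unsigned int start, end, domain; };

enum { DOM_NONE, DOM_GT, DOM_RENDER };

#define DEMO_RANGES(D800_DOMAIN)		\
	{ 0x0000, 0x1fff, DOM_NONE },		\
	{ 0x2000, 0xcfff, DOM_GT },		\
	{ 0xd800, 0xd87f, D800_DOMAIN },	\
	{ 0xd880, 0xffff, DOM_GT },

/* Two tables that differ only in the domain of the 0xd800 entry. */
static const struct range demo_xehpsdv_ranges[] = { DEMO_RANGES(DOM_GT) };
static const struct range demo_dg2_ranges[]     = { DEMO_RANGES(DOM_RENDER) };

int main(void)
{
	printf("%u %u\n", demo_xehpsdv_ranges[2].domain, demo_dg2_ranges[2].domain); /* 1 2 */
	return 0;
}
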
 static void
 ilk_dummy_write(struct intel_uncore *uncore)
 {
@@ -1502,6 +1736,7 @@ __gen_write(func, 8) \
 __gen_write(func, 16) \
 __gen_write(func, 32)
 
+__gen_reg_write_funcs(xehp_fwtable);
 __gen_reg_write_funcs(gen12_fwtable);
 __gen_reg_write_funcs(gen11_fwtable);
 __gen_reg_write_funcs(fwtable);
@@ -1582,8 +1817,14 @@ static int __fw_domain_init(struct intel_uncore *uncore,
        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX1));
        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX2));
        BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX3));
+       BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX4 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX4));
+       BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX5 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX5));
+       BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX6 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX6));
+       BUILD_BUG_ON(FORCEWAKE_MEDIA_VDBOX7 != (1 << FW_DOMAIN_ID_MEDIA_VDBOX7));
        BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX0 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX0));
        BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX1 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX1));
+       BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX2 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX2));
+       BUILD_BUG_ON(FORCEWAKE_MEDIA_VEBOX3 != (1 << FW_DOMAIN_ID_MEDIA_VEBOX3));
 
        d->mask = BIT(domain_id);
 
@@ -1870,36 +2111,40 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
                return ret;
        forcewake_early_sanitize(uncore, 0);
 
-       if (IS_GRAPHICS_VER(i915, 6, 7)) {
-               ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
-
-               if (IS_VALLEYVIEW(i915)) {
-                       ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
-                       ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
-               } else {
-                       ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
-               }
-       } else if (GRAPHICS_VER(i915) == 8) {
-               if (IS_CHERRYVIEW(i915)) {
-                       ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
-                       ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
-                       ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
-               } else {
-                       ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
-                       ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
-               }
-       } else if (IS_GRAPHICS_VER(i915, 9, 10)) {
-               ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
-               ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
-               ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
-       } else if (GRAPHICS_VER(i915) == 11) {
-               ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
-               ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
+       if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
+               ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
+               ASSIGN_WRITE_MMIO_VFUNCS(uncore, xehp_fwtable);
                ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
-       } else {
+       } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+               ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
+               ASSIGN_WRITE_MMIO_VFUNCS(uncore, xehp_fwtable);
+               ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
+       } else if (GRAPHICS_VER(i915) >= 12) {
                ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
                ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable);
                ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable);
+       } else if (GRAPHICS_VER(i915) == 11) {
+               ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
+               ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
+               ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
+       } else if (IS_GRAPHICS_VER(i915, 9, 10)) {
+               ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
+               ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+               ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
+       } else if (IS_CHERRYVIEW(i915)) {
+               ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
+               ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+               ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
+       } else if (GRAPHICS_VER(i915) == 8) {
+               ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
+               ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
+       } else if (IS_VALLEYVIEW(i915)) {
+               ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
+               ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
+               ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
+       } else if (IS_GRAPHICS_VER(i915, 6, 7)) {
+               ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
+               ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
        }
 
        uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
@@ -1988,6 +2233,22 @@ void intel_uncore_prune_engine_fw_domains(struct intel_uncore *uncore,
                if (HAS_ENGINE(gt, _VCS(i)))
                        continue;
 
+               /*
+                * Starting with XeHP, the power well for an even-numbered
+                * VDBOX is also used for shared units within the
+                * media slice such as SFC.  So even if the engine
+                * itself is fused off, we still need to initialize
+                * the forcewake domain if any of the other engines
+                * in the same media slice are present.
+                */
+               if (GRAPHICS_VER_FULL(uncore->i915) >= IP_VER(12, 50) && i % 2 == 0) {
+                       if ((i + 1 < I915_MAX_VCS) && HAS_ENGINE(gt, _VCS(i + 1)))
+                               continue;
+
+                       if (HAS_ENGINE(gt, _VECS(i / 2)))
+                               continue;
+               }
+
                if (fw_domains & BIT(domain_id))
                        fw_domain_fini(uncore, domain_id);
        }
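
To make the pruning rule above concrete: on a 12.50+ part, the forcewake domain for an even-numbered VDBOX n is kept not only when VCSn itself exists, but also when VCSn+1 or VECS(n/2) does, because they hang off the same power well. A hedged standalone restatement of just that condition (HAS_VCS/HAS_VECS are hypothetical stand-ins for HAS_ENGINE(), not kernel helpers):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical engine-presence helpers standing in for HAS_ENGINE(). */
static bool HAS_VCS(unsigned long vcs_mask, int i)   { return vcs_mask  & (1ul << i); }
static bool HAS_VECS(unsigned long vecs_mask, int i) { return vecs_mask & (1ul << i); }

/* Should the VDBOXi forcewake domain survive pruning on a 12.50+ platform? */
static bool keep_vdbox_domain(unsigned long vcs_mask, unsigned long vecs_mask,
			      int i, int max_vcs)
{
	if (HAS_VCS(vcs_mask, i))
		return true;			/* the engine itself is present */

	if (i % 2 == 0) {
		if (i + 1 < max_vcs && HAS_VCS(vcs_mask, i + 1))
			return true;		/* odd sibling shares the power well */
		if (HAS_VECS(vecs_mask, i / 2))
			return true;		/* VEBOX in the same media slice */
	}

	return false;
}

int main(void)
{
	/* VCS0 fused off, but VCS1 present: VDBOX0's domain must be kept. */
	printf("%d\n", keep_vdbox_domain(0x2, 0x0, 0, 8));	/* prints 1 */
	return 0;
}
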
@@ -2277,6 +2538,61 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
        return fw_domains;
 }
 
+u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
+                                          i915_reg_t reg,
+                                          int slice, int subslice)
+{
+       u32 mcr_mask, mcr_ss, mcr, old_mcr, val;
+
+       lockdep_assert_held(&uncore->lock);
+
+       if (GRAPHICS_VER(uncore->i915) >= 11) {
+               mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
+               mcr_ss = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
+       } else {
+               mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
+               mcr_ss = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
+       }
+
+       old_mcr = mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
+
+       mcr &= ~mcr_mask;
+       mcr |= mcr_ss;
+       intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
+
+       val = intel_uncore_read_fw(uncore, reg);
+
+       mcr &= ~mcr_mask;
+       mcr |= old_mcr & mcr_mask;
+
+       intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
+
+       return val;
+}
+
+u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
+                                       i915_reg_t reg, int slice, int subslice)
+{
+       enum forcewake_domains fw_domains;
+       u32 val;
+
+       fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
+                                                   FW_REG_READ);
+       fw_domains |= intel_uncore_forcewake_for_reg(uncore,
+                                                    GEN8_MCR_SELECTOR,
+                                                    FW_REG_READ | FW_REG_WRITE);
+
+       spin_lock_irq(&uncore->lock);
+       intel_uncore_forcewake_get__locked(uncore, fw_domains);
+
+       val = intel_uncore_read_with_mcr_steering_fw(uncore, reg, slice, subslice);
+
+       intel_uncore_forcewake_put__locked(uncore, fw_domains);
+       spin_unlock_irq(&uncore->lock);
+
+       return val;
+}
+
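
The steered-read helpers above follow a save/steer/read/restore sequence on the MCR selector. A rough standalone sketch of that sequence with mocked register accessors (mmio_read32/mmio_write32, the selector offset and the field layout are all invented for illustration; the real code picks its masks based on GRAPHICS_VER):

#include <stdio.h>
#include <stdint.h>

/* Mock MMIO accessors over a fake register file, for illustration only. */
static uint32_t fake_regs[0x10000];
static uint32_t mmio_read32(uint32_t reg)              { return fake_regs[reg]; }
static void     mmio_write32(uint32_t reg, uint32_t v) { fake_regs[reg] = v; }

#define DEMO_MCR_SELECTOR	0x0fdc		/* stand-in for the MCR selector register */
#define DEMO_SLICE_MASK		0x0f000000u	/* invented field layout */
#define DEMO_SUBSLICE_MASK	0x00ff0000u
#define DEMO_SLICE(s)		(((uint32_t)(s) << 24) & DEMO_SLICE_MASK)
#define DEMO_SUBSLICE(ss)	(((uint32_t)(ss) << 16) & DEMO_SUBSLICE_MASK)

static uint32_t read_with_steering(uint32_t reg, int slice, int subslice)
{
	uint32_t mask = DEMO_SLICE_MASK | DEMO_SUBSLICE_MASK;
	uint32_t old = mmio_read32(DEMO_MCR_SELECTOR);
	uint32_t mcr = (old & ~mask) | DEMO_SLICE(slice) | DEMO_SUBSLICE(subslice);
	uint32_t val;

	mmio_write32(DEMO_MCR_SELECTOR, mcr);	/* steer reads to one slice/subslice */
	val = mmio_read32(reg);			/* read the multicast register */
	mmio_write32(DEMO_MCR_SELECTOR, old);	/* restore the previous steering */

	return val;
}

int main(void)
{
	fake_regs[0x1234] = 0xdeadbeef;
	printf("0x%x\n", read_with_steering(0x1234, 1, 2));
	return 0;
}
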
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/mock_uncore.c"
 #include "selftests/intel_uncore.c"
index 59f0da8..3c0b0a8 100644 (file)
@@ -52,8 +52,14 @@ enum forcewake_domain_id {
        FW_DOMAIN_ID_MEDIA_VDBOX1,
        FW_DOMAIN_ID_MEDIA_VDBOX2,
        FW_DOMAIN_ID_MEDIA_VDBOX3,
+       FW_DOMAIN_ID_MEDIA_VDBOX4,
+       FW_DOMAIN_ID_MEDIA_VDBOX5,
+       FW_DOMAIN_ID_MEDIA_VDBOX6,
+       FW_DOMAIN_ID_MEDIA_VDBOX7,
        FW_DOMAIN_ID_MEDIA_VEBOX0,
        FW_DOMAIN_ID_MEDIA_VEBOX1,
+       FW_DOMAIN_ID_MEDIA_VEBOX2,
+       FW_DOMAIN_ID_MEDIA_VEBOX3,
 
        FW_DOMAIN_ID_COUNT
 };
@@ -66,10 +72,16 @@ enum forcewake_domains {
        FORCEWAKE_MEDIA_VDBOX1  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX1),
        FORCEWAKE_MEDIA_VDBOX2  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX2),
        FORCEWAKE_MEDIA_VDBOX3  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX3),
+       FORCEWAKE_MEDIA_VDBOX4  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX4),
+       FORCEWAKE_MEDIA_VDBOX5  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX5),
+       FORCEWAKE_MEDIA_VDBOX6  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX6),
+       FORCEWAKE_MEDIA_VDBOX7  = BIT(FW_DOMAIN_ID_MEDIA_VDBOX7),
        FORCEWAKE_MEDIA_VEBOX0  = BIT(FW_DOMAIN_ID_MEDIA_VEBOX0),
        FORCEWAKE_MEDIA_VEBOX1  = BIT(FW_DOMAIN_ID_MEDIA_VEBOX1),
+       FORCEWAKE_MEDIA_VEBOX2  = BIT(FW_DOMAIN_ID_MEDIA_VEBOX2),
+       FORCEWAKE_MEDIA_VEBOX3  = BIT(FW_DOMAIN_ID_MEDIA_VEBOX3),
 
-       FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1
+       FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1,
 };
 
 struct intel_uncore_funcs {
@@ -182,6 +194,12 @@ intel_uncore_has_fifo(const struct intel_uncore *uncore)
        return uncore->flags & UNCORE_HAS_FIFO;
 }
 
+u32 intel_uncore_read_with_mcr_steering_fw(struct intel_uncore *uncore,
+                                          i915_reg_t reg,
+                                          int slice, int subslice);
+u32 intel_uncore_read_with_mcr_steering(struct intel_uncore *uncore,
+                                       i915_reg_t reg, int slice, int subslice);
+
 void
 intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug);
 void intel_uncore_init_early(struct intel_uncore *uncore,
index 8309455..5e511bb 100644 (file)
@@ -56,8 +56,8 @@
 
 /* 24KB at the end of WOPCM is reserved for RC6 CTX on BXT. */
 #define BXT_WOPCM_RC6_CTX_RESERVED     (SZ_16K + SZ_8K)
-/* 36KB WOPCM reserved at the end of WOPCM on CNL. */
-#define CNL_WOPCM_HW_CTX_RESERVED      (SZ_32K + SZ_4K)
+/* 36KB WOPCM reserved at the end of WOPCM on ICL. */
+#define ICL_WOPCM_HW_CTX_RESERVED      (SZ_32K + SZ_4K)
 
 /* 128KB from GUC_WOPCM_RESERVED is reserved for FW on Gen9. */
 #define GEN9_GUC_FW_RESERVED   SZ_128K
@@ -93,8 +93,8 @@ static u32 context_reserved_size(struct drm_i915_private *i915)
 {
        if (IS_GEN9_LP(i915))
                return BXT_WOPCM_RC6_CTX_RESERVED;
-       else if (GRAPHICS_VER(i915) >= 10)
-               return CNL_WOPCM_HW_CTX_RESERVED;
+       else if (GRAPHICS_VER(i915) >= 11)
+               return ICL_WOPCM_HW_CTX_RESERVED;
        else
                return 0;
 }
@@ -126,7 +126,7 @@ static bool gen9_check_huc_fw_fits(struct drm_i915_private *i915,
                                   u32 guc_wopcm_size, u32 huc_fw_size)
 {
        /*
-        * On Gen9 & CNL A0, hardware requires the total available GuC WOPCM
+        * On Gen9, hardware requires the total available GuC WOPCM
         * size to be larger than or equal to HuC firmware size. Otherwise,
         * firmware uploading would fail.
         */
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
new file mode 100644 (file)
index 0000000..d61ec9c
--- /dev/null
@@ -0,0 +1,787 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+static void __igt_dump_block(struct i915_buddy_mm *mm,
+                            struct i915_buddy_block *block,
+                            bool buddy)
+{
+       pr_err("block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%s buddy=%s\n",
+              block->header,
+              i915_buddy_block_state(block),
+              i915_buddy_block_order(block),
+              i915_buddy_block_offset(block),
+              i915_buddy_block_size(mm, block),
+              yesno(!block->parent),
+              yesno(buddy));
+}
+
+static void igt_dump_block(struct i915_buddy_mm *mm,
+                          struct i915_buddy_block *block)
+{
+       struct i915_buddy_block *buddy;
+
+       __igt_dump_block(mm, block, false);
+
+       buddy = get_buddy(block);
+       if (buddy)
+               __igt_dump_block(mm, buddy, true);
+}
+
+static int igt_check_block(struct i915_buddy_mm *mm,
+                          struct i915_buddy_block *block)
+{
+       struct i915_buddy_block *buddy;
+       unsigned int block_state;
+       u64 block_size;
+       u64 offset;
+       int err = 0;
+
+       block_state = i915_buddy_block_state(block);
+
+       if (block_state != I915_BUDDY_ALLOCATED &&
+           block_state != I915_BUDDY_FREE &&
+           block_state != I915_BUDDY_SPLIT) {
+               pr_err("block state mismatch\n");
+               err = -EINVAL;
+       }
+
+       block_size = i915_buddy_block_size(mm, block);
+       offset = i915_buddy_block_offset(block);
+
+       if (block_size < mm->chunk_size) {
+               pr_err("block size smaller than min size\n");
+               err = -EINVAL;
+       }
+
+       if (!is_power_of_2(block_size)) {
+               pr_err("block size not power of two\n");
+               err = -EINVAL;
+       }
+
+       if (!IS_ALIGNED(block_size, mm->chunk_size)) {
+               pr_err("block size not aligned to min size\n");
+               err = -EINVAL;
+       }
+
+       if (!IS_ALIGNED(offset, mm->chunk_size)) {
+               pr_err("block offset not aligned to min size\n");
+               err = -EINVAL;
+       }
+
+       if (!IS_ALIGNED(offset, block_size)) {
+               pr_err("block offset not aligned to block size\n");
+               err = -EINVAL;
+       }
+
+       buddy = get_buddy(block);
+
+       if (!buddy && block->parent) {
+               pr_err("buddy has gone fishing\n");
+               err = -EINVAL;
+       }
+
+       if (buddy) {
+               if (i915_buddy_block_offset(buddy) != (offset ^ block_size)) {
+                       pr_err("buddy has wrong offset\n");
+                       err = -EINVAL;
+               }
+
+               if (i915_buddy_block_size(mm, buddy) != block_size) {
+                       pr_err("buddy size mismatch\n");
+                       err = -EINVAL;
+               }
+
+               if (i915_buddy_block_state(buddy) == block_state &&
+                   block_state == I915_BUDDY_FREE) {
+                       pr_err("block and its buddy are free\n");
+                       err = -EINVAL;
+               }
+       }
+
+       return err;
+}
+
+static int igt_check_blocks(struct i915_buddy_mm *mm,
+                           struct list_head *blocks,
+                           u64 expected_size,
+                           bool is_contiguous)
+{
+       struct i915_buddy_block *block;
+       struct i915_buddy_block *prev;
+       u64 total;
+       int err = 0;
+
+       block = NULL;
+       prev = NULL;
+       total = 0;
+
+       list_for_each_entry(block, blocks, link) {
+               err = igt_check_block(mm, block);
+
+               if (!i915_buddy_block_is_allocated(block)) {
+                       pr_err("block not allocated\n"),
+                       err = -EINVAL;
+               }
+
+               if (is_contiguous && prev) {
+                       u64 prev_block_size;
+                       u64 prev_offset;
+                       u64 offset;
+
+                       prev_offset = i915_buddy_block_offset(prev);
+                       prev_block_size = i915_buddy_block_size(mm, prev);
+                       offset = i915_buddy_block_offset(block);
+
+                       if (offset != (prev_offset + prev_block_size)) {
+                               pr_err("block offset mismatch\n");
+                               err = -EINVAL;
+                       }
+               }
+
+               if (err)
+                       break;
+
+               total += i915_buddy_block_size(mm, block);
+               prev = block;
+       }
+
+       if (!err) {
+               if (total != expected_size) {
+                       pr_err("size mismatch, expected=%llx, found=%llx\n",
+                              expected_size, total);
+                       err = -EINVAL;
+               }
+               return err;
+       }
+
+       if (prev) {
+               pr_err("prev block, dump:\n");
+               igt_dump_block(mm, prev);
+       }
+
+       pr_err("bad block, dump:\n");
+       igt_dump_block(mm, block);
+
+       return err;
+}
+
+static int igt_check_mm(struct i915_buddy_mm *mm)
+{
+       struct i915_buddy_block *root;
+       struct i915_buddy_block *prev;
+       unsigned int i;
+       u64 total;
+       int err = 0;
+
+       if (!mm->n_roots) {
+               pr_err("n_roots is zero\n");
+               return -EINVAL;
+       }
+
+       if (mm->n_roots != hweight64(mm->size)) {
+               pr_err("n_roots mismatch, n_roots=%u, expected=%lu\n",
+                      mm->n_roots, hweight64(mm->size));
+               return -EINVAL;
+       }
+
+       root = NULL;
+       prev = NULL;
+       total = 0;
+
+       for (i = 0; i < mm->n_roots; ++i) {
+               struct i915_buddy_block *block;
+               unsigned int order;
+
+               root = mm->roots[i];
+               if (!root) {
+                       pr_err("root(%u) is NULL\n", i);
+                       err = -EINVAL;
+                       break;
+               }
+
+               err = igt_check_block(mm, root);
+
+               if (!i915_buddy_block_is_free(root)) {
+                       pr_err("root not free\n");
+                       err = -EINVAL;
+               }
+
+               order = i915_buddy_block_order(root);
+
+               if (!i) {
+                       if (order != mm->max_order) {
+                               pr_err("max order root missing\n");
+                               err = -EINVAL;
+                       }
+               }
+
+               if (prev) {
+                       u64 prev_block_size;
+                       u64 prev_offset;
+                       u64 offset;
+
+                       prev_offset = i915_buddy_block_offset(prev);
+                       prev_block_size = i915_buddy_block_size(mm, prev);
+                       offset = i915_buddy_block_offset(root);
+
+                       if (offset != (prev_offset + prev_block_size)) {
+                               pr_err("root offset mismatch\n");
+                               err = -EINVAL;
+                       }
+               }
+
+               block = list_first_entry_or_null(&mm->free_list[order],
+                                                struct i915_buddy_block,
+                                                link);
+               if (block != root) {
+                       pr_err("root mismatch at order=%u\n", order);
+                       err = -EINVAL;
+               }
+
+               if (err)
+                       break;
+
+               prev = root;
+               total += i915_buddy_block_size(mm, root);
+       }
+
+       if (!err) {
+               if (total != mm->size) {
+                       pr_err("expected mm size=%llx, found=%llx\n", mm->size,
+                              total);
+                       err = -EINVAL;
+               }
+               return err;
+       }
+
+       if (prev) {
+               pr_err("prev root(%u), dump:\n", i - 1);
+               igt_dump_block(mm, prev);
+       }
+
+       if (root) {
+               pr_err("bad root(%u), dump:\n", i);
+               igt_dump_block(mm, root);
+       }
+
+       return err;
+}
+
+static void igt_mm_config(u64 *size, u64 *chunk_size)
+{
+       I915_RND_STATE(prng);
+       u32 s, ms;
+
+       /* Nothing fancy, just try to get an interesting bit pattern */
+
+       prandom_seed_state(&prng, i915_selftest.random_seed);
+
+       /* Let size be a random number of pages up to 8 GB (2M pages) */
+       s = 1 + i915_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
+       /* Let the chunk size be a random power of 2 less than size */
+       ms = BIT(i915_prandom_u32_max_state(ilog2(s), &prng));
+       /* Round size down to the chunk size */
+       s &= -ms;
+
+       /* Convert from pages to bytes */
+       *chunk_size = (u64)ms << 12;
+       *size = (u64)s << 12;
+}
+
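+
+Worked example of the configuration above: suppose the PRNG yields s = 0x2b3
+pages and the ilog2-based draw picks ms = 0x10 (16 pages). `s &= -ms` clears
+the low bits, rounding s down to 0x2b0 pages, so size becomes 0x2b0 << 12
+bytes and chunk_size becomes 0x10 << 12 = 64 KiB. A tiny standalone check of
+the rounding trick (values chosen only for illustration):
+
+	#include <assert.h>
+	#include <stdint.h>
+
+	int main(void)
+	{
+		uint64_t s = 0x2b3;	/* pages, as if drawn from the PRNG */
+		uint64_t ms = 0x10;	/* chunk size in pages, a power of two <= s */
+
+		s &= -ms;		/* round down to a multiple of ms */
+
+		assert(s == 0x2b0);
+		assert((s << 12) % (ms << 12) == 0);	/* byte size is chunk-aligned */
+		return 0;
+	}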
+static int igt_buddy_alloc_smoke(void *arg)
+{
+       struct i915_buddy_mm mm;
+       IGT_TIMEOUT(end_time);
+       I915_RND_STATE(prng);
+       u64 chunk_size;
+       u64 mm_size;
+       int *order;
+       int err, i;
+
+       igt_mm_config(&mm_size, &chunk_size);
+
+       pr_info("buddy_init with size=%llx, chunk_size=%llx\n", mm_size, chunk_size);
+
+       err = i915_buddy_init(&mm, mm_size, chunk_size);
+       if (err) {
+               pr_err("buddy_init failed(%d)\n", err);
+               return err;
+       }
+
+       order = i915_random_order(mm.max_order + 1, &prng);
+       if (!order)
+               goto out_fini;
+
+       for (i = 0; i <= mm.max_order; ++i) {
+               struct i915_buddy_block *block;
+               int max_order = order[i];
+               bool timeout = false;
+               LIST_HEAD(blocks);
+               int order;
+               u64 total;
+
+               err = igt_check_mm(&mm);
+               if (err) {
+                       pr_err("pre-mm check failed, abort\n");
+                       break;
+               }
+
+               pr_info("filling from max_order=%u\n", max_order);
+
+               order = max_order;
+               total = 0;
+
+               do {
+retry:
+                       block = i915_buddy_alloc(&mm, order);
+                       if (IS_ERR(block)) {
+                               err = PTR_ERR(block);
+                               if (err == -ENOMEM) {
+                                       pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+                                               order);
+                               } else {
+                                       if (order--) {
+                                               err = 0;
+                                               goto retry;
+                                       }
+
+                                       pr_err("buddy_alloc with order=%d failed(%d)\n",
+                                              order, err);
+                               }
+
+                               break;
+                       }
+
+                       list_add_tail(&block->link, &blocks);
+
+                       if (i915_buddy_block_order(block) != order) {
+                               pr_err("buddy_alloc order mismatch\n");
+                               err = -EINVAL;
+                               break;
+                       }
+
+                       total += i915_buddy_block_size(&mm, block);
+
+                       if (__igt_timeout(end_time, NULL)) {
+                               timeout = true;
+                               break;
+                       }
+               } while (total < mm.size);
+
+               if (!err)
+                       err = igt_check_blocks(&mm, &blocks, total, false);
+
+               i915_buddy_free_list(&mm, &blocks);
+
+               if (!err) {
+                       err = igt_check_mm(&mm);
+                       if (err)
+                               pr_err("post-mm check failed\n");
+               }
+
+               if (err || timeout)
+                       break;
+
+               cond_resched();
+       }
+
+       if (err == -ENOMEM)
+               err = 0;
+
+       kfree(order);
+out_fini:
+       i915_buddy_fini(&mm);
+
+       return err;
+}
+
+static int igt_buddy_alloc_pessimistic(void *arg)
+{
+       const unsigned int max_order = 16;
+       struct i915_buddy_block *block, *bn;
+       struct i915_buddy_mm mm;
+       unsigned int order;
+       LIST_HEAD(blocks);
+       int err;
+
+       /*
+        * Create a pot-sized mm, then allocate one of each possible
+        * order within. This should leave the mm with exactly one
+        * page left.
+        */
+
+       err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
+       if (err) {
+               pr_err("buddy_init failed(%d)\n", err);
+               return err;
+       }
+       GEM_BUG_ON(mm.max_order != max_order);
+
+       for (order = 0; order < max_order; order++) {
+               block = i915_buddy_alloc(&mm, order);
+               if (IS_ERR(block)) {
+                       pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+                               order);
+                       err = PTR_ERR(block);
+                       goto err;
+               }
+
+               list_add_tail(&block->link, &blocks);
+       }
+
+       /* And now the last remaining block available */
+       block = i915_buddy_alloc(&mm, 0);
+       if (IS_ERR(block)) {
+               pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
+               err = PTR_ERR(block);
+               goto err;
+       }
+       list_add_tail(&block->link, &blocks);
+
+       /* Should be completely full! */
+       for (order = max_order; order--; ) {
+               block = i915_buddy_alloc(&mm, order);
+               if (!IS_ERR(block)) {
+                       pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
+                               order);
+                       list_add_tail(&block->link, &blocks);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+       block = list_last_entry(&blocks, typeof(*block), link);
+       list_del(&block->link);
+       i915_buddy_free(&mm, block);
+
+       /* As we free in increasing size, we make available larger blocks */
+       order = 1;
+       list_for_each_entry_safe(block, bn, &blocks, link) {
+               list_del(&block->link);
+               i915_buddy_free(&mm, block);
+
+               block = i915_buddy_alloc(&mm, order);
+               if (IS_ERR(block)) {
+                       pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+                               order);
+                       err = PTR_ERR(block);
+                       goto err;
+               }
+               i915_buddy_free(&mm, block);
+               order++;
+       }
+
+       /* To confirm, now the whole mm should be available */
+       block = i915_buddy_alloc(&mm, max_order);
+       if (IS_ERR(block)) {
+               pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+                       max_order);
+               err = PTR_ERR(block);
+               goto err;
+       }
+       i915_buddy_free(&mm, block);
+
+err:
+       i915_buddy_free_list(&mm, &blocks);
+       i915_buddy_fini(&mm);
+       return err;
+}
+
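+
+The "exactly one page left" claim above follows from the geometric series:
+allocating one block of each order 0..max_order-1 consumes
+2^0 + 2^1 + ... + 2^(max_order-1) = 2^max_order - 1 pages of a 2^max_order-page
+mm. (The optimistic test below uses the same identity the other way round: a mm
+of (2^(max_order+1) - 1) pages holds exactly one block of every order
+0..max_order.) A quick standalone check of the arithmetic:
+
+	#include <assert.h>
+
+	int main(void)
+	{
+		const unsigned int max_order = 16;
+		unsigned long long used = 0;
+		unsigned int order;
+
+		for (order = 0; order < max_order; order++)
+			used += 1ull << order;		/* one block of each order */
+
+		assert(used == (1ull << max_order) - 1);	/* one page remains */
+		return 0;
+	}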
+static int igt_buddy_alloc_optimistic(void *arg)
+{
+       const int max_order = 16;
+       struct i915_buddy_block *block;
+       struct i915_buddy_mm mm;
+       LIST_HEAD(blocks);
+       int order;
+       int err;
+
+       /*
+        * Create a mm with one block of each order available, and
+        * try to allocate them all.
+        */
+
+       err = i915_buddy_init(&mm,
+                             PAGE_SIZE * ((1 << (max_order + 1)) - 1),
+                             PAGE_SIZE);
+       if (err) {
+               pr_err("buddy_init failed(%d)\n", err);
+               return err;
+       }
+       GEM_BUG_ON(mm.max_order != max_order);
+
+       for (order = 0; order <= max_order; order++) {
+               block = i915_buddy_alloc(&mm, order);
+               if (IS_ERR(block)) {
+                       pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
+                               order);
+                       err = PTR_ERR(block);
+                       goto err;
+               }
+
+               list_add_tail(&block->link, &blocks);
+       }
+
+       /* Should be completely full! */
+       block = i915_buddy_alloc(&mm, 0);
+       if (!IS_ERR(block)) {
+               pr_info("buddy_alloc unexpectedly succeeded, it should be full!");
+               list_add_tail(&block->link, &blocks);
+               err = -EINVAL;
+               goto err;
+       }
+
+err:
+       i915_buddy_free_list(&mm, &blocks);
+       i915_buddy_fini(&mm);
+       return err;
+}
+
+static int igt_buddy_alloc_pathological(void *arg)
+{
+       const int max_order = 16;
+       struct i915_buddy_block *block;
+       struct i915_buddy_mm mm;
+       LIST_HEAD(blocks);
+       LIST_HEAD(holes);
+       int order, top;
+       int err;
+
+       /*
+        * Create a pot-sized mm, then allocate one of each possible
+        * order within. This should leave the mm with exactly one
+        * page left. Free the largest block, then whittle down again.
+        * Eventually we will have a fully 50% fragmented mm.
+        */
+
+       err = i915_buddy_init(&mm, PAGE_SIZE << max_order, PAGE_SIZE);
+       if (err) {
+               pr_err("buddy_init failed(%d)\n", err);
+               return err;
+       }
+       GEM_BUG_ON(mm.max_order != max_order);
+
+       for (top = max_order; top; top--) {
+               /* Make room by freeing the largest allocated block */
+               block = list_first_entry_or_null(&blocks, typeof(*block), link);
+               if (block) {
+                       list_del(&block->link);
+                       i915_buddy_free(&mm, block);
+               }
+
+               for (order = top; order--; ) {
+                       block = i915_buddy_alloc(&mm, order);
+                       if (IS_ERR(block)) {
+                               pr_info("buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
+                                       order, top);
+                               err = PTR_ERR(block);
+                               goto err;
+                       }
+                       list_add_tail(&block->link, &blocks);
+               }
+
+               /* There should be one final page for this sub-allocation */
+               block = i915_buddy_alloc(&mm, 0);
+               if (IS_ERR(block)) {
+                       pr_info("buddy_alloc hit -ENOMEM for hole\n");
+                       err = PTR_ERR(block);
+                       goto err;
+               }
+               list_add_tail(&block->link, &holes);
+
+               block = i915_buddy_alloc(&mm, top);
+               if (!IS_ERR(block)) {
+                       pr_info("buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
+                               top, max_order);
+                       list_add_tail(&block->link, &blocks);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+       i915_buddy_free_list(&mm, &holes);
+
+       /* Nothing larger than blocks of chunk_size now available */
+       for (order = 1; order <= max_order; order++) {
+               block = i915_buddy_alloc(&mm, order);
+               if (!IS_ERR(block)) {
+                       pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
+                               order);
+                       list_add_tail(&block->link, &blocks);
+                       err = -EINVAL;
+                       goto err;
+               }
+       }
+
+err:
+       list_splice_tail(&holes, &blocks);
+       i915_buddy_free_list(&mm, &blocks);
+       i915_buddy_fini(&mm);
+       return err;
+}
+
+static int igt_buddy_alloc_range(void *arg)
+{
+       struct i915_buddy_mm mm;
+       unsigned long page_num;
+       LIST_HEAD(blocks);
+       u64 chunk_size;
+       u64 offset;
+       u64 size;
+       u64 rem;
+       int err;
+
+       igt_mm_config(&size, &chunk_size);
+
+       pr_info("buddy_init with size=%llx, chunk_size=%llx\n", size, chunk_size);
+
+       err = i915_buddy_init(&mm, size, chunk_size);
+       if (err) {
+               pr_err("buddy_init failed(%d)\n", err);
+               return err;
+       }
+
+       err = igt_check_mm(&mm);
+       if (err) {
+               pr_err("pre-mm check failed, abort, abort, abort!\n");
+               goto err_fini;
+       }
+
+       rem = mm.size;
+       offset = 0;
+
+       for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
+               struct i915_buddy_block *block;
+               LIST_HEAD(tmp);
+
+               size = min(page_num * mm.chunk_size, rem);
+
+               err = i915_buddy_alloc_range(&mm, &tmp, offset, size);
+               if (err) {
+                       if (err == -ENOMEM) {
+                               pr_info("alloc_range hit -ENOMEM with size=%llx\n",
+                                       size);
+                       } else {
+                               pr_err("alloc_range with offset=%llx, size=%llx failed(%d)\n",
+                                      offset, size, err);
+                       }
+
+                       break;
+               }
+
+               block = list_first_entry_or_null(&tmp,
+                                                struct i915_buddy_block,
+                                                link);
+               if (!block) {
+                       pr_err("alloc_range has no blocks\n");
+                       err = -EINVAL;
+                       break;
+               }
+
+               if (i915_buddy_block_offset(block) != offset) {
+                       pr_err("alloc_range start offset mismatch, found=%llx, expected=%llx\n",
+                              i915_buddy_block_offset(block), offset);
+                       err = -EINVAL;
+               }
+
+               if (!err)
+                       err = igt_check_blocks(&mm, &tmp, size, true);
+
+               list_splice_tail(&tmp, &blocks);
+
+               if (err)
+                       break;
+
+               offset += size;
+
+               rem -= size;
+               if (!rem)
+                       break;
+
+               cond_resched();
+       }
+
+       if (err == -ENOMEM)
+               err = 0;
+
+       i915_buddy_free_list(&mm, &blocks);
+
+       if (!err) {
+               err = igt_check_mm(&mm);
+               if (err)
+                       pr_err("post-mm check failed\n");
+       }
+
+err_fini:
+       i915_buddy_fini(&mm);
+
+       return err;
+}
+
+static int igt_buddy_alloc_limit(void *arg)
+{
+       struct i915_buddy_block *block;
+       struct i915_buddy_mm mm;
+       const u64 size = U64_MAX;
+       int err;
+
+       err = i915_buddy_init(&mm, size, PAGE_SIZE);
+       if (err)
+               return err;
+
+       if (mm.max_order != I915_BUDDY_MAX_ORDER) {
+               pr_err("mm.max_order(%d) != %d\n",
+                      mm.max_order, I915_BUDDY_MAX_ORDER);
+               err = -EINVAL;
+               goto out_fini;
+       }
+
+       block = i915_buddy_alloc(&mm, mm.max_order);
+       if (IS_ERR(block)) {
+               err = PTR_ERR(block);
+               goto out_fini;
+       }
+
+       if (i915_buddy_block_order(block) != mm.max_order) {
+               pr_err("block order(%d) != %d\n",
+                      i915_buddy_block_order(block), mm.max_order);
+               err = -EINVAL;
+               goto out_free;
+       }
+
+       if (i915_buddy_block_size(&mm, block) !=
+           BIT_ULL(mm.max_order) * PAGE_SIZE) {
+               pr_err("block size(%llu) != %llu\n",
+                      i915_buddy_block_size(&mm, block),
+                      BIT_ULL(mm.max_order) * PAGE_SIZE);
+               err = -EINVAL;
+               goto out_free;
+       }
+
+out_free:
+       i915_buddy_free(&mm, block);
+out_fini:
+       i915_buddy_fini(&mm);
+       return err;
+}
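+
The limit test checks that a block of the maximum order spans BIT_ULL(max_order) * PAGE_SIZE bytes. A small sketch of that order-to-size relation, with an illustrative cap and page size rather than the driver's I915_BUDDY_MAX_ORDER:

/* Order <-> size relation: a block of order n covers (chunk << n) bytes. */
#include <stdint.h>
#include <stdio.h>

#define CHUNK      4096ULL   /* assumed minimum block size (PAGE_SIZE-like) */
#define MAX_ORDER  20        /* illustrative cap, not I915_BUDDY_MAX_ORDER */

static uint64_t block_size(unsigned int order)
{
	return CHUNK << order;   /* each order doubles the block size */
}

int main(void)
{
	unsigned int order;

	for (order = 0; order <= MAX_ORDER; order += 5)
		printf("order %2u -> %llu bytes\n", order,
		       (unsigned long long)block_size(order));
	return 0;
}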
+
+int i915_buddy_mock_selftests(void)
+{
+       static const struct i915_subtest tests[] = {
+               SUBTEST(igt_buddy_alloc_pessimistic),
+               SUBTEST(igt_buddy_alloc_optimistic),
+               SUBTEST(igt_buddy_alloc_pathological),
+               SUBTEST(igt_buddy_alloc_smoke),
+               SUBTEST(igt_buddy_alloc_range),
+               SUBTEST(igt_buddy_alloc_limit),
+       };
+
+       return i915_subtests(tests, NULL);
+}
index a92c0e9..cfa5c41 100644 (file)
@@ -26,6 +26,7 @@ selftest(gt_mocs, intel_mocs_live_selftests)
 selftest(gt_pm, intel_gt_pm_live_selftests)
 selftest(gt_heartbeat, intel_heartbeat_live_selftests)
 selftest(requests, i915_request_live_selftests)
+selftest(migrate, intel_migrate_live_selftests)
 selftest(active, i915_active_live_selftests)
 selftest(objects, i915_gem_object_live_selftests)
 selftest(mman, i915_gem_mman_live_selftests)
@@ -37,14 +38,14 @@ selftest(gem, i915_gem_live_selftests)
 selftest(evict, i915_gem_evict_live_selftests)
 selftest(hugepages, i915_gem_huge_page_live_selftests)
 selftest(gem_contexts, i915_gem_context_live_selftests)
-selftest(gem_execbuf, i915_gem_execbuffer_live_selftests)
-selftest(blt, i915_gem_object_blt_live_selftests)
 selftest(client, i915_gem_client_blt_live_selftests)
+selftest(gem_migrate, i915_gem_migrate_live_selftests)
 selftest(reset, intel_reset_live_selftests)
 selftest(memory_region, intel_memory_region_live_selftests)
 selftest(hangcheck, intel_hangcheck_live_selftests)
 selftest(execlists, intel_execlists_live_selftests)
 selftest(ring_submission, intel_ring_submission_live_selftests)
 selftest(perf, i915_perf_live_selftests)
+selftest(slpc, intel_slpc_live_selftests)
 /* Here be dragons: keep last to run last! */
 selftest(late_gt_pm, intel_gt_pm_late_selftests)
index 34e5caf..793fb28 100644 (file)
@@ -32,5 +32,5 @@ selftest(vma, i915_vma_mock_selftests)
 selftest(evict, i915_gem_evict_mock_selftests)
 selftest(gtt, i915_gem_gtt_mock_selftests)
 selftest(hugepages, i915_gem_huge_page_mock_selftests)
-selftest(contexts, i915_gem_context_mock_selftests)
 selftest(memory_region, intel_memory_region_mock_selftests)
+selftest(buddy, i915_buddy_mock_selftests)
index c2389f8..058450d 100644 (file)
@@ -17,5 +17,5 @@
  */
 selftest(engine_cs, intel_engine_cs_perf_selftests)
 selftest(request, i915_request_perf_selftests)
-selftest(blt, i915_gem_object_blt_perf_selftests)
+selftest(migrate, intel_migrate_perf_selftests)
 selftest(region, intel_memory_region_perf_selftests)
index bd5c96a..d67710d 100644 (file)
@@ -1313,7 +1313,7 @@ static int __live_parallel_engine1(void *arg)
                i915_request_add(rq);
 
                err = 0;
-               if (i915_request_wait(rq, 0, HZ / 5) < 0)
+               if (i915_request_wait(rq, 0, HZ) < 0)
                        err = -ETIME;
                i915_request_put(rq);
                if (err)
@@ -1419,7 +1419,7 @@ static int __live_parallel_spin(void *arg)
        }
        igt_spinner_end(&spin);
 
-       if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0)
+       if (err == 0 && i915_request_wait(rq, 0, HZ) < 0)
                err = -EIO;
        i915_request_put(rq);
 
index 1bc11c0..484759c 100644 (file)
@@ -187,7 +187,7 @@ int i915_mock_selftests(void)
        err = run_selftests(mock, NULL);
        if (err) {
                i915_selftest.mock = err;
-               return err;
+               return 1;
        }
 
        if (i915_selftest.mock < 0) {
@@ -430,7 +430,7 @@ module_param_named(st_timeout, i915_selftest.timeout_ms, uint, 0400);
 module_param_named(st_filter, i915_selftest.filter, charp, 0400);
 
 module_param_named_unsafe(mock_selftests, i915_selftest.mock, int, 0400);
-MODULE_PARM_DESC(mock_selftests, "Run selftests before loading, using mock hardware (0:disabled [default], 1:run tests then load driver, -1:run tests then exit module)");
+MODULE_PARM_DESC(mock_selftests, "Run selftests before loading, using mock hardware (0:disabled [default], 1:run tests then load driver, -1:run tests then leave dummy module)");
 
 module_param_named_unsafe(live_selftests, i915_selftest.live, int, 0400);
 MODULE_PARM_DESC(live_selftests, "Run selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)");
index 7b0939e..a6c71fc 100644 (file)
@@ -19,7 +19,7 @@ int igt_flush_test(struct drm_i915_private *i915)
 
        cond_resched();
 
-       if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) {
+       if (intel_gt_wait_for_idle(gt, HZ) == -ETIME) {
                pr_err("%pS timed out, cancelling all further testing.\n",
                       __builtin_return_address(0));
 
index c130010..1c72154 100644 (file)
@@ -5,7 +5,7 @@
  */
 
 #include "i915_drv.h"
-#include "gt/intel_gt_requests.h"
+#include "gt/intel_gt.h"
 
 #include "../i915_selftest.h"
 #include "igt_flush_test.h"
index c85d516..418caae 100644 (file)
 #include "gem/i915_gem_context.h"
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
-#include "gem/i915_gem_object_blt.h"
 #include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
+#include "gt/intel_engine_pm.h"
 #include "gt/intel_engine_user.h"
 #include "gt/intel_gt.h"
+#include "i915_buddy.h"
+#include "gt/intel_migrate.h"
 #include "i915_memcpy.h"
+#include "i915_ttm_buddy_manager.h"
 #include "selftests/igt_flush_test.h"
 #include "selftests/i915_random.h"
 
@@ -57,16 +60,15 @@ static int igt_mock_fill(void *arg)
        LIST_HEAD(objects);
        int err = 0;
 
-       page_size = mem->chunk_size;
+       page_size = PAGE_SIZE;
+       max_pages = div64_u64(total, page_size);
        rem = total;
-retry:
-       max_pages = div64_u64(rem, page_size);
 
        for_each_prime_number_from(page_num, 1, max_pages) {
                resource_size_t size = page_num * page_size;
                struct drm_i915_gem_object *obj;
 
-               obj = i915_gem_object_create_region(mem, size, 0);
+               obj = i915_gem_object_create_region(mem, size, 0, 0);
                if (IS_ERR(obj)) {
                        err = PTR_ERR(obj);
                        break;
@@ -86,11 +88,6 @@ retry:
                err = 0;
        if (err == -ENXIO) {
                if (page_num * page_size <= rem) {
-                       if (mem->is_range_manager && max_pages > 1) {
-                               max_pages >>= 1;
-                               goto retry;
-                       }
-
                        pr_err("%s failed, space still left in region\n",
                               __func__);
                        err = -EINVAL;
@@ -113,7 +110,7 @@ igt_object_create(struct intel_memory_region *mem,
        struct drm_i915_gem_object *obj;
        int err;
 
-       obj = i915_gem_object_create_region(mem, size, flags);
+       obj = i915_gem_object_create_region(mem, size, 0, flags);
        if (IS_ERR(obj))
                return obj;
 
@@ -157,6 +154,7 @@ static bool is_contiguous(struct drm_i915_gem_object *obj)
 static int igt_mock_reserve(void *arg)
 {
        struct intel_memory_region *mem = arg;
+       struct drm_i915_private *i915 = mem->i915;
        resource_size_t avail = resource_size(&mem->region);
        struct drm_i915_gem_object *obj;
        const u32 chunk_size = SZ_32M;
@@ -166,16 +164,18 @@ static int igt_mock_reserve(void *arg)
        LIST_HEAD(objects);
        int err = 0;
 
-       if (!list_empty(&mem->reserved)) {
-               pr_err("%s region reserved list is not empty\n", __func__);
-               return -EINVAL;
-       }
-
        count = avail / chunk_size;
        order = i915_random_order(count, &prng);
        if (!order)
                return 0;
 
+       mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+       if (IS_ERR(mem)) {
+               pr_err("failed to create memory region\n");
+               err = PTR_ERR(mem);
+               goto out_free_order;
+       }
+
        /* Reserve a bunch of ranges within the region */
        for (i = 0; i < count; ++i) {
                u64 start = order[i] * chunk_size;
@@ -205,18 +205,12 @@ static int igt_mock_reserve(void *arg)
        do {
                u32 size = i915_prandom_u32_max_state(cur_avail, &prng);
 
-retry:
                size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
                obj = igt_object_create(mem, &objects, size, 0);
                if (IS_ERR(obj)) {
-                       if (PTR_ERR(obj) == -ENXIO) {
-                               if (mem->is_range_manager &&
-                                   size > mem->chunk_size) {
-                                       size >>= 1;
-                                       goto retry;
-                               }
+                       if (PTR_ERR(obj) == -ENXIO)
                                break;
-                       }
+
                        err = PTR_ERR(obj);
                        goto out_close;
                }
@@ -230,9 +224,10 @@ retry:
        }
 
 out_close:
-       kfree(order);
        close_objects(mem, &objects);
-       intel_memory_region_unreserve(mem);
+       intel_memory_region_put(mem);
+out_free_order:
+       kfree(order);
        return err;
 }
 
@@ -252,7 +247,7 @@ static int igt_mock_contiguous(void *arg)
        total = resource_size(&mem->region);
 
        /* Min size */
-       obj = igt_object_create(mem, &objects, mem->chunk_size,
+       obj = igt_object_create(mem, &objects, PAGE_SIZE,
                                I915_BO_ALLOC_CONTIGUOUS);
        if (IS_ERR(obj))
                return PTR_ERR(obj);
@@ -333,17 +328,15 @@ static int igt_mock_contiguous(void *arg)
        min = target;
        target = total >> 1;
 
-       if (!mem->is_range_manager) {
-               /* Make sure we can still allocate all the fragmented space */
-               obj = igt_object_create(mem, &objects, target, 0);
-               if (IS_ERR(obj)) {
-                       err = PTR_ERR(obj);
-                       goto err_close_objects;
-               }
-
-               igt_object_release(obj);
+       /* Make sure we can still allocate all the fragmented space */
+       obj = igt_object_create(mem, &objects, target, 0);
+       if (IS_ERR(obj)) {
+               err = PTR_ERR(obj);
+               goto err_close_objects;
        }
 
+       igt_object_release(obj);
+
        /*
         * Even though we have enough free space, we don't have a big enough
         * contiguous block. Make sure that holds true.
@@ -362,7 +355,7 @@ static int igt_mock_contiguous(void *arg)
                }
 
                target >>= 1;
-       } while (target >= mem->chunk_size);
+       } while (target >= PAGE_SIZE);
 
 err_close_objects:
        list_splice_tail(&holes, &objects);
@@ -374,7 +367,9 @@ static int igt_mock_splintered_region(void *arg)
 {
        struct intel_memory_region *mem = arg;
        struct drm_i915_private *i915 = mem->i915;
+       struct i915_ttm_buddy_resource *res;
        struct drm_i915_gem_object *obj;
+       struct i915_buddy_mm *mm;
        unsigned int expected_order;
        LIST_HEAD(objects);
        u64 size;
@@ -382,7 +377,7 @@ static int igt_mock_splintered_region(void *arg)
 
        /*
         * Sanity check we can still allocate everything even if the
-        * max_order != mm.size. i.e our starting address space size is not a
+        * mm.max_order != mm.size. i.e our starting address space size is not a
         * power-of-two.
         */
 
@@ -391,20 +386,29 @@ static int igt_mock_splintered_region(void *arg)
        if (IS_ERR(mem))
                return PTR_ERR(mem);
 
-       expected_order = get_order(rounddown_pow_of_two(size));
-       if (mem->max_order != expected_order) {
-               pr_err("%s order mismatch(%u != %u)\n",
-                      __func__, mem->max_order, expected_order);
-               err = -EINVAL;
-               goto out_put;
-       }
-
        obj = igt_object_create(mem, &objects, size, 0);
        if (IS_ERR(obj)) {
                err = PTR_ERR(obj);
                goto out_close;
        }
 
+       res = to_ttm_buddy_resource(obj->mm.res);
+       mm = res->mm;
+       if (mm->size != size) {
+               pr_err("%s size mismatch(%llu != %llu)\n",
+                      __func__, mm->size, size);
+               err = -EINVAL;
+               goto out_put;
+       }
+
+       expected_order = get_order(rounddown_pow_of_two(size));
+       if (mm->max_order != expected_order) {
+               pr_err("%s order mismatch(%u != %u)\n",
+                      __func__, mm->max_order, expected_order);
+               err = -EINVAL;
+               goto out_put;
+       }
+
        close_objects(mem, &objects);
 
        /*
@@ -415,15 +419,12 @@ static int igt_mock_splintered_region(void *arg)
         * sure that does indeed hold true.
         */
 
-       if (!mem->is_range_manager) {
-               obj = igt_object_create(mem, &objects, size,
-                                       I915_BO_ALLOC_CONTIGUOUS);
-               if (!IS_ERR(obj)) {
-                       pr_err("%s too large contiguous allocation was not rejected\n",
-                              __func__);
-                       err = -EINVAL;
-                       goto out_close;
-               }
+       obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
+       if (!IS_ERR(obj)) {
+               pr_err("%s too large contiguous allocation was not rejected\n",
+                      __func__);
+               err = -EINVAL;
+               goto out_close;
        }
 
        obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
@@ -442,6 +443,74 @@ out_put:
        return err;
 }
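 
The splintered-region check relies on the fact that when the region size is not a power of two, the largest buddy block can only cover rounddown_pow_of_two(size). A standalone sketch of that expectation, assuming a 4 KiB page; the helpers and sizes below are illustrative, not the kernel's get_order():

/* Expected maximum order for a non-power-of-two region (illustrative). */
#include <stdint.h>
#include <stdio.h>

static uint64_t rounddown_pow_of_two_u64(uint64_t x)
{
	uint64_t p = 1;

	while (p <= x / 2)       /* largest power of two <= x */
		p <<= 1;
	return p;
}

static unsigned int order_of(uint64_t bytes, uint64_t page)
{
	unsigned int order = 0;

	for (bytes /= page; bytes > 1; bytes >>= 1)   /* log2 in page units */
		order++;
	return order;
}

int main(void)
{
	const uint64_t page = 4096;
	const uint64_t size = (1ULL << 30) + (1ULL << 20);   /* 1 GiB + 1 MiB */
	uint64_t pot = rounddown_pow_of_two_u64(size);

	printf("size=%llu largest power-of-two span=%llu expected max order=%u\n",
	       (unsigned long long)size, (unsigned long long)pot,
	       order_of(pot, page));
	return 0;
}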
 
+#ifndef SZ_8G
+#define SZ_8G BIT_ULL(33)
+#endif
+
+static int igt_mock_max_segment(void *arg)
+{
+       const unsigned int max_segment = rounddown(UINT_MAX, PAGE_SIZE);
+       struct intel_memory_region *mem = arg;
+       struct drm_i915_private *i915 = mem->i915;
+       struct i915_ttm_buddy_resource *res;
+       struct drm_i915_gem_object *obj;
+       struct i915_buddy_block *block;
+       struct i915_buddy_mm *mm;
+       struct list_head *blocks;
+       struct scatterlist *sg;
+       LIST_HEAD(objects);
+       u64 size;
+       int err = 0;
+
+       /*
+        * While we may create very large contiguous blocks, we may need
+        * to break those down for consumption elsewhere. In particular,
+        * dma-mapping with scatterlist elements have an implicit limit of
+        * UINT_MAX on each element.
+        */
+
+       size = SZ_8G;
+       mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
+       if (IS_ERR(mem))
+               return PTR_ERR(mem);
+
+       obj = igt_object_create(mem, &objects, size, 0);
+       if (IS_ERR(obj)) {
+               err = PTR_ERR(obj);
+               goto out_put;
+       }
+
+       res = to_ttm_buddy_resource(obj->mm.res);
+       blocks = &res->blocks;
+       mm = res->mm;
+       size = 0;
+       list_for_each_entry(block, blocks, link) {
+               if (i915_buddy_block_size(mm, block) > size)
+                       size = i915_buddy_block_size(mm, block);
+       }
+       if (size < max_segment) {
+               pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
+                      __func__, max_segment, size);
+               err = -EINVAL;
+               goto out_close;
+       }
+
+       for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+               if (sg->length > max_segment) {
+                       pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
+                              __func__, sg->length, max_segment);
+                       err = -EINVAL;
+                       goto out_close;
+               }
+       }
+
+out_close:
+       close_objects(mem, &objects);
+out_put:
+       intel_memory_region_put(mem);
+       return err;
+}
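+
The comment in the test above notes that dma-mapping scatterlist entries are implicitly limited to UINT_MAX bytes each, so even a huge contiguous block must be split into segments no larger than rounddown(UINT_MAX, PAGE_SIZE). A user-space sketch of that splitting rule, with an assumed 4 KiB page and an 8 GiB block:

/* Segment-splitting rule only; no real scatterlist is built here. */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t page = 4096;                          /* assumed page size */
	const uint64_t max_segment = (UINT_MAX / page) * page;   /* rounddown */
	uint64_t remaining = 8ULL << 30;                     /* an 8 GiB block */
	unsigned int nents = 0;

	while (remaining) {
		uint64_t len = remaining < max_segment ? remaining : max_segment;

		remaining -= len;    /* one sg entry of 'len' bytes */
		nents++;
	}
	printf("max_segment=%llu bytes, entries needed=%u\n",
	       (unsigned long long)max_segment, nents);
	return 0;
}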
+
 static int igt_gpu_write_dw(struct intel_context *ce,
                            struct i915_vma *vma,
                            u32 dword,
@@ -579,6 +648,62 @@ out_put:
        return err;
 }
 
+static int igt_lmem_create_with_ps(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       int err = 0;
+       u32 ps;
+
+       for (ps = PAGE_SIZE; ps <= SZ_1G; ps <<= 1) {
+               struct drm_i915_gem_object *obj;
+               dma_addr_t daddr;
+
+               obj = __i915_gem_object_create_lmem_with_ps(i915, ps, ps, 0);
+               if (IS_ERR(obj)) {
+                       err = PTR_ERR(obj);
+                       if (err == -ENXIO || err == -E2BIG) {
+                               pr_info("%s not enough lmem for ps(%u) err=%d\n",
+                                       __func__, ps, err);
+                               err = 0;
+                       }
+
+                       break;
+               }
+
+               if (obj->base.size != ps) {
+                       pr_err("%s size(%zu) != ps(%u)\n",
+                              __func__, obj->base.size, ps);
+                       err = -EINVAL;
+                       goto out_put;
+               }
+
+               i915_gem_object_lock(obj, NULL);
+               err = i915_gem_object_pin_pages(obj);
+               if (err)
+                       goto out_put;
+
+               daddr = i915_gem_object_get_dma_address(obj, 0);
+               if (!IS_ALIGNED(daddr, ps)) {
+                       pr_err("%s daddr(%pa) not aligned with ps(%u)\n",
+                              __func__, &daddr, ps);
+                       err = -EINVAL;
+                       goto out_unpin;
+               }
+
+out_unpin:
+               i915_gem_object_unpin_pages(obj);
+               __i915_gem_object_put_pages(obj);
+out_put:
+               i915_gem_object_unlock(obj);
+               i915_gem_object_put(obj);
+
+               if (err)
+                       break;
+       }
+
+       return err;
+}
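+
The page-size loop above walks power-of-two sizes from PAGE_SIZE up to 1 GiB and requires the first DMA address to be aligned to the requested size. A minimal sketch of that alignment check, with an assumed address chosen so both outcomes show up:

/* Power-of-two alignment check, modelled in user space. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_aligned(uint64_t addr, uint64_t ps)
{
	return (addr & (ps - 1)) == 0;   /* valid only for power-of-two ps */
}

int main(void)
{
	const uint64_t addr = 0x40200000;   /* illustrative address: 1 GiB + 2 MiB */
	uint64_t ps;

	for (ps = 4096; ps <= (1ULL << 30); ps <<= 1)
		if (!is_aligned(addr, ps))
			printf("addr %#llx not aligned to ps=%llu\n",
			       (unsigned long long)addr, (unsigned long long)ps);
	return 0;
}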
+
 static int igt_lmem_create_cleared_cpu(void *arg)
 {
        struct drm_i915_private *i915 = arg;
@@ -741,6 +866,7 @@ static int igt_lmem_write_cpu(void *arg)
                PAGE_SIZE - 64,
        };
        struct intel_engine_cs *engine;
+       struct i915_request *rq;
        u32 *vaddr;
        u32 sz;
        u32 i;
@@ -767,15 +893,20 @@ static int igt_lmem_write_cpu(void *arg)
                goto out_put;
        }
 
+       i915_gem_object_lock(obj, NULL);
        /* Put the pages into a known state -- from the gpu for added fun */
        intel_engine_pm_get(engine);
-       err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf);
-       intel_engine_pm_put(engine);
-       if (err)
-               goto out_unpin;
+       err = intel_context_migrate_clear(engine->gt->migrate.context, NULL,
+                                         obj->mm.pages->sgl, I915_CACHE_NONE,
+                                         true, 0xdeadbeaf, &rq);
+       if (rq) {
+               dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
+               i915_request_put(rq);
+       }
 
-       i915_gem_object_lock(obj, NULL);
-       err = i915_gem_object_set_to_wc_domain(obj, true);
+       intel_engine_pm_put(engine);
+       if (!err)
+               err = i915_gem_object_set_to_wc_domain(obj, true);
        i915_gem_object_unlock(obj);
        if (err)
                goto out_unpin;
@@ -858,7 +989,7 @@ create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
        struct drm_i915_gem_object *obj;
        void *addr;
 
-       obj = i915_gem_object_create_region(mr, size, 0);
+       obj = i915_gem_object_create_region(mr, size, 0, 0);
        if (IS_ERR(obj)) {
                if (PTR_ERR(obj) == -ENOSPC) /* Stolen memory */
                        return ERR_PTR(-ENODEV);
@@ -1046,6 +1177,7 @@ int intel_memory_region_mock_selftests(void)
                SUBTEST(igt_mock_fill),
                SUBTEST(igt_mock_contiguous),
                SUBTEST(igt_mock_splintered_region),
+               SUBTEST(igt_mock_max_segment),
        };
        struct intel_memory_region *mem;
        struct drm_i915_private *i915;
@@ -1074,6 +1206,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 {
        static const struct i915_subtest tests[] = {
                SUBTEST(igt_lmem_create),
+               SUBTEST(igt_lmem_create_with_ps),
                SUBTEST(igt_lmem_create_cleared_cpu),
                SUBTEST(igt_lmem_write_cpu),
                SUBTEST(igt_lmem_write_gpu),
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
new file mode 100644 (file)
index 0000000..4b32834
--- /dev/null
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+//#include "gt/intel_engine_user.h"
+#include "gt/intel_gt.h"
+#include "i915_drv.h"
+#include "i915_selftest.h"
+
+#include "selftests/intel_scheduler_helpers.h"
+
+#define REDUCED_TIMESLICE      5
+#define REDUCED_PREEMPT                10
+#define WAIT_FOR_RESET_TIME    10000
+
+int intel_selftest_modify_policy(struct intel_engine_cs *engine,
+                                struct intel_selftest_saved_policy *saved,
+                                u32 modify_type)
+
+{
+       int err;
+
+       saved->reset = engine->i915->params.reset;
+       saved->flags = engine->flags;
+       saved->timeslice = engine->props.timeslice_duration_ms;
+       saved->preempt_timeout = engine->props.preempt_timeout_ms;
+
+       switch (modify_type) {
+       case SELFTEST_SCHEDULER_MODIFY_FAST_RESET:
+               /*
+                * Enable force pre-emption on time slice expiration
+                * together with engine reset on pre-emption timeout.
+                * This is required to make the GuC notice and reset
+                * the single hanging context.
+                * Also, reduce the preemption timeout to something
+                * small to speed the test up.
+                */
+               engine->i915->params.reset = 2;
+               engine->flags |= I915_ENGINE_WANT_FORCED_PREEMPTION;
+               engine->props.timeslice_duration_ms = REDUCED_TIMESLICE;
+               engine->props.preempt_timeout_ms = REDUCED_PREEMPT;
+               break;
+
+       case SELFTEST_SCHEDULER_MODIFY_NO_HANGCHECK:
+               engine->props.preempt_timeout_ms = 0;
+               break;
+
+       default:
+               pr_err("Invalid scheduler policy modification type: %d!\n", modify_type);
+               return -EINVAL;
+       }
+
+       if (!intel_engine_uses_guc(engine))
+               return 0;
+
+       err = intel_guc_global_policies_update(&engine->gt->uc.guc);
+       if (err)
+               intel_selftest_restore_policy(engine, saved);
+
+       return err;
+}
+
+int intel_selftest_restore_policy(struct intel_engine_cs *engine,
+                                 struct intel_selftest_saved_policy *saved)
+{
+       /* Restore the original policies */
+       engine->i915->params.reset = saved->reset;
+       engine->flags = saved->flags;
+       engine->props.timeslice_duration_ms = saved->timeslice;
+       engine->props.preempt_timeout_ms = saved->preempt_timeout;
+
+       if (!intel_engine_uses_guc(engine))
+               return 0;
+
+       return intel_guc_global_policies_update(&engine->gt->uc.guc);
+}
+
+int intel_selftest_wait_for_rq(struct i915_request *rq)
+{
+       long ret;
+
+       ret = i915_request_wait(rq, 0, WAIT_FOR_RESET_TIME);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
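+
The helpers in this new file follow a save/override/restore pattern: the original engine parameters are recorded, the test-specific values are applied, and restore_policy puts everything back. A generic sketch of that pattern; the struct, fields and values are illustrative only, not the i915 engine properties.

/* Generic save/override/restore pattern, modelled in user space. */
#include <stdio.h>

struct policy {
	unsigned int timeslice_ms;
	unsigned int preempt_timeout_ms;
};

static void modify_policy(struct policy *p, struct policy *saved)
{
	*saved = *p;               /* remember the original settings */
	p->timeslice_ms = 5;       /* reduced values to speed the test up */
	p->preempt_timeout_ms = 10;
}

static void restore_policy(struct policy *p, const struct policy *saved)
{
	*p = *saved;               /* put the original settings back */
}

int main(void)
{
	struct policy p = { .timeslice_ms = 100, .preempt_timeout_ms = 640 };
	struct policy saved;

	modify_policy(&p, &saved);
	printf("during test: timeslice=%u preempt=%u\n",
	       p.timeslice_ms, p.preempt_timeout_ms);
	restore_policy(&p, &saved);
	printf("after test:  timeslice=%u preempt=%u\n",
	       p.timeslice_ms, p.preempt_timeout_ms);
	return 0;
}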
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h
new file mode 100644 (file)
index 0000000..35c0986
--- /dev/null
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _INTEL_SELFTEST_SCHEDULER_HELPERS_H_
+#define _INTEL_SELFTEST_SCHEDULER_HELPERS_H_
+
+#include <linux/types.h>
+
+struct i915_request;
+struct intel_engine_cs;
+
+struct intel_selftest_saved_policy {
+       u32 flags;
+       u32 reset;
+       u64 timeslice;
+       u64 preempt_timeout;
+};
+
+enum selftest_scheduler_modify {
+       SELFTEST_SCHEDULER_MODIFY_NO_HANGCHECK = 0,
+       SELFTEST_SCHEDULER_MODIFY_FAST_RESET,
+};
+
+int intel_selftest_modify_policy(struct intel_engine_cs *engine,
+                                struct intel_selftest_saved_policy *saved,
+                                enum selftest_scheduler_modify modify_type);
+int intel_selftest_restore_policy(struct intel_engine_cs *engine,
+                                 struct intel_selftest_saved_policy *saved);
+int intel_selftest_wait_for_rq(struct i915_request *rq);
+
+#endif
index 8ef9e6a..720b608 100644 (file)
@@ -68,6 +68,7 @@ static int intel_shadow_table_check(void)
                { gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
                { gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
                { gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
+               { xehp_shadowed_regs, ARRAY_SIZE(xehp_shadowed_regs) },
        };
        const i915_reg_t *reg;
        unsigned int i, j;
@@ -103,6 +104,7 @@ int intel_uncore_mock_selftests(void)
                { __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
                { __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true },
                { __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true },
+               { __xehp_fw_ranges, ARRAY_SIZE(__xehp_fw_ranges), true },
        };
        int err, i;
 
index d189c4b..4f81801 100644 (file)
@@ -52,7 +52,8 @@ void mock_device_flush(struct drm_i915_private *i915)
        do {
                for_each_engine(engine, gt, id)
                        mock_engine_flush(engine);
-       } while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT));
+       } while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT,
+                                                 NULL));
 }
 
 static void mock_device_release(struct drm_device *dev)
index 5c7ae40..cc047ec 100644 (file)
@@ -73,7 +73,6 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
        ppgtt->vm.gt = &i915->gt;
        ppgtt->vm.i915 = i915;
        ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
-       ppgtt->vm.file = ERR_PTR(-ENODEV);
        ppgtt->vm.dma = i915->drm.dev;
 
        i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
index eafc5a0..efa86df 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright © 2019-2021 Intel Corporation
  */
 
+#include <drm/ttm/ttm_placement.h>
 #include <linux/scatterlist.h>
 
 #include <drm/ttm/ttm_placement.h>
@@ -16,7 +17,7 @@
 static void mock_region_put_pages(struct drm_i915_gem_object *obj,
                                  struct sg_table *pages)
 {
-       intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
+       intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
        sg_free_table(pages);
        kfree(pages);
 }
@@ -25,26 +26,31 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj)
 {
        unsigned int flags;
        struct sg_table *pages;
+       int err;
 
-       flags = I915_ALLOC_MIN_PAGE_SIZE;
+       flags = 0;
        if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
-               flags |= I915_ALLOC_CONTIGUOUS;
+               flags |= TTM_PL_FLAG_CONTIGUOUS;
 
-       obj->mm.st_mm_node = intel_region_ttm_node_alloc(obj->mm.region,
-                                                        obj->base.size,
-                                                        flags);
-       if (IS_ERR(obj->mm.st_mm_node))
-               return PTR_ERR(obj->mm.st_mm_node);
+       obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,
+                                                     obj->base.size,
+                                                     flags);
+       if (IS_ERR(obj->mm.res))
+               return PTR_ERR(obj->mm.res);
 
-       pages = intel_region_ttm_node_to_st(obj->mm.region, obj->mm.st_mm_node);
+       pages = intel_region_ttm_resource_to_st(obj->mm.region, obj->mm.res);
        if (IS_ERR(pages)) {
-               intel_region_ttm_node_free(obj->mm.region, obj->mm.st_mm_node);
-               return PTR_ERR(pages);
+               err = PTR_ERR(pages);
+               goto err_free_resource;
        }
 
        __i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
 
        return 0;
+
+err_free_resource:
+       intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
+       return err;
 }
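 
The rewritten error path above uses the usual goto-unwind style: once the resource has been allocated, a later failure jumps to a label that frees it before returning. A generic user-space sketch of the same shape, using plain malloc allocations rather than TTM resources:

/* Goto-unwind error handling, modelled with malloc/free. */
#include <stdio.h>
#include <stdlib.h>

static int get_pages(void)
{
	void *res, *pages;
	int err;

	res = malloc(64);          /* stands in for the region resource */
	if (!res)
		return -1;

	pages = malloc(64);        /* stands in for the sg_table */
	if (!pages) {
		err = -1;
		goto err_free_resource;   /* unwind the earlier allocation */
	}

	free(pages);
	free(res);
	return 0;

err_free_resource:
	free(res);
	return err;
}

int main(void)
{
	printf("get_pages() = %d\n", get_pages());
	return 0;
}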
 
 static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
@@ -57,6 +63,7 @@ static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
 static int mock_object_init(struct intel_memory_region *mem,
                            struct drm_i915_gem_object *obj,
                            resource_size_t size,
+                           resource_size_t page_size,
                            unsigned int flags)
 {
        static struct lock_class_key lock_class;
index 4b86e8b..83e8b85 100644 (file)
@@ -2802,8 +2802,8 @@ ucMaxNBVoltageHigh:     Voltage regulator dependent PWM value. High 8 bits of  t
 ucMinNBVoltageHigh:     Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
 
 
-usInterNBVoltageLow:    Voltage regulator dependent PWM value. The value makes the the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
-usInterNBVoltageHigh:   Voltage regulator dependent PWM value. The value makes the the voltage >=InterNBVoltageLow but <=Max NB voltage.Set this to 0x0000 if VC without PWM or no VC at all.
+usInterNBVoltageLow:    Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
+usInterNBVoltageHigh:   Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage.Set this to 0x0000 if VC without PWM or no VC at all.
 */
 
 
index 36a888e..eeb590d 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <drm/drm_vblank.h>
 #include <drm/radeon_drm.h>
+#include <drm/drm_fourcc.h>
 
 #include "atom.h"
 #include "avivod.h"
@@ -1414,10 +1415,15 @@ void evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base,
                         bool async)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+       struct drm_framebuffer *fb = radeon_crtc->base.primary->fb;
 
-       /* update the scanout addresses */
+       /* flip at hsync for async, default is vsync */
        WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
               async ? EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+       /* update pitch */
+       WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset,
+              fb->pitches[0] / fb->format->cpp[0]);
+       /* update the scanout addresses */
        WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
               upper_32_bits(crtc_base));
        WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
index ba72419..2dd85ba 100644 (file)
@@ -162,6 +162,8 @@ void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
 void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+       uint32_t crtc_pitch, pitch_pixels;
+       struct drm_framebuffer *fb = radeon_crtc->base.primary->fb;
        u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
        int i;
 
@@ -169,6 +171,13 @@ void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool
        /* update the scanout addresses */
        WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
 
+       /* update pitch */
+       pitch_pixels = fb->pitches[0] / fb->format->cpp[0];
+       crtc_pitch = DIV_ROUND_UP(pitch_pixels * fb->format->cpp[0] * 8,
+                                 fb->format->cpp[0] * 8 * 8);
+       crtc_pitch |= crtc_pitch << 16;
+       WREG32(RADEON_CRTC_PITCH + radeon_crtc->crtc_offset, crtc_pitch);
+
        /* Wait for update_pending to go high. */
        for (i = 0; i < rdev->usec_timeout; i++) {
                if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
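 
The pitch update added above programs RADEON_CRTC_PITCH in units of 8 pixels, rounded up, with the value mirrored into the upper 16 bits. A standalone sketch of that arithmetic using the same expression as the hunk, with an assumed XRGB8888 framebuffer (cpp = 4) and a padded scanline:

/* CRTC pitch arithmetic only; no register access. */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
	const uint32_t pitch_bytes = 1920 * 4 + 64;   /* assumed padded scanline */
	const uint32_t cpp = 4;                       /* bytes per pixel */
	uint32_t pitch_pixels = pitch_bytes / cpp;
	uint32_t crtc_pitch;

	/* pitch in units of 8 pixels, rounded up, as in the hunk above */
	crtc_pitch = DIV_ROUND_UP(pitch_pixels * cpp * 8, cpp * 8 * 8);
	crtc_pitch |= crtc_pitch << 16;               /* duplicate into high half */

	printf("pitch_pixels=%u crtc_pitch=%#x\n", pitch_pixels, crtc_pitch);
	return 0;
}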
index 00c0d2b..60d5413 100644 (file)
 #       define R300_PVS_CNTL_1_PROGRAM_START_SHIFT   0
 #       define R300_PVS_CNTL_1_POS_END_SHIFT         10
 #       define R300_PVS_CNTL_1_PROGRAM_END_SHIFT     20
-/* Addresses are relative the the vertex program parameters area. */
+/* Addresses are relative to the vertex program parameters area. */
 #define R300_VAP_PVS_CNTL_2                 0x22D4
 #       define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
 #       define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT  16
index cec0323..ac8c325 100644 (file)
@@ -406,7 +406,7 @@ void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
 
 /*
  * radeon_wb_*()
- * Writeback is the the method by which the the GPU updates special pages
+ * Writeback is the method by which the GPU updates special pages
  * in memory with the status of certain GPU events (fences, ring pointers,
  * etc.).
  */
index 0b206b0..6640b7c 100644 (file)
@@ -54,6 +54,7 @@ radeonfb_open(struct fb_info *info, int user)
        struct radeon_fbdev *rfbdev = info->par;
        struct radeon_device *rdev = rfbdev->rdev;
        int ret = pm_runtime_get_sync(rdev->ddev->dev);
+
        if (ret < 0 && ret != -EACCES) {
                pm_runtime_mark_last_busy(rdev->ddev->dev);
                pm_runtime_put_autosuspend(rdev->ddev->dev);
@@ -196,9 +197,8 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
                radeon_bo_check_tiling(rbo, 0, 0);
        ret = radeon_bo_kmap(rbo, NULL);
        radeon_bo_unreserve(rbo);
-       if (ret) {
+       if (ret)
                goto out_unref;
-       }
 
        *gobj_p = gobj;
        return 0;
@@ -294,9 +294,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
        return 0;
 
 out:
-       if (rbo) {
-
-       }
        if (fb && ret) {
                drm_gem_object_put(gobj);
                drm_framebuffer_unregister_private(fb);
index 18f2c2e..e9c47ec 100644 (file)
@@ -50,7 +50,7 @@
  * for GPU/CPU synchronization.  When the fence is written,
  * it is expected that all buffers associated with that fence
  * are no longer in use by the associated ring on the GPU and
- * that the the relevant GPU caches have been flushed.  Whether
+ * that the relevant GPU caches have been flushed.  Whether
  * we use a scratch register or memory location depends on the asic
  * and whether writeback is enabled.
  */
index 36a38ad..bb53016 100644 (file)
@@ -41,7 +41,7 @@
  * (uncached system pages).
  * Each VM has an ID associated with it and there is a page table
  * associated with each VMID.  When executing a command buffer,

- * the kernel tells the the ring what VMID to use for that command
+ * the kernel tells the ring what VMID to use for that command
  * buffer.  VMIDs are allocated dynamically as commands are submitted.
  * The userspace drivers maintain their own address space and the kernel
  * sets up their pages tables accordingly when they submit their
index b2d22e2..b87dd55 100644 (file)
@@ -41,6 +41,7 @@
 
 #include <drm/drm_device.h>
 #include <drm/drm_vblank.h>
+#include <drm/drm_fourcc.h>
 
 #include "atom.h"
 #include "radeon.h"
@@ -118,6 +119,7 @@ void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
 void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+       struct drm_framebuffer *fb = radeon_crtc->base.primary->fb;
        u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
        int i;
 
@@ -125,9 +127,13 @@ void rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, boo
        tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
        WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
 
-       /* update the scanout addresses */
+       /* flip at hsync for async, default is vsync */
        WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
               async ? AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+       /* update pitch */
+       WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset,
+              fb->pitches[0] / fb->format->cpp[0]);
+       /* update the scanout addresses */
        WREG32(AVIVO_D1GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
               (u32)crtc_base);
        WREG32(AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
index 7449930..e592e57 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <drm/drm_device.h>
 #include <drm/radeon_drm.h>
+#include <drm/drm_fourcc.h>
 
 #include "atom.h"
 #include "avivod.h"
@@ -809,6 +810,7 @@ u32 rv770_get_xclk(struct radeon_device *rdev)
 void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, bool async)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+       struct drm_framebuffer *fb = radeon_crtc->base.primary->fb;
        u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
        int i;
 
@@ -816,9 +818,13 @@ void rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base, boo
        tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
        WREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset, tmp);
 
-       /* update the scanout addresses */
+       /* flip at hsync for async, default is vsync */
        WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset,
               async ? AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN : 0);
+       /* update pitch */
+       WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset,
+              fb->pitches[0] / fb->format->cpp[0]);
+       /* update the scanout addresses */
        if (radeon_crtc->crtc_id) {
                WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
                WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, upper_32_bits(crtc_base));
index b52c6cd..c3d0826 100644 (file)
@@ -3,6 +3,8 @@ config DRM_ZYNQMP_DPSUB
        depends on ARCH_ZYNQMP || COMPILE_TEST
        depends on COMMON_CLK && DRM && OF
        depends on DMADEVICES
+       depends on PHY_XILINX_ZYNQMP
+       depends on XILINX_ZYNQMP_DPDMA
        select DMA_ENGINE
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_CMA_HELPER
index 109d627..ff2b308 100644 (file)
@@ -91,7 +91,7 @@ struct zynqmp_disp_format {
 };
 
 /**
- * enum zynqmp_disp_id - Layer identifier
+ * enum zynqmp_disp_layer_id - Layer identifier
  * @ZYNQMP_DISP_LAYER_VID: Video layer
  * @ZYNQMP_DISP_LAYER_GFX: Graphics layer
  */
@@ -158,43 +158,17 @@ struct zynqmp_disp_layer {
        enum zynqmp_disp_layer_mode mode;
 };
 
-/**
- * struct zynqmp_disp_blend - Blender
- * @base: Registers I/O base address
- */
-struct zynqmp_disp_blend {
-       void __iomem *base;
-};
-
-/**
- * struct zynqmp_disp_avbuf - Audio/video buffer manager
- * @base: Registers I/O base address
- */
-struct zynqmp_disp_avbuf {
-       void __iomem *base;
-};
-
-/**
- * struct zynqmp_disp_audio - Audio mixer
- * @base: Registers I/O base address
- * @clk: Audio clock
- * @clk_from_ps: True of the audio clock comes from PS, false from PL
- */
-struct zynqmp_disp_audio {
-       void __iomem *base;
-       struct clk *clk;
-       bool clk_from_ps;
-};
-
 /**
  * struct zynqmp_disp - Display controller
  * @dev: Device structure
  * @drm: DRM core
  * @dpsub: Display subsystem
  * @crtc: DRM CRTC
- * @blend: Blender (video rendering pipeline)
- * @avbuf: Audio/video buffer manager
- * @audio: Audio mixer
+ * @blend.base: Register I/O base address for the blender
+ * @avbuf.base: Register I/O base address for the audio/video buffer manager
+ * @audio.base: Register I/O base address for the audio mixer
+ * @audio.clk: Audio clock
+ * @audio.clk_from_ps: True if the audio clock comes from PS, false from PL
  * @layers: Layers (planes)
  * @event: Pending vblank event request
  * @pclk: Pixel clock
@@ -207,9 +181,17 @@ struct zynqmp_disp {
 
        struct drm_crtc crtc;
 
-       struct zynqmp_disp_blend blend;
-       struct zynqmp_disp_avbuf avbuf;
-       struct zynqmp_disp_audio audio;
+       struct {
+               void __iomem *base;
+       } blend;
+       struct {
+               void __iomem *base;
+       } avbuf;
+       struct {
+               void __iomem *base;
+               struct clk *clk;
+               bool clk_from_ps;
+       } audio;
 
        struct zynqmp_disp_layer layers[ZYNQMP_DISP_NUM_LAYERS];
 
@@ -423,51 +405,60 @@ static const struct zynqmp_disp_format avbuf_gfx_fmts[] = {
        },
 };
 
-static u32 zynqmp_disp_avbuf_read(struct zynqmp_disp_avbuf *avbuf, int reg)
+static u32 zynqmp_disp_avbuf_read(struct zynqmp_disp *disp, int reg)
 {
-       return readl(avbuf->base + reg);
+       return readl(disp->avbuf.base + reg);
 }
 
-static void zynqmp_disp_avbuf_write(struct zynqmp_disp_avbuf *avbuf,
-                                   int reg, u32 val)
+static void zynqmp_disp_avbuf_write(struct zynqmp_disp *disp, int reg, u32 val)
 {
-       writel(val, avbuf->base + reg);
+       writel(val, disp->avbuf.base + reg);
+}
+
+static bool zynqmp_disp_layer_is_gfx(const struct zynqmp_disp_layer *layer)
+{
+       return layer->id == ZYNQMP_DISP_LAYER_GFX;
+}
+
+static bool zynqmp_disp_layer_is_video(const struct zynqmp_disp_layer *layer)
+{
+       return layer->id == ZYNQMP_DISP_LAYER_VID;
 }
 
 /**
  * zynqmp_disp_avbuf_set_format - Set the input format for a layer
- * @avbuf: Audio/video buffer manager
- * @layer: The layer ID
+ * @disp: Display controller
+ * @layer: The layer
  * @fmt: The format information
  *
  * Set the video buffer manager format for @layer to @fmt.
  */
-static void zynqmp_disp_avbuf_set_format(struct zynqmp_disp_avbuf *avbuf,
-                                        enum zynqmp_disp_layer_id layer,
+static void zynqmp_disp_avbuf_set_format(struct zynqmp_disp *disp,
+                                        struct zynqmp_disp_layer *layer,
                                         const struct zynqmp_disp_format *fmt)
 {
        unsigned int i;
        u32 val;
 
-       val = zynqmp_disp_avbuf_read(avbuf, ZYNQMP_DISP_AV_BUF_FMT);
-       val &= layer == ZYNQMP_DISP_LAYER_VID
+       val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_FMT);
+       val &= zynqmp_disp_layer_is_video(layer)
            ? ~ZYNQMP_DISP_AV_BUF_FMT_NL_VID_MASK
            : ~ZYNQMP_DISP_AV_BUF_FMT_NL_GFX_MASK;
        val |= fmt->buf_fmt;
-       zynqmp_disp_avbuf_write(avbuf, ZYNQMP_DISP_AV_BUF_FMT, val);
+       zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_FMT, val);
 
        for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_SF; i++) {
-               unsigned int reg = layer == ZYNQMP_DISP_LAYER_VID
+               unsigned int reg = zynqmp_disp_layer_is_video(layer)
                                 ? ZYNQMP_DISP_AV_BUF_VID_COMP_SF(i)
                                 : ZYNQMP_DISP_AV_BUF_GFX_COMP_SF(i);
 
-               zynqmp_disp_avbuf_write(avbuf, reg, fmt->sf[i]);
+               zynqmp_disp_avbuf_write(disp, reg, fmt->sf[i]);
        }
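 
The avbuf format helper above is a read-modify-write on a shared register: read the current value, clear the per-layer field, OR in the new format, and write it back. A generic sketch of that pattern; the mask and values are illustrative, not the ZYNQMP_DISP_AV_BUF_FMT layout.

/* Read-modify-write on a register-like value (illustrative field layout). */
#include <stdint.h>
#include <stdio.h>

#define FMT_VID_MASK  0x0000001fu   /* assumed per-layer format field */

static uint32_t reg = 0x12345678;   /* stands in for the MMIO register */

static void set_video_format(uint32_t fmt)
{
	uint32_t val = reg;          /* read */

	val &= ~FMT_VID_MASK;        /* clear the video-layer bits */
	val |= fmt & FMT_VID_MASK;   /* merge the new format */
	reg = val;                   /* write back */
}

int main(void)
{
	set_video_format(0x0b);
	printf("reg=%#x\n", reg);
	return 0;
}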
 }
 
 /**
  * zynqmp_disp_avbuf_set_clocks_sources - Set the clocks sources
- * @avbuf: Audio/video buffer manager
+ * @disp: Display controller
  * @video_from_ps: True if the video clock originates from the PS
  * @audio_from_ps: True if the audio clock originates from the PS
  * @timings_internal: True if video timings are generated internally
@@ -477,7 +468,7 @@ static void zynqmp_disp_avbuf_set_format(struct zynqmp_disp_avbuf *avbuf,
  * generated internally or externally.
  */
 static void
-zynqmp_disp_avbuf_set_clocks_sources(struct zynqmp_disp_avbuf *avbuf,
+zynqmp_disp_avbuf_set_clocks_sources(struct zynqmp_disp *disp,
                                     bool video_from_ps, bool audio_from_ps,
                                     bool timings_internal)
 {
@@ -490,16 +481,16 @@ zynqmp_disp_avbuf_set_clocks_sources(struct zynqmp_disp_avbuf *avbuf,
        if (timings_internal)
                val |= ZYNQMP_DISP_AV_BUF_CLK_SRC_VID_INTERNAL_TIMING;
 
-       zynqmp_disp_avbuf_write(avbuf, ZYNQMP_DISP_AV_BUF_CLK_SRC, val);
+       zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_CLK_SRC, val);
 }
 
 /**
  * zynqmp_disp_avbuf_enable_channels - Enable buffer channels
- * @avbuf: Audio/video buffer manager
+ * @disp: Display controller
  *
  * Enable all (video and audio) buffer channels.
  */
-static void zynqmp_disp_avbuf_enable_channels(struct zynqmp_disp_avbuf *avbuf)
+static void zynqmp_disp_avbuf_enable_channels(struct zynqmp_disp *disp)
 {
        unsigned int i;
        u32 val;
@@ -509,7 +500,7 @@ static void zynqmp_disp_avbuf_enable_channels(struct zynqmp_disp_avbuf *avbuf)
               ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT);
 
        for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_VID_GFX_BUFFERS; i++)
-               zynqmp_disp_avbuf_write(avbuf, ZYNQMP_DISP_AV_BUF_CHBUF(i),
+               zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_CHBUF(i),
                                        val);
 
        val = ZYNQMP_DISP_AV_BUF_CHBUF_EN |
@@ -517,75 +508,75 @@ static void zynqmp_disp_avbuf_enable_channels(struct zynqmp_disp_avbuf *avbuf)
               ZYNQMP_DISP_AV_BUF_CHBUF_BURST_LEN_SHIFT);
 
        for (; i < ZYNQMP_DISP_AV_BUF_NUM_BUFFERS; i++)
-               zynqmp_disp_avbuf_write(avbuf, ZYNQMP_DISP_AV_BUF_CHBUF(i),
+               zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_CHBUF(i),
                                        val);
 }
 
 /**
  * zynqmp_disp_avbuf_disable_channels - Disable buffer channels
- * @avbuf: Audio/video buffer manager
+ * @disp: Display controller
  *
  * Disable all (video and audio) buffer channels.
  */
-static void zynqmp_disp_avbuf_disable_channels(struct zynqmp_disp_avbuf *avbuf)
+static void zynqmp_disp_avbuf_disable_channels(struct zynqmp_disp *disp)
 {
        unsigned int i;
 
        for (i = 0; i < ZYNQMP_DISP_AV_BUF_NUM_BUFFERS; i++)
-               zynqmp_disp_avbuf_write(avbuf, ZYNQMP_DISP_AV_BUF_CHBUF(i),
+               zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_CHBUF(i),
                                        ZYNQMP_DISP_AV_BUF_CHBUF_FLUSH);
 }
 
 /**
  * zynqmp_disp_avbuf_enable_audio - Enable audio
- * @avbuf: Audio/video buffer manager
+ * @disp: Display controller
  *
  * Enable all audio buffers with a non-live (memory) source.
  */
-static void zynqmp_disp_avbuf_enable_audio(struct zynqmp_disp_avbuf *avbuf)
+static void zynqmp_disp_avbuf_enable_audio(struct zynqmp_disp *disp)
 {
        u32 val;
 
-       val = zynqmp_disp_avbuf_read(avbuf, ZYNQMP_DISP_AV_BUF_OUTPUT);
+       val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_OUTPUT);
        val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK;
        val |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MEM;
        val |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN;
-       zynqmp_disp_avbuf_write(avbuf, ZYNQMP_DISP_AV_BUF_OUTPUT, val);
+       zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_OUTPUT, val);
 }
 
 /**
  * zynqmp_disp_avbuf_disable_audio - Disable audio
- * @avbuf: Audio/video buffer manager
+ * @disp: Display controller
  *
  * Disable all audio buffers.
  */
-static void zynqmp_disp_avbuf_disable_audio(struct zynqmp_disp_avbuf *avbuf)
+static void zynqmp_disp_avbuf_disable_audio(struct zynqmp_disp *disp)
 {
        u32 val;
 
-       val = zynqmp_disp_avbuf_read(avbuf, ZYNQMP_DISP_AV_BUF_OUTPUT);
+       val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_OUTPUT);
        val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_MASK;
        val |= ZYNQMP_DISP_AV_BUF_OUTPUT_AUD1_DISABLE;
        val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_AUD2_EN;
-       zynqmp_disp_avbuf_write(avbuf, ZYNQMP_DISP_AV_BUF_OUTPUT, val);
+       zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_OUTPUT, val);
 }
 
 /**
  * zynqmp_disp_avbuf_enable_video - Enable a video layer
- * @avbuf: Audio/video buffer manager
- * @layer: The layer ID
+ * @disp: Display controller
+ * @layer: The layer
  * @mode: Operating mode of layer
  *
  * Enable the video/graphics buffer for @layer.
  */
-static void zynqmp_disp_avbuf_enable_video(struct zynqmp_disp_avbuf *avbuf,
-                                          enum zynqmp_disp_layer_id layer,
+static void zynqmp_disp_avbuf_enable_video(struct zynqmp_disp *disp,
+                                          struct zynqmp_disp_layer *layer,
                                           enum zynqmp_disp_layer_mode mode)
 {
        u32 val;
 
-       val = zynqmp_disp_avbuf_read(avbuf, ZYNQMP_DISP_AV_BUF_OUTPUT);
-       if (layer == ZYNQMP_DISP_LAYER_VID) {
+       val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_OUTPUT);
+       if (zynqmp_disp_layer_is_video(layer)) {
                val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
                if (mode == ZYNQMP_DISP_LAYER_NONLIVE)
                        val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MEM;
@@ -599,52 +590,52 @@ static void zynqmp_disp_avbuf_enable_video(struct zynqmp_disp_avbuf *avbuf,
                else
                        val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_LIVE;
        }
-       zynqmp_disp_avbuf_write(avbuf, ZYNQMP_DISP_AV_BUF_OUTPUT, val);
+       zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_OUTPUT, val);
 }
 
 /**
  * zynqmp_disp_avbuf_disable_video - Disable a video layer
- * @avbuf: Audio/video buffer manager
- * @layer: The layer ID
+ * @disp: Display controller
+ * @layer: The layer
  *
  * Disable the video/graphics buffer for @layer.
  */
-static void zynqmp_disp_avbuf_disable_video(struct zynqmp_disp_avbuf *avbuf,
-                                           enum zynqmp_disp_layer_id layer)
+static void zynqmp_disp_avbuf_disable_video(struct zynqmp_disp *disp,
+                                           struct zynqmp_disp_layer *layer)
 {
        u32 val;
 
-       val = zynqmp_disp_avbuf_read(avbuf, ZYNQMP_DISP_AV_BUF_OUTPUT);
-       if (layer == ZYNQMP_DISP_LAYER_VID) {
+       val = zynqmp_disp_avbuf_read(disp, ZYNQMP_DISP_AV_BUF_OUTPUT);
+       if (zynqmp_disp_layer_is_video(layer)) {
                val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_MASK;
                val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID1_NONE;
        } else {
                val &= ~ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_MASK;
                val |= ZYNQMP_DISP_AV_BUF_OUTPUT_VID2_DISABLE;
        }
-       zynqmp_disp_avbuf_write(avbuf, ZYNQMP_DISP_AV_BUF_OUTPUT, val);
+       zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_OUTPUT, val);
 }
 
 /**
  * zynqmp_disp_avbuf_enable - Enable the video pipe
- * @avbuf: Audio/video buffer manager
+ * @disp: Display controller
  *
  * De-assert the video pipe reset.
  */
-static void zynqmp_disp_avbuf_enable(struct zynqmp_disp_avbuf *avbuf)
+static void zynqmp_disp_avbuf_enable(struct zynqmp_disp *disp)
 {
-       zynqmp_disp_avbuf_write(avbuf, ZYNQMP_DISP_AV_BUF_SRST_REG, 0);
+       zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_SRST_REG, 0);
 }
 
 /**
  * zynqmp_disp_avbuf_disable - Disable the video pipe
- * @avbuf: Audio/video buffer manager
+ * @disp: Display controller
  *
  * Assert the video pipe reset.
  */
-static void zynqmp_disp_avbuf_disable(struct zynqmp_disp_avbuf *avbuf)
+static void zynqmp_disp_avbuf_disable(struct zynqmp_disp *disp)
 {
-       zynqmp_disp_avbuf_write(avbuf, ZYNQMP_DISP_AV_BUF_SRST_REG,
+       zynqmp_disp_avbuf_write(disp, ZYNQMP_DISP_AV_BUF_SRST_REG,
                                ZYNQMP_DISP_AV_BUF_SRST_REG_VID_RST);
 }
 
@@ -652,10 +643,9 @@ static void zynqmp_disp_avbuf_disable(struct zynqmp_disp_avbuf *avbuf)
  * Blender (Video Pipeline)
  */
 
-static void zynqmp_disp_blend_write(struct zynqmp_disp_blend *blend,
-                                   int reg, u32 val)
+static void zynqmp_disp_blend_write(struct zynqmp_disp *disp, int reg, u32 val)
 {
-       writel(val, blend->base + reg);
+       writel(val, disp->blend.base + reg);
 }
 
 /*
@@ -701,12 +691,12 @@ static const u32 csc_sdtv_to_rgb_offsets[] = {
 
 /**
  * zynqmp_disp_blend_set_output_format - Set the output format of the blender
- * @blend: Blender object
+ * @disp: Display controller
  * @format: Output format
  *
  * Set the output format of the blender to @format.
  */
-static void zynqmp_disp_blend_set_output_format(struct zynqmp_disp_blend *blend,
+static void zynqmp_disp_blend_set_output_format(struct zynqmp_disp *disp,
                                                enum zynqmp_dpsub_format format)
 {
        static const unsigned int blend_output_fmts[] = {
@@ -722,7 +712,7 @@ static void zynqmp_disp_blend_set_output_format(struct zynqmp_disp_blend *blend,
        const u32 *offsets;
        unsigned int i;
 
-       zynqmp_disp_blend_write(blend, ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT, fmt);
+       zynqmp_disp_blend_write(disp, ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT, fmt);
        if (fmt == ZYNQMP_DISP_V_BLEND_OUTPUT_VID_FMT_RGB) {
                coeffs = csc_identity_matrix;
                offsets = csc_zero_offsets;
@@ -732,19 +722,19 @@ static void zynqmp_disp_blend_set_output_format(struct zynqmp_disp_blend *blend,
        }
 
        for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_COEFF; i++)
-               zynqmp_disp_blend_write(blend,
+               zynqmp_disp_blend_write(disp,
                                        ZYNQMP_DISP_V_BLEND_RGB2YCBCR_COEFF(i),
                                        coeffs[i]);
 
        for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_OFFSET; i++)
-               zynqmp_disp_blend_write(blend,
+               zynqmp_disp_blend_write(disp,
                                        ZYNQMP_DISP_V_BLEND_OUTCSC_OFFSET(i),
                                        offsets[i]);
 }
 
 /**
  * zynqmp_disp_blend_set_bg_color - Set the background color
- * @blend: Blender object
+ * @disp: Display controller
  * @rcr: Red/Cr color component
  * @gy: Green/Y color component
  * @bcb: Blue/Cb color component
@@ -753,31 +743,31 @@ static void zynqmp_disp_blend_set_output_format(struct zynqmp_disp_blend *blend,
  * B or Cr, Y and Cb components respectively depending on the selected output
  * format.
  */
-static void zynqmp_disp_blend_set_bg_color(struct zynqmp_disp_blend *blend,
+static void zynqmp_disp_blend_set_bg_color(struct zynqmp_disp *disp,
                                           u32 rcr, u32 gy, u32 bcb)
 {
-       zynqmp_disp_blend_write(blend, ZYNQMP_DISP_V_BLEND_BG_CLR_0, rcr);
-       zynqmp_disp_blend_write(blend, ZYNQMP_DISP_V_BLEND_BG_CLR_1, gy);
-       zynqmp_disp_blend_write(blend, ZYNQMP_DISP_V_BLEND_BG_CLR_2, bcb);
+       zynqmp_disp_blend_write(disp, ZYNQMP_DISP_V_BLEND_BG_CLR_0, rcr);
+       zynqmp_disp_blend_write(disp, ZYNQMP_DISP_V_BLEND_BG_CLR_1, gy);
+       zynqmp_disp_blend_write(disp, ZYNQMP_DISP_V_BLEND_BG_CLR_2, bcb);
 }
 
 /**
  * zynqmp_disp_blend_set_global_alpha - Configure global alpha blending
- * @blend: Blender object
+ * @disp: Display controller
  * @enable: True to enable global alpha blending
  * @alpha: Global alpha value (ignored if @enabled is false)
  */
-static void zynqmp_disp_blend_set_global_alpha(struct zynqmp_disp_blend *blend,
+static void zynqmp_disp_blend_set_global_alpha(struct zynqmp_disp *disp,
                                               bool enable, u32 alpha)
 {
-       zynqmp_disp_blend_write(blend, ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA,
+       zynqmp_disp_blend_write(disp, ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA,
                                ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_VALUE(alpha) |
                                (enable ? ZYNQMP_DISP_V_BLEND_SET_GLOBAL_ALPHA_EN : 0));
 }
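 
The global-alpha helper above packs the 8-bit alpha value and an enable flag into a single register write. An illustrative bit-packing sketch; the field positions are assumptions, not taken from the DisplayPort subsystem register map.

/* Packing an alpha value and enable bit into one register value. */
#include <stdint.h>
#include <stdio.h>

#define ALPHA_VALUE(x)  (((uint32_t)(x) & 0xff) << 1)   /* assumed position */
#define ALPHA_EN        (1u << 0)                       /* assumed enable bit */

static uint32_t pack_global_alpha(int enable, uint32_t alpha)
{
	return ALPHA_VALUE(alpha) | (enable ? ALPHA_EN : 0);
}

int main(void)
{
	printf("enabled, alpha=0x80 -> %#x\n", pack_global_alpha(1, 0x80));
	printf("disabled            -> %#x\n", pack_global_alpha(0, 0x80));
	return 0;
}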
 
 /**
  * zynqmp_disp_blend_layer_set_csc - Configure colorspace conversion for layer
- * @blend: Blender object
+ * @disp: Display controller
  * @layer: The layer
  * @coeffs: Colorspace conversion matrix
  * @offsets: Colorspace conversion offsets
@@ -786,7 +776,7 @@ static void zynqmp_disp_blend_set_global_alpha(struct zynqmp_disp_blend *blend,
  * Columns of the matrix are automatically swapped based on the input format to
  * handle RGB and YCrCb component permutations.
  */
-static void zynqmp_disp_blend_layer_set_csc(struct zynqmp_disp_blend *blend,
+static void zynqmp_disp_blend_layer_set_csc(struct zynqmp_disp *disp,
                                            struct zynqmp_disp_layer *layer,
                                            const u16 *coeffs,
                                            const u32 *offsets)
@@ -807,32 +797,32 @@ static void zynqmp_disp_blend_layer_set_csc(struct zynqmp_disp_blend *blend,
                }
        }
 
-       if (layer->id == ZYNQMP_DISP_LAYER_VID)
+       if (zynqmp_disp_layer_is_video(layer))
                reg = ZYNQMP_DISP_V_BLEND_IN1CSC_COEFF(0);
        else
                reg = ZYNQMP_DISP_V_BLEND_IN2CSC_COEFF(0);
 
        for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_COEFF; i += 3, reg += 12) {
-               zynqmp_disp_blend_write(blend, reg + 0, coeffs[i + swap[0]]);
-               zynqmp_disp_blend_write(blend, reg + 4, coeffs[i + swap[1]]);
-               zynqmp_disp_blend_write(blend, reg + 8, coeffs[i + swap[2]]);
+               zynqmp_disp_blend_write(disp, reg + 0, coeffs[i + swap[0]]);
+               zynqmp_disp_blend_write(disp, reg + 4, coeffs[i + swap[1]]);
+               zynqmp_disp_blend_write(disp, reg + 8, coeffs[i + swap[2]]);
        }
 
-       if (layer->id == ZYNQMP_DISP_LAYER_VID)
+       if (zynqmp_disp_layer_is_video(layer))
                reg = ZYNQMP_DISP_V_BLEND_IN1CSC_OFFSET(0);
        else
                reg = ZYNQMP_DISP_V_BLEND_IN2CSC_OFFSET(0);
 
        for (i = 0; i < ZYNQMP_DISP_V_BLEND_NUM_OFFSET; i++)
-               zynqmp_disp_blend_write(blend, reg + i * 4, offsets[i]);
+               zynqmp_disp_blend_write(disp, reg + i * 4, offsets[i]);
 }
 
 /**
  * zynqmp_disp_blend_layer_enable - Enable a layer
- * @blend: Blender object
+ * @disp: Display controller
  * @layer: The layer
  */
-static void zynqmp_disp_blend_layer_enable(struct zynqmp_disp_blend *blend,
+static void zynqmp_disp_blend_layer_enable(struct zynqmp_disp *disp,
                                           struct zynqmp_disp_layer *layer)
 {
        const u16 *coeffs;
@@ -844,7 +834,7 @@ static void zynqmp_disp_blend_layer_enable(struct zynqmp_disp_blend *blend,
              (layer->drm_fmt->hsub > 1 ?
               ZYNQMP_DISP_V_BLEND_LAYER_CONTROL_EN_US : 0);
 
-       zynqmp_disp_blend_write(blend,
+       zynqmp_disp_blend_write(disp,
                                ZYNQMP_DISP_V_BLEND_LAYER_CONTROL(layer->id),
                                val);
 
@@ -856,22 +846,22 @@ static void zynqmp_disp_blend_layer_enable(struct zynqmp_disp_blend *blend,
                offsets = csc_zero_offsets;
        }
 
-       zynqmp_disp_blend_layer_set_csc(blend, layer, coeffs, offsets);
+       zynqmp_disp_blend_layer_set_csc(disp, layer, coeffs, offsets);
 }
 
 /**
  * zynqmp_disp_blend_layer_disable - Disable a layer
- * @blend: Blender object
+ * @disp: Display controller
  * @layer: The layer
  */
-static void zynqmp_disp_blend_layer_disable(struct zynqmp_disp_blend *blend,
+static void zynqmp_disp_blend_layer_disable(struct zynqmp_disp *disp,
                                            struct zynqmp_disp_layer *layer)
 {
-       zynqmp_disp_blend_write(blend,
+       zynqmp_disp_blend_write(disp,
                                ZYNQMP_DISP_V_BLEND_LAYER_CONTROL(layer->id),
                                0);
 
-       zynqmp_disp_blend_layer_set_csc(blend, layer, csc_zero_matrix,
+       zynqmp_disp_blend_layer_set_csc(disp, layer, csc_zero_matrix,
                                        csc_zero_offsets);
 }
 
@@ -879,57 +869,55 @@ static void zynqmp_disp_blend_layer_disable(struct zynqmp_disp_blend *blend,
  * Audio Mixer
  */
 
-static void zynqmp_disp_audio_write(struct zynqmp_disp_audio *audio,
-                                 int reg, u32 val)
+static void zynqmp_disp_audio_write(struct zynqmp_disp *disp, int reg, u32 val)
 {
-       writel(val, audio->base + reg);
+       writel(val, disp->audio.base + reg);
 }
 
 /**
  * zynqmp_disp_audio_enable - Enable the audio mixer
- * @audio: Audio mixer
+ * @disp: Display controller
  *
  * Enable the audio mixer by de-asserting the soft reset. The audio state is set
  * to default values by the reset, so set the default mixer volume explicitly.
  */
-static void zynqmp_disp_audio_enable(struct zynqmp_disp_audio *audio)
+static void zynqmp_disp_audio_enable(struct zynqmp_disp *disp)
 {
        /* Clear the audio soft reset register as it's a non-reset flop. */
-       zynqmp_disp_audio_write(audio, ZYNQMP_DISP_AUD_SOFT_RESET, 0);
-       zynqmp_disp_audio_write(audio, ZYNQMP_DISP_AUD_MIXER_VOLUME,
+       zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_SOFT_RESET, 0);
+       zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_MIXER_VOLUME,
                                ZYNQMP_DISP_AUD_MIXER_VOLUME_NO_SCALE);
 }
 
 /**
  * zynqmp_disp_audio_disable - Disable the audio mixer
- * @audio: Audio mixer
+ * @disp: Display controller
  *
  * Disable the audio mixer by asserting its soft reset.
  */
-static void zynqmp_disp_audio_disable(struct zynqmp_disp_audio *audio)
+static void zynqmp_disp_audio_disable(struct zynqmp_disp *disp)
 {
-       zynqmp_disp_audio_write(audio, ZYNQMP_DISP_AUD_SOFT_RESET,
+       zynqmp_disp_audio_write(disp, ZYNQMP_DISP_AUD_SOFT_RESET,
                                ZYNQMP_DISP_AUD_SOFT_RESET_AUD_SRST);
 }
 
-static void zynqmp_disp_audio_init(struct device *dev,
-                                  struct zynqmp_disp_audio *audio)
+static void zynqmp_disp_audio_init(struct zynqmp_disp *disp)
 {
        /* Try the live PL audio clock. */
-       audio->clk = devm_clk_get(dev, "dp_live_audio_aclk");
-       if (!IS_ERR(audio->clk)) {
-               audio->clk_from_ps = false;
+       disp->audio.clk = devm_clk_get(disp->dev, "dp_live_audio_aclk");
+       if (!IS_ERR(disp->audio.clk)) {
+               disp->audio.clk_from_ps = false;
                return;
        }
 
        /* If the live PL audio clock is not valid, fall back to PS clock. */
-       audio->clk = devm_clk_get(dev, "dp_aud_clk");
-       if (!IS_ERR(audio->clk)) {
-               audio->clk_from_ps = true;
+       disp->audio.clk = devm_clk_get(disp->dev, "dp_aud_clk");
+       if (!IS_ERR(disp->audio.clk)) {
+               disp->audio.clk_from_ps = true;
                return;
        }
 
-       dev_err(dev, "audio disabled due to missing clock\n");
+       dev_err(disp->dev, "audio disabled due to missing clock\n");
 }
 
 /* -----------------------------------------------------------------------------
@@ -1025,9 +1013,9 @@ zynqmp_disp_layer_find_format(struct zynqmp_disp_layer *layer,
  */
 static void zynqmp_disp_layer_enable(struct zynqmp_disp_layer *layer)
 {
-       zynqmp_disp_avbuf_enable_video(&layer->disp->avbuf, layer->id,
+       zynqmp_disp_avbuf_enable_video(layer->disp, layer,
                                       ZYNQMP_DISP_LAYER_NONLIVE);
-       zynqmp_disp_blend_layer_enable(&layer->disp->blend, layer);
+       zynqmp_disp_blend_layer_enable(layer->disp, layer);
 
        layer->mode = ZYNQMP_DISP_LAYER_NONLIVE;
 }
@@ -1046,8 +1034,8 @@ static void zynqmp_disp_layer_disable(struct zynqmp_disp_layer *layer)
        for (i = 0; i < layer->drm_fmt->num_planes; i++)
                dmaengine_terminate_sync(layer->dmas[i].chan);
 
-       zynqmp_disp_avbuf_disable_video(&layer->disp->avbuf, layer->id);
-       zynqmp_disp_blend_layer_disable(&layer->disp->blend, layer);
+       zynqmp_disp_avbuf_disable_video(layer->disp, layer);
+       zynqmp_disp_blend_layer_disable(layer->disp, layer);
 }
 
 /**
@@ -1067,8 +1055,7 @@ static void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
        layer->disp_fmt = zynqmp_disp_layer_find_format(layer, info->format);
        layer->drm_fmt = info;
 
-       zynqmp_disp_avbuf_set_format(&layer->disp->avbuf, layer->id,
-                                    layer->disp_fmt);
+       zynqmp_disp_avbuf_set_format(layer->disp, layer, layer->disp_fmt);
 
        /*
         * Set slave_id for each DMA channel to indicate they're part of a
@@ -1175,6 +1162,10 @@ zynqmp_disp_plane_atomic_disable(struct drm_plane *plane,
                return;
 
        zynqmp_disp_layer_disable(layer);
+
+       if (zynqmp_disp_layer_is_gfx(layer))
+               zynqmp_disp_blend_set_global_alpha(layer->disp, false,
+                                                  plane->state->alpha >> 8);
 }
 
 static void
@@ -1204,6 +1195,10 @@ zynqmp_disp_plane_atomic_update(struct drm_plane *plane,
 
        zynqmp_disp_layer_update(layer, new_state);
 
+       if (zynqmp_disp_layer_is_gfx(layer))
+               zynqmp_disp_blend_set_global_alpha(layer->disp, true,
+                                                  plane->state->alpha >> 8);
+
        /* Enable or re-enable the plane if the format has changed. */
        if (format_changed)
                zynqmp_disp_layer_enable(layer);
@@ -1244,8 +1239,8 @@ static int zynqmp_disp_create_planes(struct zynqmp_disp *disp)
                        drm_formats[j] = layer->info->formats[j].drm_fmt;
 
                /* Graphics layer is primary, and video layer is overlay. */
-               type = i == ZYNQMP_DISP_LAYER_GFX
-                    ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
+               type = zynqmp_disp_layer_is_video(layer)
+                    ? DRM_PLANE_TYPE_OVERLAY : DRM_PLANE_TYPE_PRIMARY;
                ret = drm_universal_plane_init(disp->drm, &layer->plane, 0,
                                               &zynqmp_disp_plane_funcs,
                                               drm_formats,
@@ -1256,6 +1251,10 @@ static int zynqmp_disp_create_planes(struct zynqmp_disp *disp)
 
                drm_plane_helper_add(&layer->plane,
                                     &zynqmp_disp_plane_helper_funcs);
+
+               drm_plane_create_zpos_immutable_property(&layer->plane, i);
+               if (zynqmp_disp_layer_is_gfx(layer))
+                       drm_plane_create_alpha_property(&layer->plane);
        }
 
        return 0;
@@ -1387,14 +1386,14 @@ err:
  */
 static void zynqmp_disp_enable(struct zynqmp_disp *disp)
 {
-       zynqmp_disp_avbuf_enable(&disp->avbuf);
+       zynqmp_disp_avbuf_enable(disp);
        /* Choose clock source based on the DT clock handle. */
-       zynqmp_disp_avbuf_set_clocks_sources(&disp->avbuf, disp->pclk_from_ps,
+       zynqmp_disp_avbuf_set_clocks_sources(disp, disp->pclk_from_ps,
                                             disp->audio.clk_from_ps, true);
-       zynqmp_disp_avbuf_enable_channels(&disp->avbuf);
-       zynqmp_disp_avbuf_enable_audio(&disp->avbuf);
+       zynqmp_disp_avbuf_enable_channels(disp);
+       zynqmp_disp_avbuf_enable_audio(disp);
 
-       zynqmp_disp_audio_enable(&disp->audio);
+       zynqmp_disp_audio_enable(disp);
 }
 
 /**
@@ -1403,11 +1402,11 @@ static void zynqmp_disp_enable(struct zynqmp_disp *disp)
  */
 static void zynqmp_disp_disable(struct zynqmp_disp *disp)
 {
-       zynqmp_disp_audio_disable(&disp->audio);
+       zynqmp_disp_audio_disable(disp);
 
-       zynqmp_disp_avbuf_disable_audio(&disp->avbuf);
-       zynqmp_disp_avbuf_disable_channels(&disp->avbuf);
-       zynqmp_disp_avbuf_disable(&disp->avbuf);
+       zynqmp_disp_avbuf_disable_audio(disp);
+       zynqmp_disp_avbuf_disable_channels(disp);
+       zynqmp_disp_avbuf_disable(disp);
 }
 
 static inline struct zynqmp_disp *crtc_to_disp(struct drm_crtc *crtc)
@@ -1452,9 +1451,10 @@ zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
        struct drm_display_mode *adjusted_mode = &crtc->state->adjusted_mode;
        int ret, vrefresh;
 
+       pm_runtime_get_sync(disp->dev);
+
        zynqmp_disp_crtc_setup_clock(crtc, adjusted_mode);
 
-       pm_runtime_get_sync(disp->dev);
        ret = clk_prepare_enable(disp->pclk);
        if (ret) {
                dev_err(disp->dev, "failed to enable a pixel clock\n");
@@ -1462,10 +1462,8 @@ zynqmp_disp_crtc_atomic_enable(struct drm_crtc *crtc,
                return;
        }
 
-       zynqmp_disp_blend_set_output_format(&disp->blend,
-                                           ZYNQMP_DPSUB_FORMAT_RGB);
-       zynqmp_disp_blend_set_bg_color(&disp->blend, 0, 0, 0);
-       zynqmp_disp_blend_set_global_alpha(&disp->blend, false, 0);
+       zynqmp_disp_blend_set_output_format(disp, ZYNQMP_DPSUB_FORMAT_RGB);
+       zynqmp_disp_blend_set_bg_color(disp, 0, 0, 0);
 
        zynqmp_disp_enable(disp);
 
@@ -1674,7 +1672,7 @@ int zynqmp_disp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
                disp->pclk_from_ps = true;
        }
 
-       zynqmp_disp_audio_init(disp->dev, &disp->audio);
+       zynqmp_disp_audio_init(disp);
 
        ret = zynqmp_disp_create_layers(disp);
        if (ret)
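
Taken together, the zynqmp_disp hunks above replace the per-block handles (blender, audio mixer, AV buffer manager) with the top-level display controller as the single object passed around. A minimal sketch of the resulting accessor pattern, with the structure layout simplified and the blend sub-struct assumed to mirror the audio one shown in the hunks:

struct zynqmp_disp_sketch {                     /* hypothetical, trimmed-down layout */
        struct device *dev;
        struct {
                void __iomem *base;             /* blender registers (assumed field) */
        } blend;
        struct {
                void __iomem *base;             /* audio mixer registers */
                struct clk *clk;
                bool clk_from_ps;
        } audio;
        /* avbuf, layers and clock handling elided */
};

static void sketch_blend_write(struct zynqmp_disp_sketch *disp, int reg, u32 val)
{
        /* Every helper reaches its register block through the one disp pointer. */
        writel(val, disp->blend.base + reg);
}
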
index 82430ca..6f588dc 100644 (file)
@@ -402,10 +402,6 @@ static int zynqmp_dp_phy_init(struct zynqmp_dp *dp)
                }
        }
 
-       ret = zynqmp_dp_reset(dp, false);
-       if (ret < 0)
-               return ret;
-
        zynqmp_dp_clr(dp, ZYNQMP_DP_PHY_RESET, ZYNQMP_DP_PHY_RESET_ALL_RESET);
 
        /*
@@ -441,8 +437,6 @@ static void zynqmp_dp_phy_exit(struct zynqmp_dp *dp)
                                ret);
        }
 
-       zynqmp_dp_reset(dp, true);
-
        for (i = 0; i < dp->num_lanes; i++) {
                ret = phy_exit(dp->phy[i]);
                if (ret)
@@ -1683,9 +1677,13 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
                return PTR_ERR(dp->reset);
        }
 
+       ret = zynqmp_dp_reset(dp, false);
+       if (ret < 0)
+               return ret;
+
        ret = zynqmp_dp_phy_probe(dp);
        if (ret)
-               return ret;
+               goto err_reset;
 
        /* Initialize the hardware. */
        zynqmp_dp_write(dp, ZYNQMP_DP_TX_PHY_POWER_DOWN,
@@ -1697,7 +1695,7 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
 
        ret = zynqmp_dp_phy_init(dp);
        if (ret)
-               return ret;
+               goto err_reset;
 
        zynqmp_dp_write(dp, ZYNQMP_DP_TRANSMITTER_ENABLE, 1);
 
@@ -1709,15 +1707,18 @@ int zynqmp_dp_probe(struct zynqmp_dpsub *dpsub, struct drm_device *drm)
                                        zynqmp_dp_irq_handler, IRQF_ONESHOT,
                                        dev_name(dp->dev), dp);
        if (ret < 0)
-               goto error;
+               goto err_phy_exit;
 
        dev_dbg(dp->dev, "ZynqMP DisplayPort Tx probed with %u lanes\n",
                dp->num_lanes);
 
        return 0;
 
-error:
+err_phy_exit:
        zynqmp_dp_phy_exit(dp);
+err_reset:
+       zynqmp_dp_reset(dp, true);
+
        return ret;
 }
 
@@ -1735,4 +1736,5 @@ void zynqmp_dp_remove(struct zynqmp_dpsub *dpsub)
        zynqmp_dp_write(dp, ZYNQMP_DP_INT_DS, 0xffffffff);
 
        zynqmp_dp_phy_exit(dp);
+       zynqmp_dp_reset(dp, true);
 }
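
The zynqmp_dp hunks move the reset handling out of the PHY helpers and into probe and remove, with error labels that unwind only what has already been set up. A hedged sketch of the resulting probe ordering (zynqmp_dp_probe_order and zynqmp_dp_request_irq_step are made-up names, and the real function programs registers between these steps):

static int zynqmp_dp_probe_order(struct zynqmp_dp *dp)
{
        int ret;

        ret = zynqmp_dp_reset(dp, false);               /* de-assert reset up front */
        if (ret < 0)
                return ret;

        ret = zynqmp_dp_phy_probe(dp);
        if (ret)
                goto err_reset;

        ret = zynqmp_dp_phy_init(dp);
        if (ret)
                goto err_reset;

        ret = zynqmp_dp_request_irq_step(dp);           /* hypothetical stand-in */
        if (ret < 0)
                goto err_phy_exit;

        return 0;

err_phy_exit:
        zynqmp_dp_phy_exit(dp);
err_reset:
        zynqmp_dp_reset(dp, true);                      /* re-assert reset on failure */
        return ret;
}

Remove mirrors the same order, calling zynqmp_dp_phy_exit() and then asserting the reset, as the last hunk above shows.
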
index 7db3321..dbed252 100644 (file)
@@ -2302,7 +2302,7 @@ end:
        return ret;
 }
 
-static int hid_device_remove(struct device *dev)
+static void hid_device_remove(struct device *dev)
 {
        struct hid_device *hdev = to_hid_device(dev);
        struct hid_driver *hdrv;
@@ -2322,8 +2322,6 @@ static int hid_device_remove(struct device *dev)
 
        if (!hdev->io_started)
                up(&hdev->driver_input_lock);
-
-       return 0;
 }
 
 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
index f0802b0..8a51bd9 100644 (file)
@@ -255,7 +255,7 @@ static int ishtp_cl_bus_match(struct device *dev, struct device_driver *drv)
  *
  * Return: Return value from driver remove() call.
  */
-static int ishtp_cl_device_remove(struct device *dev)
+static void ishtp_cl_device_remove(struct device *dev)
 {
        struct ishtp_cl_device *device = to_ishtp_cl_device(dev);
        struct ishtp_cl_driver *driver = to_ishtp_cl_driver(dev->driver);
@@ -267,8 +267,6 @@ static int ishtp_cl_device_remove(struct device *dev)
 
        if (driver->remove)
                driver->remove(device);
-
-       return 0;
 }
 
 /**
index 57bbbaa..392c1ac 100644 (file)
@@ -922,7 +922,7 @@ static int vmbus_probe(struct device *child_device)
 /*
  * vmbus_remove - Remove a vmbus device
  */
-static int vmbus_remove(struct device *child_device)
+static void vmbus_remove(struct device *child_device)
 {
        struct hv_driver *drv;
        struct hv_device *dev = device_to_hv_device(child_device);
@@ -932,11 +932,8 @@ static int vmbus_remove(struct device *child_device)
                if (drv->remove)
                        drv->remove(dev);
        }
-
-       return 0;
 }
 
-
 /*
  * vmbus_shutdown - Shutdown a vmbus device
  */
index 66eed2d..7e753a7 100644 (file)
@@ -95,7 +95,7 @@ out_pm:
 
 static void intel_th_device_remove(struct intel_th_device *thdev);
 
-static int intel_th_remove(struct device *dev)
+static void intel_th_remove(struct device *dev)
 {
        struct intel_th_driver *thdrv = to_intel_th_driver(dev->driver);
        struct intel_th_device *thdev = to_intel_th_device(dev);
@@ -164,8 +164,6 @@ static int intel_th_remove(struct device *dev)
        pm_runtime_disable(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
-
-       return 0;
 }
 
 static struct bus_type intel_th_bus = {
index 84f12bf..54964fb 100644 (file)
@@ -601,7 +601,7 @@ put_sync_adapter:
        return status;
 }
 
-static int i2c_device_remove(struct device *dev)
+static void i2c_device_remove(struct device *dev)
 {
        struct i2c_client       *client = to_i2c_client(dev);
        struct i2c_adapter      *adap;
@@ -631,9 +631,6 @@ static int i2c_device_remove(struct device *dev)
        client->irq = 0;
        if (client->flags & I2C_CLIENT_HOST_NOTIFY)
                pm_runtime_put(&client->adapter->dev);
-
-       /* return always 0 because there is WIP to make remove-functions void */
-       return 0;
 }
 
 #ifdef CONFIG_PM_SLEEP
index e2e12a5..c3b4c67 100644 (file)
@@ -322,7 +322,7 @@ static int i3c_device_probe(struct device *dev)
        return driver->probe(i3cdev);
 }
 
-static int i3c_device_remove(struct device *dev)
+static void i3c_device_remove(struct device *dev)
 {
        struct i3c_device *i3cdev = dev_to_i3cdev(dev);
        struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
@@ -331,8 +331,6 @@ static int i3c_device_remove(struct device *dev)
                driver->remove(i3cdev);
 
        i3c_device_free_ibi(i3cdev);
-
-       return 0;
 }
 
 struct bus_type i3c_bus_type = {
index 61fa7e7..db58a01 100644 (file)
@@ -697,13 +697,12 @@ static int gameport_driver_probe(struct device *dev)
        return gameport->drv ? 0 : -ENODEV;
 }
 
-static int gameport_driver_remove(struct device *dev)
+static void gameport_driver_remove(struct device *dev)
 {
        struct gameport *gameport = to_gameport_port(dev);
        struct gameport_driver *drv = to_gameport_driver(dev->driver);
 
        drv->disconnect(gameport);
-       return 0;
 }
 
 static void gameport_attach_driver(struct gameport_driver *drv)
index 29f4910..ec117be 100644 (file)
@@ -778,12 +778,11 @@ static int serio_driver_probe(struct device *dev)
        return serio_connect_driver(serio, drv);
 }
 
-static int serio_driver_remove(struct device *dev)
+static void serio_driver_remove(struct device *dev)
 {
        struct serio *serio = to_serio_port(dev);
 
        serio_disconnect_driver(serio);
-       return 0;
 }
 
 static void serio_cleanup(struct serio *serio)
index 7de9605..b1c3198 100644 (file)
@@ -67,15 +67,13 @@ static int ipack_bus_probe(struct device *device)
        return drv->ops->probe(dev);
 }
 
-static int ipack_bus_remove(struct device *device)
+static void ipack_bus_remove(struct device *device)
 {
        struct ipack_device *dev = to_ipack_dev(device);
        struct ipack_driver *drv = to_ipack_driver(device->driver);
 
        if (drv->ops->remove)
                drv->ops->remove(dev);
-
-       return 0;
 }
 
 static int ipack_uevent(struct device *dev, struct kobj_uevent_env *env)
index 49af60b..c1fdf28 100644 (file)
@@ -88,7 +88,7 @@ static int macio_device_probe(struct device *dev)
        return error;
 }
 
-static int macio_device_remove(struct device *dev)
+static void macio_device_remove(struct device *dev)
 {
        struct macio_dev * macio_dev = to_macio_device(dev);
        struct macio_driver * drv = to_macio_driver(dev->driver);
@@ -96,8 +96,6 @@ static int macio_device_remove(struct device *dev)
        if (dev->driver && drv->remove)
                drv->remove(macio_dev);
        macio_dev_put(macio_dev);
-
-       return 0;
 }
 
 static void macio_device_shutdown(struct device *dev)
index 38fbb3b..edf4ee6 100644 (file)
@@ -77,7 +77,7 @@ static int mcb_probe(struct device *dev)
        return ret;
 }
 
-static int mcb_remove(struct device *dev)
+static void mcb_remove(struct device *dev)
 {
        struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
        struct mcb_device *mdev = to_mcb_device(dev);
@@ -89,8 +89,6 @@ static int mcb_remove(struct device *dev)
        module_put(carrier_mod);
 
        put_device(&mdev->dev);
-
-       return 0;
 }
 
 static void mcb_shutdown(struct device *dev)
index b730225..a2b18e2 100644 (file)
@@ -46,14 +46,13 @@ static int bttv_sub_probe(struct device *dev)
        return sub->probe ? sub->probe(sdev) : -ENODEV;
 }
 
-static int bttv_sub_remove(struct device *dev)
+static void bttv_sub_remove(struct device *dev)
 {
        struct bttv_sub_device *sdev = to_bttv_sub_dev(dev);
        struct bttv_sub_driver *sub = to_bttv_sub_drv(dev->driver);
 
        if (sub->remove)
                sub->remove(sdev);
-       return 0;
 }
 
 struct bus_type bttv_sub_bus_type = {
index bb10659..660df7d 100644 (file)
@@ -91,7 +91,7 @@ static int memstick_device_probe(struct device *dev)
        return rc;
 }
 
-static int memstick_device_remove(struct device *dev)
+static void memstick_device_remove(struct device *dev)
 {
        struct memstick_dev *card = container_of(dev, struct memstick_dev,
                                                  dev);
@@ -105,7 +105,6 @@ static int memstick_device_remove(struct device *dev)
        }
 
        put_device(dev);
-       return 0;
 }
 
 #ifdef CONFIG_PM
index eff9423..2fa592c 100644 (file)
@@ -33,13 +33,12 @@ static int mcp_bus_probe(struct device *dev)
        return drv->probe(mcp);
 }
 
-static int mcp_bus_remove(struct device *dev)
+static void mcp_bus_remove(struct device *dev)
 {
        struct mcp *mcp = to_mcp(dev);
        struct mcp_driver *drv = to_mcp_driver(dev->driver);
 
        drv->remove(mcp);
-       return 0;
 }
 
 static struct bus_type mcp_bus_type = {
index 935acc6..3bf2bb4 100644 (file)
@@ -884,7 +884,7 @@ static int mei_cl_device_probe(struct device *dev)
  *
  * Return:  0 on success; < 0 otherwise
  */
-static int mei_cl_device_remove(struct device *dev)
+static void mei_cl_device_remove(struct device *dev)
 {
        struct mei_cl_device *cldev = to_mei_cl_device(dev);
        struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);
@@ -896,8 +896,6 @@ static int mei_cl_device_remove(struct device *dev)
 
        mei_cl_bus_module_put(cldev);
        module_put(THIS_MODULE);
-
-       return 0;
 }
 
 static ssize_t name_show(struct device *dev, struct device_attribute *a,
index 667e574..52656fc 100644 (file)
@@ -87,7 +87,7 @@ static void tifm_dummy_event(struct tifm_dev *sock)
        return;
 }
 
-static int tifm_device_remove(struct device *dev)
+static void tifm_device_remove(struct device *dev)
 {
        struct tifm_dev *sock = container_of(dev, struct tifm_dev, dev);
        struct tifm_driver *drv = container_of(dev->driver, struct tifm_driver,
@@ -101,7 +101,6 @@ static int tifm_device_remove(struct device *dev)
        }
 
        put_device(dev);
-       return 0;
 }
 
 #ifdef CONFIG_PM
index 4383c26..f6b7a9c 100644 (file)
@@ -140,14 +140,12 @@ static int mmc_bus_probe(struct device *dev)
        return drv->probe(card);
 }
 
-static int mmc_bus_remove(struct device *dev)
+static void mmc_bus_remove(struct device *dev)
 {
        struct mmc_driver *drv = to_mmc_driver(dev->driver);
        struct mmc_card *card = mmc_dev_to_card(dev);
 
        drv->remove(card);
-
-       return 0;
 }
 
 static void mmc_bus_shutdown(struct device *dev)
index 3d70902..fda03b3 100644 (file)
@@ -203,7 +203,7 @@ disable_runtimepm:
        return ret;
 }
 
-static int sdio_bus_remove(struct device *dev)
+static void sdio_bus_remove(struct device *dev)
 {
        struct sdio_driver *drv = to_sdio_driver(dev->driver);
        struct sdio_func *func = dev_to_sdio_func(dev);
@@ -232,8 +232,6 @@ static int sdio_bus_remove(struct device *dev)
                pm_runtime_put_sync(dev);
 
        dev_pm_domain_detach(dev, false);
-
-       return 0;
 }
 
 static const struct dev_pm_ops sdio_bus_pm_ops = {
index ccec299..14b1549 100644 (file)
@@ -370,12 +370,11 @@ static int nsim_bus_probe(struct device *dev)
        return nsim_dev_probe(nsim_bus_dev);
 }
 
-static int nsim_bus_remove(struct device *dev)
+static void nsim_bus_remove(struct device *dev)
 {
        struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
 
        nsim_dev_remove(nsim_bus_dev);
-       return 0;
 }
 
 static int nsim_num_vf(struct device *dev)
index f8f75a5..27dd93d 100644 (file)
@@ -271,7 +271,7 @@ static int ntb_probe(struct device *dev)
        return rc;
 }
 
-static int ntb_remove(struct device *dev)
+static void ntb_remove(struct device *dev)
 {
        struct ntb_dev *ntb;
        struct ntb_client *client;
@@ -283,8 +283,6 @@ static int ntb_remove(struct device *dev)
                client->ops.remove(client, ntb);
                put_device(dev);
        }
-
-       return 0;
 }
 
 static void ntb_dev_release(struct device *dev)
index 4a02561..a9b97eb 100644 (file)
@@ -304,7 +304,7 @@ static int ntb_transport_bus_probe(struct device *dev)
        return rc;
 }
 
-static int ntb_transport_bus_remove(struct device *dev)
+static void ntb_transport_bus_remove(struct device *dev)
 {
        const struct ntb_transport_client *client;
 
@@ -312,8 +312,6 @@ static int ntb_transport_bus_remove(struct device *dev)
        client->remove(dev);
 
        put_device(dev);
-
-       return 0;
 }
 
 static struct bus_type ntb_transport_bus = {
index ad3d17c..d9d04f2 100644 (file)
@@ -29,14 +29,12 @@ static int nubus_device_probe(struct device *dev)
        return err;
 }
 
-static int nubus_device_remove(struct device *dev)
+static void nubus_device_remove(struct device *dev)
 {
        struct nubus_driver *ndrv = to_nubus_driver(dev->driver);
-       int err = -ENODEV;
 
        if (dev->driver && ndrv->remove)
-               err = ndrv->remove(to_nubus_board(dev));
-       return err;
+               ndrv->remove(to_nubus_board(dev));
 }
 
 struct bus_type nubus_bus_type = {
index e6aa870..9dc7f3e 100644 (file)
@@ -108,7 +108,7 @@ static int nvdimm_bus_probe(struct device *dev)
        return rc;
 }
 
-static int nvdimm_bus_remove(struct device *dev)
+static void nvdimm_bus_remove(struct device *dev)
 {
        struct nd_device_driver *nd_drv = to_nd_device_driver(dev->driver);
        struct module *provider = to_bus_provider(dev);
@@ -123,7 +123,6 @@ static int nvdimm_bus_remove(struct device *dev)
        dev_dbg(&nvdimm_bus->dev, "%s.remove(%s)\n", dev->driver->name,
                        dev_name(dev));
        module_put(provider);
-       return 0;
 }
 
 static void nvdimm_bus_shutdown(struct device *dev)
index e9289d1..502eb79 100644 (file)
@@ -387,17 +387,14 @@ static int pci_epf_device_probe(struct device *dev)
        return driver->probe(epf);
 }
 
-static int pci_epf_device_remove(struct device *dev)
+static void pci_epf_device_remove(struct device *dev)
 {
-       int ret = 0;
        struct pci_epf *epf = to_pci_epf(dev);
        struct pci_epf_driver *driver = to_pci_epf_driver(dev->driver);
 
        if (driver->remove)
-               ret = driver->remove(epf);
+               driver->remove(epf);
        epf->driver = NULL;
-
-       return ret;
 }
 
 static struct bus_type pci_epf_bus_type = {
index 3a72352..a061539 100644 (file)
@@ -440,7 +440,7 @@ static int pci_device_probe(struct device *dev)
        return error;
 }
 
-static int pci_device_remove(struct device *dev)
+static void pci_device_remove(struct device *dev)
 {
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct pci_driver *drv = pci_dev->driver;
@@ -476,7 +476,6 @@ static int pci_device_remove(struct device *dev)
         */
 
        pci_dev_put(pci_dev);
-       return 0;
 }
 
 static void pci_device_shutdown(struct device *dev)
index bd81aa6..5bd1b80 100644 (file)
@@ -350,7 +350,7 @@ static void pcmcia_card_remove(struct pcmcia_socket *s, struct pcmcia_device *le
        return;
 }
 
-static int pcmcia_device_remove(struct device *dev)
+static void pcmcia_device_remove(struct device *dev)
 {
        struct pcmcia_device *p_dev;
        struct pcmcia_driver *p_drv;
@@ -389,8 +389,6 @@ static int pcmcia_device_remove(struct device *dev)
        /* references from pcmcia_device_probe */
        pcmcia_put_dev(p_dev);
        module_put(p_drv->owner);
-
-       return 0;
 }
 
 
index 0169677..0a40dd9 100644 (file)
@@ -316,14 +316,12 @@ static int ssam_bus_probe(struct device *dev)
                ->probe(to_ssam_device(dev));
 }
 
-static int ssam_bus_remove(struct device *dev)
+static void ssam_bus_remove(struct device *dev)
 {
        struct ssam_device_driver *sdrv = to_ssam_device_driver(dev->driver);
 
        if (sdrv->remove)
                sdrv->remove(to_ssam_device(dev));
-
-       return 0;
 }
 
 struct bus_type ssam_bus_type = {
index 62e0d56..a763130 100644 (file)
@@ -980,7 +980,7 @@ probe_failure:
        return ret;
 }
 
-static int wmi_dev_remove(struct device *dev)
+static void wmi_dev_remove(struct device *dev)
 {
        struct wmi_block *wblock = dev_to_wblock(dev);
        struct wmi_driver *wdriver =
@@ -997,8 +997,6 @@ static int wmi_dev_remove(struct device *dev)
 
        if (ACPI_FAILURE(wmi_method_enable(wblock, 0)))
                dev_warn(dev, "failed to disable device\n");
-
-       return 0;
 }
 
 static struct class wmi_bus_class = {
index c29d590..cc6757d 100644 (file)
@@ -123,7 +123,7 @@ fail:
        return error;
 }
 
-static int pnp_device_remove(struct device *dev)
+static void pnp_device_remove(struct device *dev)
 {
        struct pnp_dev *pnp_dev = to_pnp_dev(dev);
        struct pnp_driver *drv = pnp_dev->driver;
@@ -139,7 +139,6 @@ static int pnp_device_remove(struct device *dev)
                pnp_disable_dev(pnp_dev);
 
        pnp_device_detach(pnp_dev);
-       return 0;
 }
 
 static void pnp_device_shutdown(struct device *dev)
index 7287415..a72bb0a 100644 (file)
@@ -112,7 +112,7 @@ static int rio_device_probe(struct device *dev)
  * driver, then run the driver remove() method.  Then update
  * the reference count.
  */
-static int rio_device_remove(struct device *dev)
+static void rio_device_remove(struct device *dev)
 {
        struct rio_dev *rdev = to_rio_dev(dev);
        struct rio_driver *rdrv = rdev->driver;
@@ -124,8 +124,6 @@ static int rio_device_remove(struct device *dev)
        }
 
        rio_dev_put(rdev);
-
-       return 0;
 }
 
 static void rio_device_shutdown(struct device *dev)
index c1404d3..9151836 100644 (file)
@@ -530,14 +530,13 @@ out:
        return err;
 }
 
-static int rpmsg_dev_remove(struct device *dev)
+static void rpmsg_dev_remove(struct device *dev)
 {
        struct rpmsg_device *rpdev = to_rpmsg_device(dev);
        struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
-       int err = 0;
 
        if (rpdev->ops->announce_destroy)
-               err = rpdev->ops->announce_destroy(rpdev);
+               rpdev->ops->announce_destroy(rpdev);
 
        if (rpdrv->remove)
                rpdrv->remove(rpdev);
@@ -546,8 +545,6 @@ static int rpmsg_dev_remove(struct device *dev)
 
        if (rpdev->ept)
                rpmsg_destroy_ept(rpdev->ept);
-
-       return err;
 }
 
 static struct bus_type rpmsg_bus = {
index 3134fd6..69a845e 100644 (file)
@@ -60,15 +60,13 @@ out:
        return ret;
 }
 
-static int scm_remove(struct scm_device *scmdev)
+static void scm_remove(struct scm_device *scmdev)
 {
        struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
 
        scm_blk_dev_cleanup(bdev);
        dev_set_drvdata(&scmdev->dev, NULL);
        kfree(bdev);
-
-       return 0;
 }
 
 static struct scm_driver scm_drv = {
index 9748165..382c5b5 100644 (file)
@@ -439,17 +439,13 @@ module_exit(cleanup_ccwgroup);
 
 /************************** driver stuff ******************************/
 
-static int ccwgroup_remove(struct device *dev)
+static void ccwgroup_remove(struct device *dev)
 {
        struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
        struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver);
 
-       if (!dev->driver)
-               return 0;
        if (gdrv->remove)
                gdrv->remove(gdev);
-
-       return 0;
 }
 
 static void ccwgroup_shutdown(struct device *dev)
index c42405c..684348d 100644 (file)
@@ -100,7 +100,7 @@ static int chsc_subchannel_probe(struct subchannel *sch)
        return ret;
 }
 
-static int chsc_subchannel_remove(struct subchannel *sch)
+static void chsc_subchannel_remove(struct subchannel *sch)
 {
        struct chsc_private *private;
 
@@ -112,7 +112,6 @@ static int chsc_subchannel_remove(struct subchannel *sch)
                put_device(&sch->dev);
        }
        kfree(private);
-       return 0;
 }
 
 static void chsc_subchannel_shutdown(struct subchannel *sch)
index a974943..ebc321e 100644 (file)
@@ -1371,15 +1371,14 @@ static int css_probe(struct device *dev)
        return ret;
 }
 
-static int css_remove(struct device *dev)
+static void css_remove(struct device *dev)
 {
        struct subchannel *sch;
-       int ret;
 
        sch = to_subchannel(dev);
-       ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
+       if (sch->driver->remove)
+               sch->driver->remove(sch);
        sch->driver = NULL;
-       return ret;
 }
 
 static void css_shutdown(struct device *dev)
index 2eddfc4..c98522c 100644 (file)
@@ -81,7 +81,7 @@ struct css_driver {
        int (*chp_event)(struct subchannel *, struct chp_link *, int);
        int (*sch_event)(struct subchannel *, int);
        int (*probe)(struct subchannel *);
-       int (*remove)(struct subchannel *);
+       void (*remove)(struct subchannel *);
        void (*shutdown)(struct subchannel *);
        int (*settle)(void);
 };
index 84f659c..adf33b6 100644 (file)
@@ -137,7 +137,7 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
 
 static void io_subchannel_irq(struct subchannel *);
 static int io_subchannel_probe(struct subchannel *);
-static int io_subchannel_remove(struct subchannel *);
+static void io_subchannel_remove(struct subchannel *);
 static void io_subchannel_shutdown(struct subchannel *);
 static int io_subchannel_sch_event(struct subchannel *, int);
 static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
@@ -1101,7 +1101,7 @@ out_schedule:
        return 0;
 }
 
-static int io_subchannel_remove(struct subchannel *sch)
+static void io_subchannel_remove(struct subchannel *sch)
 {
        struct io_subchannel_private *io_priv = to_io_private(sch);
        struct ccw_device *cdev;
@@ -1120,7 +1120,6 @@ out_free:
                          io_priv->dma_area, io_priv->dma_area_dma);
        kfree(io_priv);
        sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
-       return 0;
 }
 
 static void io_subchannel_verify(struct subchannel *sch)
@@ -1742,7 +1741,7 @@ ccw_device_probe (struct device *dev)
        return 0;
 }
 
-static int ccw_device_remove(struct device *dev)
+static void ccw_device_remove(struct device *dev)
 {
        struct ccw_device *cdev = to_ccwdev(dev);
        struct ccw_driver *cdrv = cdev->drv;
@@ -1776,8 +1775,6 @@ static int ccw_device_remove(struct device *dev)
        spin_unlock_irq(cdev->ccwlock);
        io_subchannel_quiesce(sch);
        __disable_cmf(cdev);
-
-       return 0;
 }
 
 static void ccw_device_shutdown(struct device *dev)
index c8964e0..15bdae5 100644 (file)
@@ -282,7 +282,7 @@ disable:
        spin_unlock_irq(sch->lock);
 }
 
-static int eadm_subchannel_remove(struct subchannel *sch)
+static void eadm_subchannel_remove(struct subchannel *sch)
 {
        struct eadm_private *private = get_eadm_private(sch);
 
@@ -297,8 +297,6 @@ static int eadm_subchannel_remove(struct subchannel *sch)
        spin_unlock_irq(sch->lock);
 
        kfree(private);
-
-       return 0;
 }
 
 static void eadm_subchannel_shutdown(struct subchannel *sch)
index 9f26d43..b6b4589 100644 (file)
@@ -28,12 +28,13 @@ static int scmdev_probe(struct device *dev)
        return scmdrv->probe ? scmdrv->probe(scmdev) : -ENODEV;
 }
 
-static int scmdev_remove(struct device *dev)
+static void scmdev_remove(struct device *dev)
 {
        struct scm_device *scmdev = to_scm_dev(dev);
        struct scm_driver *scmdrv = to_scm_drv(dev->driver);
 
-       return scmdrv->remove ? scmdrv->remove(scmdev) : -ENODEV;
+       if (scmdrv->remove)
+               scmdrv->remove(scmdev);
 }
 
 static int scmdev_uevent(struct device *dev, struct kobj_uevent_env *env)
index 9b61e9b..76099bc 100644 (file)
@@ -234,7 +234,7 @@ out_free:
        return ret;
 }
 
-static int vfio_ccw_sch_remove(struct subchannel *sch)
+static void vfio_ccw_sch_remove(struct subchannel *sch)
 {
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_crw *crw, *temp;
@@ -257,7 +257,6 @@ static int vfio_ccw_sch_remove(struct subchannel *sch)
        VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
-       return 0;
 }
 
 static void vfio_ccw_sch_shutdown(struct subchannel *sch)
index 8d3a1d8..0992edc 100644 (file)
@@ -901,7 +901,7 @@ out:
        return rc;
 }
 
-static int ap_device_remove(struct device *dev)
+static void ap_device_remove(struct device *dev)
 {
        struct ap_device *ap_dev = to_ap_dev(dev);
        struct ap_driver *ap_drv = ap_dev->drv;
@@ -926,8 +926,6 @@ static int ap_device_remove(struct device *dev)
        ap_dev->drv = NULL;
 
        put_device(dev);
-
-       return 0;
 }
 
 struct ap_queue *ap_get_qdev(ap_qid_t qid)
index 5b3a20a..58f6936 100644 (file)
@@ -7674,7 +7674,7 @@ static int sdebug_driver_probe(struct device *dev)
        return error;
 }
 
-static int sdebug_driver_remove(struct device *dev)
+static void sdebug_driver_remove(struct device *dev)
 {
        struct sdebug_host_info *sdbg_host;
        struct sdebug_dev_info *sdbg_devinfo, *tmp;
@@ -7691,7 +7691,6 @@ static int sdebug_driver_remove(struct device *dev)
        }
 
        scsi_host_put(sdbg_host->shost);
-       return 0;
 }
 
 static int pseudo_lld_bus_match(struct device *dev,
index 348836b..c0ab904 100644 (file)
@@ -150,17 +150,13 @@ static int superhyway_device_probe(struct device *dev)
        return -ENODEV;
 }
 
-static int superhyway_device_remove(struct device *dev)
+static void superhyway_device_remove(struct device *dev)
 {
        struct superhyway_device *shyway_dev = to_superhyway_device(dev);
        struct superhyway_driver *shyway_drv = to_superhyway_driver(dev->driver);
 
-       if (shyway_drv && shyway_drv->remove) {
+       if (shyway_drv && shyway_drv->remove)
                shyway_drv->remove(shyway_dev);
-               return 0;
-       }
-
-       return -ENODEV;
 }
 
 /**
index 1794ff0..7c4f32d 100644 (file)
@@ -520,7 +520,7 @@ static int siox_probe(struct device *dev)
        return sdriver->probe(sdevice);
 }
 
-static int siox_remove(struct device *dev)
+static void siox_remove(struct device *dev)
 {
        struct siox_driver *sdriver =
                container_of(dev->driver, struct siox_driver, driver);
@@ -528,8 +528,6 @@ static int siox_remove(struct device *dev)
 
        if (sdriver->remove)
                sdriver->remove(sdevice);
-
-       return 0;
 }
 
 static void siox_shutdown(struct device *dev)
index 1d2bc18..78480e3 100644 (file)
@@ -81,7 +81,7 @@ static int slim_device_probe(struct device *dev)
        return ret;
 }
 
-static int slim_device_remove(struct device *dev)
+static void slim_device_remove(struct device *dev)
 {
        struct slim_device *sbdev = to_slim_device(dev);
        struct slim_driver *sbdrv;
@@ -91,8 +91,6 @@ static int slim_device_remove(struct device *dev)
                if (sbdrv->remove)
                        sbdrv->remove(sbdev);
        }
-
-       return 0;
 }
 
 static int slim_device_uevent(struct device *dev, struct kobj_uevent_env *env)
index 7abfc8c..475a57b 100644 (file)
@@ -217,7 +217,7 @@ static int apr_device_probe(struct device *dev)
        return adrv->probe(adev);
 }
 
-static int apr_device_remove(struct device *dev)
+static void apr_device_remove(struct device *dev)
 {
        struct apr_device *adev = to_apr_device(dev);
        struct apr_driver *adrv;
@@ -231,8 +231,6 @@ static int apr_device_remove(struct device *dev)
                idr_remove(&apr->svcs_idr, adev->svc_id);
                spin_unlock(&apr->svcs_lock);
        }
-
-       return 0;
 }
 
 static int apr_uevent(struct device *dev, struct kobj_uevent_env *env)
index c991811..ad2b558 100644 (file)
@@ -405,7 +405,7 @@ static int spi_probe(struct device *dev)
        return ret;
 }
 
-static int spi_remove(struct device *dev)
+static void spi_remove(struct device *dev)
 {
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
 
@@ -420,8 +420,6 @@ static int spi_remove(struct device *dev)
        }
 
        dev_pm_domain_detach(dev, true);
-
-       return 0;
 }
 
 static void spi_shutdown(struct device *dev)
index 51f5aeb..b37ead9 100644 (file)
@@ -345,7 +345,7 @@ fail_probe:
        return err;
 }
 
-static int spmi_drv_remove(struct device *dev)
+static void spmi_drv_remove(struct device *dev)
 {
        const struct spmi_driver *sdrv = to_spmi_driver(dev->driver);
 
@@ -356,7 +356,6 @@ static int spmi_drv_remove(struct device *dev)
        pm_runtime_disable(dev);
        pm_runtime_set_suspended(dev);
        pm_runtime_put_noidle(dev);
-       return 0;
 }
 
 static void spmi_drv_shutdown(struct device *dev)
index 3a29b55..8a93c83 100644 (file)
@@ -283,7 +283,7 @@ static void ssb_device_shutdown(struct device *dev)
                ssb_drv->shutdown(ssb_dev);
 }
 
-static int ssb_device_remove(struct device *dev)
+static void ssb_device_remove(struct device *dev)
 {
        struct ssb_device *ssb_dev = dev_to_ssb_dev(dev);
        struct ssb_driver *ssb_drv = drv_to_ssb_drv(dev->driver);
@@ -291,8 +291,6 @@ static int ssb_device_remove(struct device *dev)
        if (ssb_drv && ssb_drv->remove)
                ssb_drv->remove(ssb_dev);
        ssb_device_put(ssb_dev);
-
-       return 0;
 }
 
 static int ssb_device_probe(struct device *dev)
index 0f730ef..8a75f66 100644 (file)
@@ -1186,15 +1186,13 @@ static int anybus_bus_probe(struct device *dev)
        return adrv->probe(adev);
 }
 
-static int anybus_bus_remove(struct device *dev)
+static void anybus_bus_remove(struct device *dev)
 {
        struct anybuss_client_driver *adrv =
                to_anybuss_client_driver(dev->driver);
 
        if (adrv->remove)
                adrv->remove(to_anybuss_client(dev));
-
-       return 0;
 }
 
 static struct bus_type anybus_bus = {
index 13d3198..5a5c17a 100644 (file)
@@ -169,7 +169,7 @@ static int gbphy_dev_probe(struct device *dev)
        return ret;
 }
 
-static int gbphy_dev_remove(struct device *dev)
+static void gbphy_dev_remove(struct device *dev)
 {
        struct gbphy_driver *gbphy_drv = to_gbphy_driver(dev->driver);
        struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
@@ -180,8 +180,6 @@ static int gbphy_dev_remove(struct device *dev)
        pm_runtime_set_suspended(dev);
        pm_runtime_put_noidle(dev);
        pm_runtime_dont_use_autosuspend(dev);
-
-       return 0;
 }
 
 static struct bus_type gbphy_bus_type = {
index 6d0b0e6..cbb2118 100644 (file)
@@ -81,7 +81,7 @@ static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
 }
 
 static int tcm_loop_driver_probe(struct device *);
-static int tcm_loop_driver_remove(struct device *);
+static void tcm_loop_driver_remove(struct device *);
 
 static int pseudo_lld_bus_match(struct device *dev,
                                struct device_driver *dev_driver)
@@ -363,7 +363,7 @@ static int tcm_loop_driver_probe(struct device *dev)
        return 0;
 }
 
-static int tcm_loop_driver_remove(struct device *dev)
+static void tcm_loop_driver_remove(struct device *dev)
 {
        struct tcm_loop_hba *tl_hba;
        struct Scsi_Host *sh;
@@ -373,7 +373,6 @@ static int tcm_loop_driver_remove(struct device *dev)
 
        scsi_remove_host(sh);
        scsi_host_put(sh);
-       return 0;
 }
 
 static void tcm_loop_release_adapter(struct device *dev)
index a062bef..7018d95 100644 (file)
@@ -86,7 +86,7 @@ static int tb_service_probe(struct device *dev)
        return driver->probe(svc, id);
 }
 
-static int tb_service_remove(struct device *dev)
+static void tb_service_remove(struct device *dev)
 {
        struct tb_service *svc = tb_to_service(dev);
        struct tb_service_driver *driver;
@@ -94,8 +94,6 @@ static int tb_service_remove(struct device *dev)
        driver = container_of(dev->driver, struct tb_service_driver, driver);
        if (driver->remove)
                driver->remove(svc);
-
-       return 0;
 }
 
 static void tb_service_shutdown(struct device *dev)
index 9cdfcfe..9249896 100644 (file)
@@ -421,15 +421,13 @@ static int serdev_drv_probe(struct device *dev)
        return ret;
 }
 
-static int serdev_drv_remove(struct device *dev)
+static void serdev_drv_remove(struct device *dev)
 {
        const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver);
        if (sdrv->remove)
                sdrv->remove(to_serdev_device(dev));
 
        dev_pm_domain_detach(dev, true);
-
-       return 0;
 }
 
 static struct bus_type serdev_bus_type = {
index 7e13b74..4169cf4 100644 (file)
@@ -78,14 +78,12 @@ static int ulpi_probe(struct device *dev)
        return drv->probe(to_ulpi_dev(dev));
 }
 
-static int ulpi_remove(struct device *dev)
+static void ulpi_remove(struct device *dev)
 {
        struct ulpi_driver *drv = to_ulpi_driver(dev->driver);
 
        if (drv->remove)
                drv->remove(to_ulpi_dev(dev));
-
-       return 0;
 }
 
 static struct bus_type ulpi_bus = {
index 7133818..9e38142 100644 (file)
@@ -74,7 +74,7 @@ err_autopm_put:
        return retval;
 }
 
-static int usb_serial_device_remove(struct device *dev)
+static void usb_serial_device_remove(struct device *dev)
 {
        struct usb_serial_port *port = to_usb_serial_port(dev);
        struct usb_serial_driver *driver;
@@ -101,8 +101,6 @@ static int usb_serial_device_remove(struct device *dev)
 
        if (!autopm_err)
                usb_autopm_put_interface(port->serial->interface);
-
-       return 0;
 }
 
 static ssize_t new_id_store(struct device_driver *driver,
index 7f3c9a8..78e0e78 100644 (file)
@@ -382,7 +382,7 @@ static int typec_probe(struct device *dev)
        return ret;
 }
 
-static int typec_remove(struct device *dev)
+static void typec_remove(struct device *dev)
 {
        struct typec_altmode_driver *drv = to_altmode_driver(dev->driver);
        struct typec_altmode *adev = to_typec_altmode(dev);
@@ -400,8 +400,6 @@ static int typec_remove(struct device *dev)
 
        adev->desc = NULL;
        adev->ops = NULL;
-
-       return 0;
 }
 
 struct bus_type typec_bus = {
index bb3f1d1..3fc4525 100644 (file)
@@ -34,15 +34,13 @@ static int vdpa_dev_probe(struct device *d)
        return ret;
 }
 
-static int vdpa_dev_remove(struct device *d)
+static void vdpa_dev_remove(struct device *d)
 {
        struct vdpa_device *vdev = dev_to_vdpa(d);
        struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
 
        if (drv && drv->remove)
                drv->remove(vdev);
-
-       return 0;
 }
 
 static struct bus_type vdpa_bus = {
index c368ec8..e2cb1ff 100644 (file)
@@ -57,7 +57,7 @@ static int mdev_probe(struct device *dev)
        return ret;
 }
 
-static int mdev_remove(struct device *dev)
+static void mdev_remove(struct device *dev)
 {
        struct mdev_driver *drv =
                container_of(dev->driver, struct mdev_driver, driver);
@@ -67,8 +67,6 @@ static int mdev_remove(struct device *dev)
                drv->remove(mdev);
 
        mdev_detach_iommu(mdev);
-
-       return 0;
 }
 
 static int mdev_match(struct device *dev, struct device_driver *drv)
index 4b15c00..2a6055c 100644 (file)
@@ -278,7 +278,7 @@ err:
 
 }
 
-static int virtio_dev_remove(struct device *_d)
+static void virtio_dev_remove(struct device *_d)
 {
        struct virtio_device *dev = dev_to_virtio(_d);
        struct virtio_driver *drv = drv_to_virtio(dev->dev.driver);
@@ -292,7 +292,6 @@ static int virtio_dev_remove(struct device *_d)
 
        /* Acknowledge the device's existence again. */
        virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
-       return 0;
 }
 
 static struct bus_type virtio_bus = {
index e966981..4af6615 100644 (file)
@@ -342,14 +342,12 @@ static int vlynq_device_probe(struct device *dev)
        return result;
 }
 
-static int vlynq_device_remove(struct device *dev)
+static void vlynq_device_remove(struct device *dev)
 {
        struct vlynq_driver *drv = to_vlynq_driver(dev->driver);
 
        if (drv->remove)
                drv->remove(to_vlynq_device(dev));
-
-       return 0;
 }
 
 int __vlynq_register_driver(struct vlynq_driver *driver, struct module *owner)
index 1b15afe..8dba201 100644 (file)
@@ -1990,7 +1990,7 @@ static int vme_bus_probe(struct device *dev)
        return -ENODEV;
 }
 
-static int vme_bus_remove(struct device *dev)
+static void vme_bus_remove(struct device *dev)
 {
        struct vme_driver *driver;
        struct vme_dev *vdev = dev_to_vme_dev(dev);
@@ -1998,8 +1998,6 @@ static int vme_bus_remove(struct device *dev)
        driver = dev->platform_data;
        if (driver->remove)
                driver->remove(vdev);
-
-       return 0;
 }
 
 struct bus_type vme_bus_type = {
index 2a93b7c..2754bdf 100644 (file)
@@ -106,7 +106,7 @@ void xs_request_exit(struct xb_req_data *req);
 
 int xenbus_match(struct device *_dev, struct device_driver *_drv);
 int xenbus_dev_probe(struct device *_dev);
-int xenbus_dev_remove(struct device *_dev);
+void xenbus_dev_remove(struct device *_dev);
 int xenbus_register_driver_common(struct xenbus_driver *drv,
                                  struct xen_bus_type *bus,
                                  struct module *owner,
index 33d09b3..bd003ca 100644 (file)
@@ -325,7 +325,7 @@ fail:
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_probe);
 
-int xenbus_dev_remove(struct device *_dev)
+void xenbus_dev_remove(struct device *_dev)
 {
        struct xenbus_device *dev = to_xenbus_device(_dev);
        struct xenbus_driver *drv = to_xenbus_driver(_dev->driver);
@@ -355,8 +355,6 @@ int xenbus_dev_remove(struct device *_dev)
        if (!drv->allow_rebind ||
            xenbus_read_driver_state(dev->nodename) == XenbusStateClosing)
                xenbus_switch_state(dev, XenbusStateClosed);
-
-       return 0;
 }
 EXPORT_SYMBOL_GPL(xenbus_dev_remove);
 
index 0dd7cbc..c18524b 100644 (file)
@@ -62,7 +62,7 @@ static int zorro_device_probe(struct device *dev)
 }
 
 
-static int zorro_device_remove(struct device *dev)
+static void zorro_device_remove(struct device *dev)
 {
        struct zorro_dev *z = to_zorro_dev(dev);
        struct zorro_driver *drv = to_zorro_driver(dev->driver);
@@ -72,7 +72,6 @@ static int zorro_device_remove(struct device *dev)
                        drv->remove(z);
                z->driver = NULL;
        }
-       return 0;
 }
 
 
index 1ea5e1d..062777a 100644 (file)
@@ -91,7 +91,7 @@ struct bus_type {
        int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
        int (*probe)(struct device *dev);
        void (*sync_state)(struct device *dev);
-       int (*remove)(struct device *dev);
+       void (*remove)(struct device *dev);
        void (*shutdown)(struct device *dev);
 
        int (*online)(struct device *dev);
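
The struct bus_type change above is what drives the long series of driver conversions earlier in this merge: a bus ->remove() callback can no longer report failure, so anything that used to be returned as an error must be handled in place. A hedged sketch with made-up examplebus_* names:

static void examplebus_device_remove(struct device *dev)
{
        int ret;

        ret = examplebus_teardown(dev);         /* hypothetical helper that may still fail */
        if (ret)
                dev_warn(dev, "teardown failed: %d\n", ret);

        /* No return value: device removal itself can no longer be vetoed. */
}

static struct bus_type examplebus_type = {
        .name   = "examplebus",
        .remove = examplebus_device_remove,
};
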
index 2debc27..8292420 100644 (file)
@@ -85,7 +85,7 @@ struct pci_epf_ops {
  */
 struct pci_epf_driver {
        int     (*probe)(struct pci_epf *epf);
-       int     (*remove)(struct pci_epf *epf);
+       void    (*remove)(struct pci_epf *epf);
 
        struct device_driver    driver;
        struct pci_epf_ops      *ops;
index c2c7759..bde5860 100644 (file)
@@ -572,6 +572,15 @@ typedef struct drm_i915_irq_wait {
 #define   I915_SCHEDULER_CAP_PREEMPTION        (1ul << 2)
 #define   I915_SCHEDULER_CAP_SEMAPHORES        (1ul << 3)
 #define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
+/*
+ * Indicates the 2k user priority levels are statically mapped into 3 buckets as
+ * follows:
+ *
+ * -1k to -1   Low priority
+ * 0           Normal priority
+ * 1 to 1k     Highest priority
+ */
+#define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP       (1ul << 5)
 
 #define I915_PARAM_HUC_STATUS           42
 
@@ -674,6 +683,9 @@ typedef struct drm_i915_irq_wait {
  */
 #define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55
 
+/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
+#define I915_PARAM_HAS_USERPTR_PROBE 56
+
 /* Must be kept compact -- no holes and well documented */
 
 typedef struct drm_i915_getparam {
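
Both new parameters are reported through the existing GETPARAM ioctl. A hedged userspace sketch (the i915_drm.h include path varies by system, and error handling is minimal):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>       /* header location is an assumption; often shipped by libdrm */

static int i915_has_static_priority_map(int drm_fd)
{
        int caps = 0;
        struct drm_i915_getparam gp = {
                .param = I915_PARAM_HAS_SCHEDULER,      /* returns I915_SCHEDULER_CAP_* bits */
                .value = &caps,
        };

        if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0)
                return 0;                               /* older kernel: treat as absent */

        return !!(caps & I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP);
}

I915_PARAM_HAS_USERPTR_PROBE is queried the same way; a non-zero value means the kernel accepts the probe flag.
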
@@ -849,45 +861,113 @@ struct drm_i915_gem_mmap_gtt {
        __u64 offset;
 };
 
+/**
+ * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object.
+ *
+ * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl,
+ * and is used to retrieve the fake offset to mmap an object specified by &handle.
+ *
+ * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+.
+ * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave
+ * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`.
+ */
 struct drm_i915_gem_mmap_offset {
-       /** Handle for the object being mapped. */
+       /** @handle: Handle for the object being mapped. */
        __u32 handle;
+       /** @pad: Must be zero */
        __u32 pad;
        /**
-        * Fake offset to use for subsequent mmap call
+        * @offset: The fake offset to use for subsequent mmap call
         *
         * This is a fixed-size type for 32/64 compatibility.
         */
        __u64 offset;
 
        /**
-        * Flags for extended behaviour.
+        * @flags: Flags for extended behaviour.
+        *
+        * It is mandatory that one of the `MMAP_OFFSET` types
+        * should be included:
         *
-        * It is mandatory that one of the MMAP_OFFSET types
-        * (GTT, WC, WB, UC, etc) should be included.
+        * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined)
+        * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching.
+        * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
+        * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
+        *
+        * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
+        * type. On devices without local memory, this caching mode is invalid.
+        *
+        * When specifying `I915_MMAP_OFFSET_FIXED`, either WC or WB will be used
+        * as the caching mode, depending on the object placement at creation. WB
+        * is used when the object can only exist in system memory, WC otherwise.
         */
        __u64 flags;
-#define I915_MMAP_OFFSET_GTT 0
-#define I915_MMAP_OFFSET_WC  1
-#define I915_MMAP_OFFSET_WB  2
-#define I915_MMAP_OFFSET_UC  3
 
-       /*
-        * Zero-terminated chain of extensions.
+#define I915_MMAP_OFFSET_GTT   0
+#define I915_MMAP_OFFSET_WC    1
+#define I915_MMAP_OFFSET_WB    2
+#define I915_MMAP_OFFSET_UC    3
+#define I915_MMAP_OFFSET_FIXED 4
+
+       /**
+        * @extensions: Zero-terminated chain of extensions.
         *
         * No current extensions defined; mbz.
         */
        __u64 extensions;
 };
 
+/**
+ * struct drm_i915_gem_set_domain - Adjust the objects write or read domain, in
+ * preparation for accessing the pages via some CPU domain.
+ *
+ * Specifying a new write or read domain will flush the object out of the
+ * previous domain (if required), before then updating the object's domain
+ * tracking with the new domain.
+ *
+ * Note this might involve waiting for the object first if it is still active on
+ * the GPU.
+ *
+ * Supported values for @read_domains and @write_domain:
+ *
+ *     - I915_GEM_DOMAIN_WC: Uncached write-combined domain
+ *     - I915_GEM_DOMAIN_CPU: CPU cache domain
+ *     - I915_GEM_DOMAIN_GTT: Mappable aperture domain
+ *
+ * All other domains are rejected.
+ *
+ * Note that for discrete, starting from DG1, this is no longer supported, and
+ * is instead rejected. On such platforms the CPU domain is effectively static,
+ * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
+ * which can't be set explicitly and instead depends on the object placements,
+ * as per the below.
+ *
+ * Implicit caching rules, starting from DG1:
+ *
+ *     - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
+ *       contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
+ *       mapped as write-combined only.
+ *
+ *     - Everything else is always allocated and mapped as write-back, with the
+ *       guarantee that everything is also coherent with the GPU.
+ *
+ * Note that this is likely to change in the future again, where we might need
+ * more flexibility on future devices, so making this all explicit as part of a
+ * new &drm_i915_gem_create_ext extension is probable.
+ */
 struct drm_i915_gem_set_domain {
-       /** Handle for the object */
+       /** @handle: Handle for the object. */
        __u32 handle;
 
-       /** New read domains */
+       /** @read_domains: New read domains. */
        __u32 read_domains;
 
-       /** New write domain */
+       /**
+        * @write_domain: New write domain.
+        *
+        * Note that having something in the write domain implies it's in the
+        * read domain, and only that read domain.
+        */
        __u32 write_domain;
 };
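For illustration, a minimal sketch of flushing an object into the CPU domain before CPU access on a pre-DG1 part; the helper name is an assumption, while the ioctl and domain values are the ones documented above.

.. code-block:: C

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>	/* include path depends on the setup */

	/* Sketch: move the object into the CPU domain for reads and writes. */
	static int bo_prep_cpu_access(int drm_fd, uint32_t handle)
	{
		struct drm_i915_gem_set_domain sd = {
			.handle = handle,
			.read_domains = I915_GEM_DOMAIN_CPU,
			/* A write domain implies the matching read domain. */
			.write_domain = I915_GEM_DOMAIN_CPU,
		};

		/* May block until the GPU is done with the object. */
		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
	}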
 
@@ -1348,12 +1428,11 @@ struct drm_i915_gem_busy {
         * reading from the object simultaneously.
         *
         * The value of each engine class is the same as specified in the
-        * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
+        * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
         * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
-        * reported as active itself. Some hardware may have parallel
-        * execution engines, e.g. multiple media engines, which are
-        * mapped to the same class identifier and so are not separately
-        * reported for busyness.
+        * Some hardware may have parallel execution engines, e.g. multiple
+        * media engines, which are mapped to the same class identifier and so
+        * are not separately reported for busyness.
         *
         * Caveat emptor:
         * Only the boolean result of this query is reliable; that is whether
@@ -1364,43 +1443,79 @@ struct drm_i915_gem_busy {
 };
 
 /**
- * I915_CACHING_NONE
- *
- * GPU access is not coherent with cpu caches. Default for machines without an
- * LLC.
- */
-#define I915_CACHING_NONE              0
-/**
- * I915_CACHING_CACHED
- *
- * GPU access is coherent with cpu caches and furthermore the data is cached in
- * last-level caches shared between cpu cores and the gpu GT. Default on
- * machines with HAS_LLC.
+ * struct drm_i915_gem_caching - Set or get the caching for the given object
+ * handle.
+ *
+ * Allow userspace to control the GTT caching bits for a given object when the
+ * object is later mapped through the ppGTT (or GGTT on older platforms lacking
+ * ppGTT support, or if the object is used for scanout). Note that this might
+ * require unbinding the object from the GTT first, if its current caching value
+ * doesn't match.
+ *
+ * Note that this all changes on discrete platforms: starting from DG1,
+ * set/get caching is no longer supported and is now rejected. Instead the CPU
+ * caching attributes (WB vs WC) will become an immutable creation-time property
+ * for the object, along with the GTT caching level. For now we don't expose any
+ * new uAPI for this; instead on DG1 this is all implicit, although this largely
+ * shouldn't matter since DG1 is coherent by default (without any way of
+ * controlling it).
+ *
+ * Implicit caching rules, starting from DG1:
+ *
+ *     - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
+ *       contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
+ *       mapped as write-combined only.
+ *
+ *     - Everything else is always allocated and mapped as write-back, with the
+ *       guarantee that everything is also coherent with the GPU.
+ *
+ * Note that this is likely to change in the future again, where we might need
+ * more flexibility on future devices, so making this all explicit as part of a
+ * new &drm_i915_gem_create_ext extension is probable.
+ *
+ * Side note: Part of the reason for this is that changing the at-allocation-time CPU
+ * caching attributes for the pages might be required (and is expensive) if we
+ * then need to CPU map the pages later with different caching attributes. This
+ * inconsistent caching behaviour, while supported on x86, is not universally
+ * supported on other architectures. So for simplicity we opt for setting
+ * everything at creation time, whilst also making it immutable, on discrete
+ * platforms.
  */
-#define I915_CACHING_CACHED            1
-/**
- * I915_CACHING_DISPLAY
- *
- * Special GPU caching mode which is coherent with the scanout engines.
- * Transparently falls back to I915_CACHING_NONE on platforms where no special
- * cache mode (like write-through or gfdt flushing) is available. The kernel
- * automatically sets this mode when using a buffer as a scanout target.
- * Userspace can manually set this mode to avoid a costly stall and clflush in
- * the hotpath of drawing the first frame.
- */
-#define I915_CACHING_DISPLAY           2
-
 struct drm_i915_gem_caching {
        /**
-        * Handle of the buffer to set/get the caching level of. */
+        * @handle: Handle of the buffer to set/get the caching level.
+        */
        __u32 handle;
 
        /**
-        * Cacheing level to apply or return value
+        * @caching: The GTT caching level to apply or possible return value.
+        *
+        * The supported @caching values:
         *
-        * bits0-15 are for generic caching control (i.e. the above defined
-        * values). bits16-31 are reserved for platform-specific variations
-        * (e.g. l3$ caching on gen7). */
+        * I915_CACHING_NONE:
+        *
+        * GPU access is not coherent with CPU caches.  Default for machines
+        * without an LLC. This means manual flushing might be needed, if we
+        * want GPU access to be coherent.
+        *
+        * I915_CACHING_CACHED:
+        *
+        * GPU access is coherent with CPU caches and furthermore the data is
+        * cached in last-level caches shared between CPU cores and the GPU GT.
+        *
+        * I915_CACHING_DISPLAY:
+        *
+        * Special GPU caching mode which is coherent with the scanout engines.
+        * Transparently falls back to I915_CACHING_NONE on platforms where no
+        * special cache mode (like write-through or gfdt flushing) is
+        * available. The kernel automatically sets this mode when using a
+        * buffer as a scanout target.  Userspace can manually set this mode to
+        * avoid a costly stall and clflush in the hotpath of drawing the first
+        * frame.
+        */
+#define I915_CACHING_NONE              0
+#define I915_CACHING_CACHED            1
+#define I915_CACHING_DISPLAY           2
        __u32 caching;
 };
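For illustration, a minimal sketch of the corresponding get/set ioctls; the helper names are assumptions, and on DG1 and later discrete parts both calls are expected to be rejected, as described above.

.. code-block:: C

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>	/* include path depends on the setup */

	/* Sketch: query the current GTT caching level, -1 on error/rejection. */
	static int bo_get_caching(int drm_fd, uint32_t handle)
	{
		struct drm_i915_gem_caching arg = { .handle = handle };

		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg))
			return -1;

		return arg.caching; /* I915_CACHING_NONE/CACHED/DISPLAY */
	}

	/* Sketch: request a new caching level for the object. */
	static int bo_set_caching(int drm_fd, uint32_t handle, uint32_t level)
	{
		struct drm_i915_gem_caching arg = {
			.handle = handle,
			.caching = level,
		};

		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
	}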
 
@@ -1639,6 +1754,10 @@ struct drm_i915_gem_context_param {
        __u32 size;
        __u64 param;
 #define I915_CONTEXT_PARAM_BAN_PERIOD  0x1
+/* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed.  On the off chance
+ * someone somewhere has attempted to use it, never re-use this context
+ * param number.
+ */
 #define I915_CONTEXT_PARAM_NO_ZEROMAP  0x2
 #define I915_CONTEXT_PARAM_GTT_SIZE    0x3
 #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE    0x4
@@ -1723,24 +1842,8 @@ struct drm_i915_gem_context_param {
  */
 #define I915_CONTEXT_PARAM_PERSISTENCE 0xb
 
-/*
- * I915_CONTEXT_PARAM_RINGSIZE:
- *
- * Sets the size of the CS ringbuffer to use for logical ring contexts. This
- * applies a limit of how many batches can be queued to HW before the caller
- * is blocked due to lack of space for more commands.
- *
- * Only reliably possible to be set prior to first use, i.e. during
- * construction. At any later point, the current execution must be flushed as
- * the ring can only be changed while the context is idle. Note, the ringsize
- * can be specified as a constructor property, see
- * I915_CONTEXT_CREATE_EXT_SETPARAM, but can also be set later if required.
- *
- * Only applies to the current set of engine and lost when those engines
- * are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES).
- *
- * Must be between 4 - 512 KiB, in intervals of page size [4 KiB].
- * Default is 16 KiB.
+/* This API has been removed.  On the off chance someone somewhere has
+ * attempted to use it, never re-use this context param number.
  */
 #define I915_CONTEXT_PARAM_RINGSIZE    0xc
 /* Must be kept compact -- no holes and well documented */
@@ -1807,6 +1910,69 @@ struct drm_i915_gem_context_param_sseu {
        __u32 rsvd;
 };
 
+/**
+ * DOC: Virtual Engine uAPI
+ *
+ * Virtual engine is a concept where userspace is able to configure a set of
+ * physical engines, submit a batch buffer, and let the driver execute it on any
+ * engine from the set as it sees fit.
+ *
+ * This is primarily useful on parts which have multiple instances of the same
+ * engine class, for example GT3+ Skylake parts with their two VCS engines.
+ *
+ * For instance userspace can enumerate all engines of a certain class using the
+ * previously described `Engine Discovery uAPI`_. After that userspace can
+ * create a GEM context with a placeholder slot for the virtual engine (using
+ * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
+ * and instance respectively) and finally using the
+ * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in
+ * the same reserved slot.
+ *
+ * Example of creating a virtual engine and submitting a batch buffer to it:
+ *
+ * .. code-block:: C
+ *
+ *     I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
+ *             .base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
+ *             .engine_index = 0, // Place this virtual engine into engine map slot 0
+ *             .num_siblings = 2,
+ *             .engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
+ *                          { I915_ENGINE_CLASS_VIDEO, 1 }, },
+ *     };
+ *     I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
+ *             .engines = { { I915_ENGINE_CLASS_INVALID,
+ *                            I915_ENGINE_CLASS_INVALID_NONE } },
+ *             .extensions = to_user_pointer(&virtual), // Chains after load_balance extension
+ *     };
+ *     struct drm_i915_gem_context_create_ext_setparam p_engines = {
+ *             .base = {
+ *                     .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ *             },
+ *             .param = {
+ *                     .param = I915_CONTEXT_PARAM_ENGINES,
+ *                     .value = to_user_pointer(&engines),
+ *                     .size = sizeof(engines),
+ *             },
+ *     };
+ *     struct drm_i915_gem_context_create_ext create = {
+ *             .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ *		.extensions = to_user_pointer(&p_engines),
+ *     };
+ *
+ *     ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ *     // Now we have created a GEM context with its engine map containing a
+ *     // single virtual engine. Submissions to this slot can go either to
+ *     // vcs0 or vcs1, depending on the load balancing algorithm used inside
+ *     // the driver. The load balancing is dynamic from one batch buffer to
+ *     // another and transparent to userspace.
+ *
+ *     ...
+ *     execbuf.rsvd1 = ctx_id;
+ *     execbuf.flags = 0; // Submits to index 0 which is the virtual engine
+ *     gem_execbuf(drm_fd, &execbuf);
+ */
+
 /*
  * i915_context_engines_load_balance:
  *
@@ -1883,6 +2049,61 @@ struct i915_context_engines_bond {
        struct i915_engine_class_instance engines[N__]; \
 } __attribute__((packed)) name__
 
+/**
+ * DOC: Context Engine Map uAPI
+ *
+ * Context engine map is a new way of addressing engines when submitting batch
+ * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
+ * inside the flags field of `struct drm_i915_gem_execbuffer2`.
+ *
+ * To use it, created GEM contexts need to be configured with a list of engines
+ * the user intends to submit to. This is accomplished using the
+ * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
+ * i915_context_param_engines`.
+ *
+ * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
+ * configured map.
+ *
+ * Example of creating such context and submitting against it:
+ *
+ * .. code-block:: C
+ *
+ *     I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
+ *             .engines = { { I915_ENGINE_CLASS_RENDER, 0 },
+ *                          { I915_ENGINE_CLASS_COPY, 0 } }
+ *     };
+ *     struct drm_i915_gem_context_create_ext_setparam p_engines = {
+ *             .base = {
+ *                     .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ *             },
+ *             .param = {
+ *                     .param = I915_CONTEXT_PARAM_ENGINES,
+ *                     .value = to_user_pointer(&engines),
+ *                     .size = sizeof(engines),
+ *             },
+ *     };
+ *     struct drm_i915_gem_context_create_ext create = {
+ *             .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+ *		.extensions = to_user_pointer(&p_engines),
+ *     };
+ *
+ *     ctx_id = gem_context_create_ext(drm_fd, &create);
+ *
+ *     // We have now created a GEM context with two engines in the map:
+ *     // Index 0 points to rcs0 while index 1 points to bcs0. Other engines
+ *     // will not be accessible from this context.
+ *
+ *     ...
+ *     execbuf.rsvd1 = ctx_id;
+ *     execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
+ *     gem_execbuf(drm_fd, &execbuf);
+ *
+ *     ...
+ *     execbuf.rsvd1 = ctx_id;
+ *	execbuf.flags = 1; // Submits to index 1, which is bcs0 for this context
+ *     gem_execbuf(drm_fd, &execbuf);
+ */
+
 struct i915_context_param_engines {
        __u64 extensions; /* linked chain of extension blocks, 0 terminates */
 #define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
@@ -1901,20 +2122,10 @@ struct drm_i915_gem_context_create_ext_setparam {
        struct drm_i915_gem_context_param param;
 };
 
-struct drm_i915_gem_context_create_ext_clone {
+/* This API has been removed.  On the off chance someone somewhere has
+ * attempted to use it, never re-use this extension number.
+ */
 #define I915_CONTEXT_CREATE_EXT_CLONE 1
-       struct i915_user_extension base;
-       __u32 clone_id;
-       __u32 flags;
-#define I915_CONTEXT_CLONE_ENGINES     (1u << 0)
-#define I915_CONTEXT_CLONE_FLAGS       (1u << 1)
-#define I915_CONTEXT_CLONE_SCHEDATTR   (1u << 2)
-#define I915_CONTEXT_CLONE_SSEU                (1u << 3)
-#define I915_CONTEXT_CLONE_TIMELINE    (1u << 4)
-#define I915_CONTEXT_CLONE_VM          (1u << 5)
-#define I915_CONTEXT_CLONE_UNKNOWN -(I915_CONTEXT_CLONE_VM << 1)
-       __u64 rsvd;
-};
 
 struct drm_i915_gem_context_destroy {
        __u32 ctx_id;
@@ -1986,14 +2197,69 @@ struct drm_i915_reset_stats {
        __u32 pad;
 };
 
+/**
+ * struct drm_i915_gem_userptr - Create GEM object from user allocated memory.
+ *
+ * Userptr objects have several restrictions on what ioctls can be used with the
+ * object handle.
+ */
 struct drm_i915_gem_userptr {
+       /**
+        * @user_ptr: The pointer to the allocated memory.
+        *
+        * Needs to be aligned to PAGE_SIZE.
+        */
        __u64 user_ptr;
+
+       /**
+        * @user_size:
+        *
+        * The size in bytes for the allocated memory. This will also become the
+        * object size.
+        *
+	 * Needs to be aligned to PAGE_SIZE, and should be at least PAGE_SIZE.
+        */
        __u64 user_size;
+
+       /**
+        * @flags:
+        *
+        * Supported flags:
+        *
+        * I915_USERPTR_READ_ONLY:
+        *
+	 * Mark the object as readonly; this also means GPU access can only be
+        * readonly. This is only supported on HW which supports readonly access
+        * through the GTT. If the HW can't support readonly access, an error is
+        * returned.
+        *
+        * I915_USERPTR_PROBE:
+        *
+        * Probe the provided @user_ptr range and validate that the @user_ptr is
+        * indeed pointing to normal memory and that the range is also valid.
+	 * For example, if a garbage address is given to the kernel, this call
+	 * should fail.
+	 *
+	 * Returns -EFAULT if the probe fails.
+        *
+        * Note that this doesn't populate the backing pages, and also doesn't
+        * guarantee that the object will remain valid when the object is
+        * eventually used.
+        *
+        * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE
+        * returns a non-zero value.
+        *
+        * I915_USERPTR_UNSYNCHRONIZED:
+        *
+        * NOT USED. Setting this flag will result in an error.
+        */
        __u32 flags;
 #define I915_USERPTR_READ_ONLY 0x1
+#define I915_USERPTR_PROBE 0x2
 #define I915_USERPTR_UNSYNCHRONIZED 0x80000000
        /**
-        * Returned handle for the object.
+        * @handle: Returned handle for the object.
         *
         * Object handles are nonzero.
         */
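For illustration, a minimal sketch of wrapping page-aligned anonymous memory in a GEM handle, opportunistically setting I915_USERPTR_PROBE when the kernel advertises it via I915_PARAM_HAS_USERPTR_PROBE; the helper name and error handling are assumptions.

.. code-block:: C

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>	/* include path depends on the setup */

	/* Sketch: create a userptr object; ptr/size must be PAGE_SIZE aligned. */
	static int bo_from_userptr(int drm_fd, void *ptr, uint64_t size,
				   uint32_t *handle)
	{
		int has_probe = 0;
		struct drm_i915_getparam gp = {
			.param = I915_PARAM_HAS_USERPTR_PROBE,
			.value = &has_probe,
		};
		struct drm_i915_gem_userptr arg = {
			.user_ptr = (uint64_t)(uintptr_t)ptr,
			.user_size = size,
		};

		/* Older kernels reject the param; has_probe then stays 0. */
		ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
		if (has_probe)
			arg.flags |= I915_USERPTR_PROBE; /* fail early on bad ranges */

		if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
			return -1;

		*handle = arg.handle;
		return 0;
	}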
@@ -2376,6 +2642,76 @@ struct drm_i915_query_topology_info {
        __u8 data[];
 };
 
+/**
+ * DOC: Engine Discovery uAPI
+ *
+ * Engine discovery uAPI is a way of enumerating physical engines present in a
+ * GPU associated with an open i915 DRM file descriptor. This supersedes the old
+ * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
+ * `I915_PARAM_HAS_BLT`.
+ *
+ * The need for this interface arose with Icelake and newer GPUs, which
+ * started to establish a pattern of having multiple engines of the same class,
+ * where not all instances were always completely functionally equivalent.
+ *
+ * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with the
+ * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
+ *
+ * Example for getting the list of engines:
+ *
+ * .. code-block:: C
+ *
+ *     struct drm_i915_query_engine_info *info;
+ *     struct drm_i915_query_item item = {
+ *		.query_id = DRM_I915_QUERY_ENGINE_INFO,
+ *     };
+ *     struct drm_i915_query query = {
+ *             .num_items = 1,
+ *             .items_ptr = (uintptr_t)&item,
+ *     };
+ *     int err, i;
+ *
+ *     // First query the size of the blob we need, this needs to be large
+ *     // enough to hold our array of engines. The kernel will fill out the
+ *     // item.length for us, which is the number of bytes we need.
+ *     //
+ *     // Alternatively a large buffer can be allocated straight away enabling
+ *     // querying in one pass, in which case item.length should contain the
+ *     // length of the provided buffer.
+ *     err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ *     if (err) ...
+ *
+ *     info = calloc(1, item.length);
+ *     // Now that we allocated the required number of bytes, we call the ioctl
+ *     // again, this time with the data_ptr pointing to our newly allocated
+ *     // blob, which the kernel can then populate with info on all engines.
+ *	item.data_ptr = (uintptr_t)info;
+ *
+ *     err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ *     if (err) ...
+ *
+ *     // We can now access each engine in the array
+ *     for (i = 0; i < info->num_engines; i++) {
+ *             struct drm_i915_engine_info einfo = info->engines[i];
+ *             u16 class = einfo.engine.class;
+ *             u16 instance = einfo.engine.instance;
+ *             ....
+ *     }
+ *
+ *     free(info);
+ *
+ * Each of the enumerated engines, apart from being defined by its class and
+ * instance (see `struct i915_engine_class_instance`), also can have flags and
+ * capabilities defined as documented in i915_drm.h.
+ *
+ * For instance video engines which support HEVC encoding will have the
+ * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
+ *
+ * Engine discovery only fully comes into its own when combined with the new way
+ * of addressing engines when submitting batch buffers using contexts with
+ * engine maps configured.
+ */
+
 /**
  * struct drm_i915_engine_info
  *
index 3cb5b5d..af96af1 100644 (file)
  * - 1.3 - Add SMI events support
  * - 1.4 - Indicate new SRAM EDC bit in device properties
  * - 1.5 - Add SVM API
+ * - 1.6 - Query clear flags in SVM get_attr API
  */
 #define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 5
+#define KFD_IOCTL_MINOR_VERSION 6
 
 struct kfd_ioctl_get_version_args {
        __u32 major_version;    /* from KFD */
@@ -575,18 +576,19 @@ struct kfd_ioctl_svm_attribute {
  * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or
  * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. For
  * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be
- * aggregated by bitwise AND. The minimum  migration granularity
- * throughout the range will be returned for
- * @KFD_IOCTL_SVM_ATTR_GRANULARITY.
+ * aggregated by bitwise AND. That means a flag will be set in the
+ * output if that flag is set for all pages in the range. For
+ * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, flags of all pages will be
+ * aggregated by bitwise NOR. That means a flag will be set in the
+ * output if that flag is clear for all pages in the range.
+ * The minimum migration granularity throughout the range will be
+ * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY.
  *
  * Querying of accessibility attributes works by initializing the
  * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the
  * GPUID being queried. Multiple attributes can be given to allow
  * querying multiple GPUIDs. The ioctl function overwrites the
  * attribute type to indicate the access for the specified GPU.
- *
- * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS is invalid for
- * @KFD_IOCTL_SVM_OP_GET_ATTR.
  */
 struct kfd_ioctl_svm_args {
        __u64 start_addr;
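For illustration, a hedged userspace sketch of the new query semantics: both the SET_FLAGS and CLR_FLAGS attributes are passed in a single GET_ATTR call on the /dev/kfd file descriptor and come back with the AND/NOR-aggregated values. Struct layout, constant names and include path are assumptions based on kfd_ioctl.h.

.. code-block:: C

	#include <stdint.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kfd_ioctl.h>	/* include path depends on the setup */

	/* Sketch: report which SVM flags are set on every page (AND) and which
	 * are clear on every page (NOR) of the given range. */
	static int svm_query_flags(int kfd_fd, uint64_t start, uint64_t size,
				   uint32_t *all_set, uint32_t *all_clear)
	{
		int ret;
		struct kfd_ioctl_svm_args *args =
			calloc(1, sizeof(*args) + 2 * sizeof(args->attrs[0]));

		if (!args)
			return -1;

		args->start_addr = start;
		args->size = size;
		args->op = KFD_IOCTL_SVM_OP_GET_ATTR;
		args->nattr = 2;
		args->attrs[0].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
		args->attrs[1].type = KFD_IOCTL_SVM_ATTR_CLR_FLAGS;

		ret = ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
		if (!ret) {
			*all_set = args->attrs[0].value;   /* set on every page */
			*all_clear = args->attrs[1].value; /* clear on every page */
		}

		free(args);
		return ret;
	}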
index 6ddf646..0d31a6d 100644 (file)
@@ -514,7 +514,7 @@ static int ac97_bus_probe(struct device *dev)
        return ret;
 }
 
-static int ac97_bus_remove(struct device *dev)
+static void ac97_bus_remove(struct device *dev)
 {
        struct ac97_codec_device *adev = to_ac97_device(dev);
        struct ac97_codec_driver *adrv = to_ac97_driver(dev->driver);
@@ -522,7 +522,7 @@ static int ac97_bus_remove(struct device *dev)
 
        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
-               return ret;
+               return;
 
        ret = adrv->remove(adev);
        pm_runtime_put_noidle(dev);
@@ -530,8 +530,6 @@ static int ac97_bus_remove(struct device *dev)
                ac97_put_disable_clk(adev);
 
        pm_runtime_disable(dev);
-
-       return ret;
 }
 
 static struct bus_type ac97_bus_type = {
index 002fb5b..c9579d9 100644 (file)
@@ -104,7 +104,7 @@ static int soundbus_uevent(struct device *dev, struct kobj_uevent_env *env)
        return retval;
 }
 
-static int soundbus_device_remove(struct device *dev)
+static void soundbus_device_remove(struct device *dev)
 {
        struct soundbus_dev * soundbus_dev = to_soundbus_device(dev);
        struct soundbus_driver * drv = to_soundbus_driver(dev->driver);
@@ -112,8 +112,6 @@ static int soundbus_device_remove(struct device *dev)
        if (dev->driver && drv->remove)
                drv->remove(soundbus_dev);
        soundbus_dev_put(soundbus_dev);
-
-       return 0;
 }
 
 static void soundbus_device_shutdown(struct device *dev)