Merge tag 'gvt-next-2020-03-10' of https://github.com/intel/gvt-linux into drm-intel...
author Rodrigo Vivi <rodrigo.vivi@intel.com>
Tue, 10 Mar 2020 22:46:28 +0000 (15:46 -0700)
committer Rodrigo Vivi <rodrigo.vivi@intel.com>
Tue, 10 Mar 2020 22:46:29 +0000 (15:46 -0700)
gvt-next-2020-03-10

- Fix CFL dmabuf display after vfio edid enabling (Tina)
- Clean up scan non-priv batch debugfs entry (Chris)
- Use intel engines initialized in gvt, cleanup previous ring id (Chris)
- Use intel_gt instead (Chris)

Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200310081928.GG28483@zhen-hp.sh.intel.com
drivers/gpu/drm/i915/gvt/firmware.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gvt.c
drivers/gpu/drm/i915/gvt/scheduler.c

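The change common to all four files is dropping the cached gvt->dev_priv backpointer: GVT-g code now keeps a struct intel_gt pointer and reaches the i915 device, uncore and GGTT through it. A rough sketch of the pointer chain the hunks below rely on (fields abridged here for orientation, not the full kernel definitions in gvt.h and gt/intel_gt_types.h):

        /* Abridged sketch of the structures involved, for orientation only. */
        struct intel_gt {
                struct drm_i915_private *i915;  /* owning device */
                struct intel_uncore *uncore;    /* MMIO access */
                struct i915_ggtt *ggtt;         /* global GTT */
        };

        struct intel_gvt {
                struct intel_gt *gt;            /* replaces the old dev_priv field */
                /* ... */
        };

So gvt->dev_priv becomes gvt->gt->i915, while MMIO and GGTT users take gvt->gt->uncore and gvt->gt->ggtt directly.
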
@@@ -68,9 -68,7 +68,7 @@@ static struct bin_attribute firmware_at
  
  static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
  {
-       struct drm_i915_private *i915 = gvt->dev_priv;
-       *(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore,
+       *(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore,
                                                            _MMIO(offset));
        return 0;
  }
@@@ -78,7 -76,7 +76,7 @@@
  static int expose_firmware_sysfs(struct intel_gvt *gvt)
  {
        struct intel_gvt_device_info *info = &gvt->device_info;
-       struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+       struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
        struct gvt_firmware_header *h;
        void *firmware;
        void *p;
  
  static void clean_firmware_sysfs(struct intel_gvt *gvt)
  {
-       struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+       struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
  
        device_remove_bin_file(&pdev->dev, &firmware_attr);
        vfree(firmware_attr.private);
@@@ -146,15 -144,14 +144,14 @@@ void intel_gvt_free_firmware(struct int
                clean_firmware_sysfs(gvt);
  
        kfree(gvt->firmware.cfg_space);
 -      kfree(gvt->firmware.mmio);
 +      vfree(gvt->firmware.mmio);
  }
  
  static int verify_firmware(struct intel_gvt *gvt,
                           const struct firmware *fw)
  {
        struct intel_gvt_device_info *info = &gvt->device_info;
-       struct drm_i915_private *dev_priv = gvt->dev_priv;
-       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
        struct gvt_firmware_header *h;
        unsigned long id, crc32_start;
        const void *mem;
@@@ -208,8 -205,7 +205,7 @@@ invalid_firmware
  int intel_gvt_load_firmware(struct intel_gvt *gvt)
  {
        struct intel_gvt_device_info *info = &gvt->device_info;
-       struct drm_i915_private *dev_priv = gvt->dev_priv;
-       struct pci_dev *pdev = dev_priv->drm.pdev;
+       struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
        struct intel_gvt_firmware *firmware = &gvt->firmware;
        struct gvt_firmware_header *h;
        const struct firmware *fw;
  
        firmware->cfg_space = mem;
  
 -      mem = kmalloc(info->mmio_size, GFP_KERNEL);
 +      mem = vmalloc(info->mmio_size);
        if (!mem) {
                kfree(path);
                kfree(firmware->cfg_space);
  
        gvt_dbg_core("request hw state firmware %s...\n", path);
  
-       ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
+       ret = request_firmware(&fw, path, &gvt->gt->i915->drm.pdev->dev);
        kfree(path);
  
        if (ret)
@@@ -71,7 -71,7 +71,7 @@@ bool intel_gvt_ggtt_validate_range(stru
  /* translate a guest gmadr to host gmadr */
  int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
  {
-       struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+       struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
  
        if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
                     "invalid guest gmadr %llx\n", g_addr))
@@@ -89,7 -89,7 +89,7 @@@
  /* translate a host gmadr to guest gmadr */
  int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
  {
-       struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+       struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
  
        if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
                     "invalid host gmadr %llx\n", h_addr))
@@@ -279,24 -279,23 +279,23 @@@ static inline int get_pse_type(int type
        return gtt_type_table[type].pse_entry_type;
  }
  
- static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+ static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
  {
-       void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+       void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
  
        return readq(addr);
  }
  
- static void ggtt_invalidate(struct drm_i915_private *dev_priv)
+ static void ggtt_invalidate(struct intel_gt *gt)
  {
-       mmio_hw_access_pre(dev_priv);
-       I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
-       mmio_hw_access_post(dev_priv);
+       mmio_hw_access_pre(gt);
+       intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+       mmio_hw_access_post(gt);
  }
  
- static void write_pte64(struct drm_i915_private *dev_priv,
-               unsigned long index, u64 pte)
+ static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
  {
-       void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+       void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
  
        writeq(pte, addr);
  }
@@@ -319,7 -318,7 +318,7 @@@ static inline int gtt_get_entry64(void 
                if (WARN_ON(ret))
                        return ret;
        } else if (!pt) {
-               e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+               e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
        } else {
                e->val64 = *((u64 *)pt + index);
        }
@@@ -344,7 -343,7 +343,7 @@@ static inline int gtt_set_entry64(void 
                if (WARN_ON(ret))
                        return ret;
        } else if (!pt) {
-               write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+               write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
        } else {
                *((u64 *)pt + index) = e->val64;
        }
@@@ -738,7 -737,7 +737,7 @@@ static int detach_oos_page(struct intel
  
  static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
  {
-       struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
+       struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;
  
        trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
  
@@@ -823,7 -822,7 +822,7 @@@ static int reclaim_one_ppgtt_mm(struct 
  static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
                struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
  {
-       struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+       struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
        struct intel_vgpu_ppgtt_spt *spt = NULL;
        dma_addr_t daddr;
        int ret;
@@@ -944,7 -943,7 +943,7 @@@ static int ppgtt_invalidate_spt(struct 
  static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
                struct intel_gvt_gtt_entry *e)
  {
-       struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+       struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        struct intel_vgpu_ppgtt_spt *s;
        enum intel_gvt_gtt_type cur_pt_type;
@@@ -1051,7 -1050,7 +1050,7 @@@ fail
  
  static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
  {
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
  
        if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
                u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
@@@ -1160,7 -1159,7 +1159,7 @@@ static int is_2MB_gtt_possible(struct i
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        unsigned long pfn;
  
-       if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+       if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
                return 0;
  
        pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
@@@ -1963,11 -1962,7 +1962,11 @@@ void _intel_vgpu_mm_release(struct kre
  
        if (mm->type == INTEL_GVT_MM_PPGTT) {
                list_del(&mm->ppgtt_mm.list);
 +
 +              mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
                list_del(&mm->ppgtt_mm.lru_list);
 +              mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
 +
                invalidate_ppgtt_mm(mm);
        } else {
                vfree(mm->ggtt_mm.virtual_ggtt);
@@@ -2321,7 -2316,7 +2320,7 @@@ out
        ggtt_invalidate_pte(vgpu, &e);
  
        ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
-       ggtt_invalidate(gvt->dev_priv);
+       ggtt_invalidate(gvt->gt);
        return 0;
  }
  
@@@ -2354,14 -2349,14 +2353,14 @@@ int intel_vgpu_emulate_ggtt_mmio_write(
  static int alloc_scratch_pages(struct intel_vgpu *vgpu,
                enum intel_gvt_gtt_type type)
  {
-       struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+       struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
        struct intel_vgpu_gtt *gtt = &vgpu->gtt;
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
        int page_entry_num = I915_GTT_PAGE_SIZE >>
                                vgpu->gvt->device_info.gtt_entry_size_shift;
        void *scratch_pt;
        int i;
-       struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+       struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
        dma_addr_t daddr;
  
        if (drm_WARN_ON(&i915->drm,
  static int release_scratch_page_tree(struct intel_vgpu *vgpu)
  {
        int i;
-       struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+       struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
        dma_addr_t daddr;
  
        for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
@@@ -2691,7 -2686,7 +2690,7 @@@ int intel_gvt_init_gtt(struct intel_gv
  {
        int ret;
        void *page;
-       struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+       struct device *dev = &gvt->gt->i915->drm.pdev->dev;
        dma_addr_t daddr;
  
        gvt_dbg_core("init gtt\n");
   */
  void intel_gvt_clean_gtt(struct intel_gvt *gvt)
  {
-       struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+       struct device *dev = &gvt->gt->i915->drm.pdev->dev;
        dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
                                        I915_GTT_PAGE_SHIFT);
  
@@@ -2788,7 -2783,6 +2787,6 @@@ void intel_vgpu_invalidate_ppgtt(struc
  void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
  {
        struct intel_gvt *gvt = vgpu->gvt;
-       struct drm_i915_private *dev_priv = gvt->dev_priv;
        struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
        struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
        struct intel_gvt_gtt_entry old_entry;
                ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
        }
  
-       ggtt_invalidate(dev_priv);
+       ggtt_invalidate(gvt->gt);
  }
  
  /**
@@@ -35,7 -35,6 +35,7 @@@
  #include <linux/kthread.h>
  
  #include "i915_drv.h"
 +#include "intel_gvt.h"
  #include "gvt.h"
  #include <linux/vfio.h>
  #include <linux/mdev.h>
@@@ -50,15 -49,15 +50,15 @@@ static const char * const supported_hyp
  static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
                const char *name)
  {
+       const char *driver_name =
+               dev_driver_string(&gvt->gt->i915->drm.pdev->dev);
        int i;
-       struct intel_vgpu_type *t;
-       const char *driver_name = dev_driver_string(
-                       &gvt->dev_priv->drm.pdev->dev);
  
+       name += strlen(driver_name) + 1;
        for (i = 0; i < gvt->num_types; i++) {
-               t = &gvt->types[i];
-               if (!strncmp(t->name, name + strlen(driver_name) + 1,
-                       sizeof(t->name)))
+               struct intel_vgpu_type *t = &gvt->types[i];
+               if (!strncmp(t->name, name, sizeof(t->name)))
                        return t;
        }
  
@@@ -190,7 -189,7 +190,7 @@@ static const struct intel_gvt_ops intel
  static void init_device_info(struct intel_gvt *gvt)
  {
        struct intel_gvt_device_info *info = &gvt->device_info;
-       struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+       struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
  
        info->max_support_vgpus = 8;
        info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
@@@ -256,17 -255,17 +256,17 @@@ static int init_service_thread(struct i
  
  /**
   * intel_gvt_clean_device - clean a GVT device
-  * @dev_priv: i915 private
+  * @i915: i915 private
   *
   * This function is called at the driver unloading stage, to free the
   * resources owned by a GVT device.
   *
   */
- void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+ void intel_gvt_clean_device(struct drm_i915_private *i915)
  {
-       struct intel_gvt *gvt = to_gvt(dev_priv);
+       struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
  
-       if (drm_WARN_ON(&dev_priv->drm, !gvt))
+       if (drm_WARN_ON(&i915->drm, !gvt))
                return;
  
        intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
        intel_gvt_clean_mmio_info(gvt);
        idr_destroy(&gvt->vgpu_idr);
  
-       kfree(dev_priv->gvt);
-       dev_priv->gvt = NULL;
+       kfree(i915->gvt);
  }
  
  /**
   * intel_gvt_init_device - initialize a GVT device
-  * @dev_priv: drm i915 private data
+  * @i915: drm i915 private data
   *
   * This function is called at the initialization stage, to initialize
   * necessary GVT components.
   * Zero on success, negative error code if failed.
   *
   */
- int intel_gvt_init_device(struct drm_i915_private *dev_priv)
+ int intel_gvt_init_device(struct drm_i915_private *i915)
  {
        struct intel_gvt *gvt;
        struct intel_vgpu *vgpu;
        int ret;
  
-       if (drm_WARN_ON(&dev_priv->drm, dev_priv->gvt))
+       if (drm_WARN_ON(&i915->drm, i915->gvt))
                return -EEXIST;
  
        gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
        spin_lock_init(&gvt->scheduler.mmio_context_lock);
        mutex_init(&gvt->lock);
        mutex_init(&gvt->sched_lock);
-       gvt->dev_priv = dev_priv;
+       gvt->gt = &i915->gt;
+       i915->gvt = gvt;
  
        init_device_info(gvt);
  
        intel_gvt_debugfs_init(gvt);
  
        gvt_dbg_core("gvt device initialization is done\n");
-       dev_priv->gvt = gvt;
-       intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
+       intel_gvt_host.dev = &i915->drm.pdev->dev;
        intel_gvt_host.initialized = true;
        return 0;
  
@@@ -403,6 -401,7 +402,7 @@@ out_clean_mmio_info
  out_clean_idr:
        idr_destroy(&gvt->vgpu_idr);
        kfree(gvt);
+       i915->gvt = NULL;
        return ret;
  }
  
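
The kernel-doc above describes when these two entry points run; as a hedged sketch of the expected call order (the real call sites are in i915's driver load/unload paths, which are not part of this diff, and the error label is illustrative):

        /* driver load: allocate gvt and publish it via i915->gvt */
        ret = intel_gvt_init_device(i915);
        if (ret)
                goto out_err;

        /* ... normal operation ... */

        /* driver unload: fetch_and_zero(&i915->gvt), then free its resources */
        intel_gvt_clean_device(i915);
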
@@@ -84,7 -84,7 +84,7 @@@ static void update_shadow_pdps(struct i
  static void sr_oa_regs(struct intel_vgpu_workload *workload,
                u32 *reg_state, bool save)
  {
-       struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+       struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
        u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
        u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
        int i = 0;
@@@ -98,7 -98,7 +98,7 @@@
                i915_mmio_reg_offset(EU_PERF_CNTL6),
        };
  
-       if (workload->ring_id != RCS0)
+       if (workload->engine->id != RCS0)
                return;
  
        if (save) {
@@@ -128,7 -128,6 +128,6 @@@ static int populate_shadow_context(stru
  {
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
-       int ring_id = workload->ring_id;
        struct drm_i915_gem_object *ctx_obj =
                workload->req->context->state->obj;
        struct execlist_ring_context *shadow_ring_context;
        COPY_REG_MASKED(ctx_ctrl);
        COPY_REG(ctx_timestamp);
  
-       if (ring_id == RCS0) {
+       if (workload->engine->id == RCS0) {
                COPY_REG(bb_per_ctx_ptr);
                COPY_REG(rcs_indirect_ctx);
                COPY_REG(rcs_indirect_ctx_offset);
        if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
                return 0;
  
-       gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
-                       workload->ctx_desc.lrca);
-       context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+       gvt_dbg_sched("ring %s workload lrca %x",
+                     workload->engine->name,
+                     workload->ctx_desc.lrca);
  
+       context_page_num = workload->engine->context_size;
        context_page_num = context_page_num >> PAGE_SHIFT;
  
-       if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
+       if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
                context_page_num = 19;
  
        i = 2;
@@@ -210,38 -209,43 +209,43 @@@ static inline bool is_gvt_request(struc
        return intel_context_force_single_submission(rq->context);
  }
  
- static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+ static void save_ring_hw_state(struct intel_vgpu *vgpu,
+                              const struct intel_engine_cs *engine)
  {
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+       struct intel_uncore *uncore = engine->uncore;
        i915_reg_t reg;
  
-       reg = RING_INSTDONE(ring_base);
-       vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
-       reg = RING_ACTHD(ring_base);
-       vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
-       reg = RING_ACTHD_UDW(ring_base);
-       vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+       reg = RING_INSTDONE(engine->mmio_base);
+       vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+               intel_uncore_read(uncore, reg);
+       reg = RING_ACTHD(engine->mmio_base);
+       vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+               intel_uncore_read(uncore, reg);
+       reg = RING_ACTHD_UDW(engine->mmio_base);
+       vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+               intel_uncore_read(uncore, reg);
  }
  
  static int shadow_context_status_change(struct notifier_block *nb,
                unsigned long action, void *data)
  {
-       struct i915_request *req = data;
+       struct i915_request *rq = data;
        struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
-                               shadow_ctx_notifier_block[req->engine->id]);
+                               shadow_ctx_notifier_block[rq->engine->id]);
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-       enum intel_engine_id ring_id = req->engine->id;
+       enum intel_engine_id ring_id = rq->engine->id;
        struct intel_vgpu_workload *workload;
        unsigned long flags;
  
-       if (!is_gvt_request(req)) {
+       if (!is_gvt_request(rq)) {
                spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
                if (action == INTEL_CONTEXT_SCHEDULE_IN &&
                    scheduler->engine_owner[ring_id]) {
                        /* Switch ring from vGPU to host. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
-                                             NULL, ring_id);
+                                             NULL, rq->engine);
                        scheduler->engine_owner[ring_id] = NULL;
                }
                spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
                if (workload->vgpu != scheduler->engine_owner[ring_id]) {
                        /* Switch ring from host to vGPU or vGPU to vGPU. */
                        intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
-                                             workload->vgpu, ring_id);
+                                             workload->vgpu, rq->engine);
                        scheduler->engine_owner[ring_id] = workload->vgpu;
                } else
                        gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
                atomic_set(&workload->shadow_ctx_active, 1);
                break;
        case INTEL_CONTEXT_SCHEDULE_OUT:
-               save_ring_hw_state(workload->vgpu, ring_id);
+               save_ring_hw_state(workload->vgpu, rq->engine);
                atomic_set(&workload->shadow_ctx_active, 0);
                break;
        case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
-               save_ring_hw_state(workload->vgpu, ring_id);
+               save_ring_hw_state(workload->vgpu, rq->engine);
                break;
        default:
                WARN_ON(1);
@@@ -391,7 -395,7 +395,7 @@@ intel_gvt_workload_req_alloc(struct int
        if (workload->req)
                return 0;
  
-       rq = i915_request_create(s->shadow[workload->ring_id]);
+       rq = i915_request_create(s->shadow[workload->engine->id]);
        if (IS_ERR(rq)) {
                gvt_vgpu_err("fail to allocate gem request\n");
                return PTR_ERR(rq);
@@@ -420,15 -424,16 +424,16 @@@ int intel_gvt_scan_and_shadow_workload(
        if (workload->shadow)
                return 0;
  
-       if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
-               shadow_context_descriptor_update(s->shadow[workload->ring_id],
+       if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
+               shadow_context_descriptor_update(s->shadow[workload->engine->id],
                                                 workload);
  
        ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
        if (ret)
                return ret;
  
-       if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
+       if (workload->engine->id == RCS0 &&
+           workload->wa_ctx.indirect_ctx.size) {
                ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
                if (ret)
                        goto err_shadow;
  
        workload->shadow = true;
        return 0;
  err_shadow:
        release_shadow_wa_ctx(&workload->wa_ctx);
        return ret;
@@@ -567,12 -573,8 +573,8 @@@ static int prepare_shadow_wa_ctx(struc
  
  static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
  {
-       struct intel_vgpu *vgpu = workload->vgpu;
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-       u32 ring_base;
-       ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
-       vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
+       vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
+               workload->rb_start;
  }
  
  static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
@@@ -608,7 -610,6 +610,6 @@@ static int prepare_workload(struct inte
  {
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_vgpu_submission *s = &vgpu->submission;
-       int ring = workload->ring_id;
        int ret = 0;
  
        ret = intel_vgpu_pin_mm(workload->shadow_mm);
  
        update_shadow_pdps(workload);
  
-       set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
+       set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
  
        ret = intel_vgpu_sync_oos_pages(workload->vgpu);
        if (ret) {
@@@ -677,11 -678,10 +678,10 @@@ static int dispatch_workload(struct int
  {
        struct intel_vgpu *vgpu = workload->vgpu;
        struct i915_request *rq;
-       int ring_id = workload->ring_id;
        int ret;
  
-       gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
-               ring_id, workload);
+       gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
+                     workload->engine->name, workload);
  
        mutex_lock(&vgpu->vgpu_lock);
  
@@@ -710,8 -710,8 +710,8 @@@ out
        }
  
        if (!IS_ERR_OR_NULL(workload->req)) {
-               gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
-                               ring_id, workload->req);
+               gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
+                             workload->engine->name, workload->req);
                i915_request_add(workload->req);
                workload->dispatched = true;
        }
@@@ -722,8 -722,8 +722,8 @@@ err_req
        return ret;
  }
  
- static struct intel_vgpu_workload *pick_next_workload(
-               struct intel_gvt *gvt, int ring_id)
+ static struct intel_vgpu_workload *
+ pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
  {
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
         * bail out
         */
        if (!scheduler->current_vgpu) {
-               gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
+               gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
                goto out;
        }
  
        if (scheduler->need_reschedule) {
-               gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
+               gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
                goto out;
        }
  
        if (!scheduler->current_vgpu->active ||
-           list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+           list_empty(workload_q_head(scheduler->current_vgpu, engine)))
                goto out;
  
        /*
         * still have current workload, maybe the workload disptacher
         * fail to submit it for some reason, resubmit it.
         */
-       if (scheduler->current_workload[ring_id]) {
-               workload = scheduler->current_workload[ring_id];
-               gvt_dbg_sched("ring id %d still have current workload %p\n",
-                               ring_id, workload);
+       if (scheduler->current_workload[engine->id]) {
+               workload = scheduler->current_workload[engine->id];
+               gvt_dbg_sched("ring %s still have current workload %p\n",
+                             engine->name, workload);
                goto out;
        }
  
         * will wait the current workload is finished when trying to
         * schedule out a vgpu.
         */
-       scheduler->current_workload[ring_id] = container_of(
-                       workload_q_head(scheduler->current_vgpu, ring_id)->next,
-                       struct intel_vgpu_workload, list);
+       scheduler->current_workload[engine->id] =
+               list_first_entry(workload_q_head(scheduler->current_vgpu,
+                                                engine),
+                                struct intel_vgpu_workload, list);
  
-       workload = scheduler->current_workload[ring_id];
+       workload = scheduler->current_workload[engine->id];
  
-       gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
+       gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
  
        atomic_inc(&workload->vgpu->submission.running_workload_num);
  out:
@@@ -783,14 -784,12 +784,12 @@@ static void update_guest_context(struc
  {
        struct i915_request *rq = workload->req;
        struct intel_vgpu *vgpu = workload->vgpu;
-       struct intel_gvt *gvt = vgpu->gvt;
        struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
        struct execlist_ring_context *shadow_ring_context;
        struct page *page;
        void *src;
        unsigned long context_gpa, context_page_num;
        int i;
-       struct drm_i915_private *dev_priv = gvt->dev_priv;
        u32 ring_base;
        u32 head, tail;
        u16 wrap_count;
  
        head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
  
-       ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+       ring_base = rq->engine->mmio_base;
        vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
        vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
  
        context_page_num = rq->engine->context_size;
        context_page_num = context_page_num >> PAGE_SHIFT;
  
-       if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
+       if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
                context_page_num = 19;
  
        i = 2;
@@@ -869,7 -868,7 +868,7 @@@ void intel_vgpu_clean_workloads(struct 
                                intel_engine_mask_t engine_mask)
  {
        struct intel_vgpu_submission *s = &vgpu->submission;
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
        struct intel_engine_cs *engine;
        struct intel_vgpu_workload *pos, *n;
        intel_engine_mask_t tmp;
@@@ -966,54 -965,47 +965,47 @@@ static void complete_current_workload(s
        mutex_unlock(&vgpu->vgpu_lock);
  }
  
- struct workload_thread_param {
-       struct intel_gvt *gvt;
-       int ring_id;
- };
- static int workload_thread(void *priv)
+ static int workload_thread(void *arg)
  {
-       struct workload_thread_param *p = (struct workload_thread_param *)priv;
-       struct intel_gvt *gvt = p->gvt;
-       int ring_id = p->ring_id;
+       struct intel_engine_cs *engine = arg;
+       const bool need_force_wake = INTEL_GEN(engine->i915) >= 9;
+       struct intel_gvt *gvt = engine->i915->gvt;
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
        struct intel_vgpu_workload *workload = NULL;
        struct intel_vgpu *vgpu = NULL;
        int ret;
-       bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
-       struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
-       kfree(p);
  
-       gvt_dbg_core("workload thread for ring %d started\n", ring_id);
+       gvt_dbg_core("workload thread for ring %s started\n", engine->name);
  
        while (!kthread_should_stop()) {
-               add_wait_queue(&scheduler->waitq[ring_id], &wait);
+               intel_wakeref_t wakeref;
+               add_wait_queue(&scheduler->waitq[engine->id], &wait);
                do {
-                       workload = pick_next_workload(gvt, ring_id);
+                       workload = pick_next_workload(gvt, engine);
                        if (workload)
                                break;
                        wait_woken(&wait, TASK_INTERRUPTIBLE,
                                   MAX_SCHEDULE_TIMEOUT);
                } while (!kthread_should_stop());
-               remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+               remove_wait_queue(&scheduler->waitq[engine->id], &wait);
  
                if (!workload)
                        break;
  
-               gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
-                               workload->ring_id, workload,
-                               workload->vgpu->id);
+               gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
+                             engine->name, workload,
+                             workload->vgpu->id);
  
-               intel_runtime_pm_get(rpm);
+               wakeref = intel_runtime_pm_get(engine->uncore->rpm);
  
-               gvt_dbg_sched("ring id %d will dispatch workload %p\n",
-                               workload->ring_id, workload);
+               gvt_dbg_sched("ring %s will dispatch workload %p\n",
+                             engine->name, workload);
  
                if (need_force_wake)
-                       intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
-                                       FORCEWAKE_ALL);
+                       intel_uncore_forcewake_get(engine->uncore,
+                                                  FORCEWAKE_ALL);
                /*
                 * Update the vReg of the vGPU which submitted this
                 * workload. The vGPU may use these registers for checking
                        goto complete;
                }
  
-               gvt_dbg_sched("ring id %d wait workload %p\n",
-                               workload->ring_id, workload);
+               gvt_dbg_sched("ring %s wait workload %p\n",
+                             engine->name, workload);
                i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
  
  complete:
                gvt_dbg_sched("will complete workload %p, status: %d\n",
-                               workload, workload->status);
+                             workload, workload->status);
  
-               complete_current_workload(gvt, ring_id);
+               complete_current_workload(gvt, engine->id);
  
                if (need_force_wake)
-                       intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
-                                       FORCEWAKE_ALL);
+                       intel_uncore_forcewake_put(engine->uncore,
+                                                  FORCEWAKE_ALL);
  
-               intel_runtime_pm_put_unchecked(rpm);
+               intel_runtime_pm_put(engine->uncore->rpm, wakeref);
                if (ret && (vgpu_is_vm_unhealthy(ret)))
                        enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
        }
@@@ -1073,7 -1065,7 +1065,7 @@@ void intel_gvt_clean_workload_scheduler
  
        gvt_dbg_core("clean workload scheduler\n");
  
-       for_each_engine(engine, gvt->dev_priv, i) {
+       for_each_engine(engine, gvt->gt, i) {
                atomic_notifier_chain_unregister(
                                        &engine->context_status_notifier,
                                        &gvt->shadow_ctx_notifier_block[i]);
  int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
  {
        struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
-       struct workload_thread_param *param = NULL;
        struct intel_engine_cs *engine;
        enum intel_engine_id i;
        int ret;
  
        init_waitqueue_head(&scheduler->workload_complete_wq);
  
-       for_each_engine(engine, gvt->dev_priv, i) {
+       for_each_engine(engine, gvt->gt, i) {
                init_waitqueue_head(&scheduler->waitq[i]);
  
-               param = kzalloc(sizeof(*param), GFP_KERNEL);
-               if (!param) {
-                       ret = -ENOMEM;
-                       goto err;
-               }
-               param->gvt = gvt;
-               param->ring_id = i;
-               scheduler->thread[i] = kthread_run(workload_thread, param,
-                       "gvt workload %d", i);
+               scheduler->thread[i] = kthread_run(workload_thread, engine,
+                                                  "gvt:%s", engine->name);
                if (IS_ERR(scheduler->thread[i])) {
                        gvt_err("fail to create workload thread\n");
                        ret = PTR_ERR(scheduler->thread[i]);
                atomic_notifier_chain_register(&engine->context_status_notifier,
                                        &gvt->shadow_ctx_notifier_block[i]);
        }
        return 0;
  err:
        intel_gvt_clean_workload_scheduler(gvt);
-       kfree(param);
-       param = NULL;
        return ret;
  }
  
@@@ -1160,7 -1142,7 +1142,7 @@@ void intel_vgpu_clean_submission(struc
        intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
  
        i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
-       for_each_engine(engine, vgpu->gvt->dev_priv, id)
+       for_each_engine(engine, vgpu->gvt->gt, id)
                intel_context_unpin(s->shadow[id]);
  
        kmem_cache_destroy(s->workloads);
@@@ -1217,7 -1199,7 +1199,7 @@@ i915_context_ppgtt_root_save(struct int
   */
  int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
  {
-       struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+       struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
        struct intel_vgpu_submission *s = &vgpu->submission;
        struct intel_engine_cs *engine;
        struct i915_ppgtt *ppgtt;
  
        i915_context_ppgtt_root_save(s, ppgtt);
  
-       for_each_engine(engine, i915, i) {
+       for_each_engine(engine, vgpu->gvt->gt, i) {
                struct intel_context *ce;
  
                INIT_LIST_HEAD(&s->workload_q_head[i]);
                ce->vm = i915_vm_get(&ppgtt->vm);
                intel_context_set_single_submission(ce);
  
 -              if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
 +              /* Max ring buffer size */
 +              if (!intel_uc_wants_guc_submission(&engine->gt->uc)) {
                        const unsigned int ring_size = 512 * SZ_4K;
  
                        ce->ring = __intel_context_ring_size(ring_size);
  
  out_shadow_ctx:
        i915_context_ppgtt_root_restore(s, ppgtt);
-       for_each_engine(engine, i915, i) {
+       for_each_engine(engine, vgpu->gvt->gt, i) {
                if (IS_ERR(s->shadow[i]))
                        break;
  
@@@ -1310,7 -1291,7 +1292,7 @@@ int intel_vgpu_select_submission_ops(st
                                     intel_engine_mask_t engine_mask,
                                     unsigned int interface)
  {
-       struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+       struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
        struct intel_vgpu_submission *s = &vgpu->submission;
        const struct intel_vgpu_submission_ops *ops[] = {
                [INTEL_VGPU_EXECLIST_SUBMISSION] =
@@@ -1444,7 -1425,7 +1426,7 @@@ static int prepare_mm(struct intel_vgpu
  /**
   * intel_vgpu_create_workload - create a vGPU workload
   * @vgpu: a vGPU
-  * @ring_id: ring index
+  * @engine: the engine
   * @desc: a guest context descriptor
   *
   * This function is called when creating a vGPU workload.
   *
   */
  struct intel_vgpu_workload *
- intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+ intel_vgpu_create_workload(struct intel_vgpu *vgpu,
+                          const struct intel_engine_cs *engine,
                           struct execlist_ctx_descriptor_format *desc)
  {
        struct intel_vgpu_submission *s = &vgpu->submission;
-       struct list_head *q = workload_q_head(vgpu, ring_id);
+       struct list_head *q = workload_q_head(vgpu, engine);
        struct intel_vgpu_workload *last_workload = NULL;
        struct intel_vgpu_workload *workload = NULL;
-       struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
        u64 ring_context_gpa;
        u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
        u32 guest_head;
        list_for_each_entry_reverse(last_workload, q, list) {
  
                if (same_context(&last_workload->ctx_desc, desc)) {
-                       gvt_dbg_el("ring id %d cur workload == last\n",
-                                       ring_id);
+                       gvt_dbg_el("ring %s cur workload == last\n",
+                                  engine->name);
                        gvt_dbg_el("ctx head %x real head %lx\n", head,
-                                       last_workload->rb_tail);
+                                  last_workload->rb_tail);
                        /*
                         * cannot use guest context head pointer here,
                         * as it might not be updated at this time
                }
        }
  
-       gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+       gvt_dbg_el("ring %s begin a new workload\n", engine->name);
  
        /* record some ring buffer register values for scan and shadow */
        intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
        if (IS_ERR(workload))
                return workload;
  
-       workload->ring_id = ring_id;
+       workload->engine = engine;
        workload->ctx_desc = *desc;
        workload->ring_context_gpa = ring_context_gpa;
        workload->rb_head = head;
        workload->rb_start = start;
        workload->rb_ctl = ctl;
  
-       if (ring_id == RCS0) {
+       if (engine->id == RCS0) {
                intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                        RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
                intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
                }
        }
  
-       gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
-                       workload, ring_id, head, tail, start, ctl);
+       gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
+                  workload, engine->name, head, tail, start, ctl);
  
        ret = prepare_mm(workload);
        if (ret) {
        /* Only scan and shadow the first workload in the queue
         * as there is only one pre-allocated buf-obj for shadow.
         */
-       if (list_empty(workload_q_head(vgpu, ring_id))) {
-               intel_runtime_pm_get(&dev_priv->runtime_pm);
-               ret = intel_gvt_scan_and_shadow_workload(workload);
-               intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+       if (list_empty(q)) {
+               intel_wakeref_t wakeref;
+               with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
+                       ret = intel_gvt_scan_and_shadow_workload(workload);
        }
  
        if (ret) {
  void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
  {
        list_add_tail(&workload->list,
-               workload_q_head(workload->vgpu, workload->ring_id));
+                     workload_q_head(workload->vgpu, workload->engine));
        intel_gvt_kick_schedule(workload->vgpu->gvt);
-       wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
+       wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
  }