drm/i915/gvt: Do not destroy ppgtt_mm during vGPU D3->D0.
authorColin Xu <colin.xu@intel.com>
Thu, 9 Jul 2020 07:09:56 +0000 (15:09 +0800)
committerZhenyu Wang <zhenyuw@linux.intel.com>
Wed, 29 Jul 2020 06:18:16 +0000 (14:18 +0800)
When the system enters the S3 state, the device enters D3 state while RAM
remains powered. From the vGPU/GVT perspective, ppgtt_mm remains in guest
memory while the vGPU is in D3 state, so that when the guest state
transitions from S3->S0, ppgtt_mm can be re-used and need not be rebuilt.

The previous implementation invalidated and destroyed ppgtt_mm at DMLR,
regardless of whether the power state transition was S0->S3->S0 (guest
suspend or resume) or OFF->S0 (normal boot/reboot); invalidating and
destroying ppgtt_mm is unnecessary in the former transition case.

The patch saves the vGPU D3/D0 transition state when the guest writes
PCI_PM_CTRL in the vGPU's configuration space, then on a later DMLR, GVT
can decide whether or not invalidating and destroying ppgtt_mm is
required. The d3_entered flag is reset after DMLR.

To test this feature, make sure S3 is enabled in QEMU parameters:
i440fx: PIIX4_PM.disable_s3=0
q35: ICH9-LPC.disable_s3=0
Also enable the sleep option in the guest OS if it is disabled.

v2:
- Revise commit message to more accurate description. (Kevin)
- Split patch by logic. (Zhenyu)

Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Hang Yuan <hang.yuan@linux.intel.com>
Signed-off-by: Colin Xu <colin.xu@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200709071002.247960-2-colin.xu@intel.com
drivers/gpu/drm/i915/gvt/cfg_space.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/vgpu.c

index 072725a..ad86c5e 100644 (file)
@@ -70,6 +70,7 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
 {
        u8 *cfg_base = vgpu_cfg_space(vgpu);
        u8 mask, new, old;
+       pci_power_t pwr;
        int i = 0;
 
        for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
@@ -91,6 +92,15 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
        /* For other configuration space directly copy as it is. */
        if (i < bytes)
                memcpy(cfg_base + off + i, src + i, bytes - i);
+
+       if (off == vgpu->cfg_space.pmcsr_off && vgpu->cfg_space.pmcsr_off) {
+               pwr = (pci_power_t __force)(*(u16*)(&vgpu_cfg_space(vgpu)[off])
+                       & PCI_PM_CTRL_STATE_MASK);
+               if (pwr == PCI_D3hot)
+                       vgpu->d3_entered = true;
+               gvt_dbg_core("vgpu-%d power status changed to %d\n",
+                            vgpu->id, pwr);
+       }
 }
 
 /**
@@ -366,6 +376,7 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
        struct intel_gvt *gvt = vgpu->gvt;
        const struct intel_gvt_device_info *info = &gvt->device_info;
        u16 *gmch_ctl;
+       u8 next;
 
        memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
               info->cfg_space_size);
@@ -401,6 +412,19 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
                pci_resource_len(gvt->gt->i915->drm.pdev, 2);
 
        memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
+
+       /* PM Support */
+       vgpu->cfg_space.pmcsr_off = 0;
+       if (vgpu_cfg_space(vgpu)[PCI_STATUS] & PCI_STATUS_CAP_LIST) {
+               next = vgpu_cfg_space(vgpu)[PCI_CAPABILITY_LIST];
+               do {
+                       if (vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_ID] == PCI_CAP_ID_PM) {
+                               vgpu->cfg_space.pmcsr_off = next + PCI_PM_CTRL;
+                               break;
+                       }
+                       next = vgpu_cfg_space(vgpu)[next + PCI_CAP_LIST_NEXT];
+               } while (next);
+       }
 }
 
 /**
index 2100161..a3a4305 100644 (file)
@@ -2501,7 +2501,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
        return create_scratch_page_tree(vgpu);
 }
 
-static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
+void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
 {
        struct list_head *pos, *n;
        struct intel_vgpu_mm *mm;
index 320b8d6..52d0d88 100644 (file)
@@ -279,4 +279,6 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
        unsigned int off, void *p_data, unsigned int bytes);
 
+void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
+
 #endif /* _GVT_GTT_H_ */
index a4a6db6..ff7f251 100644 (file)
@@ -106,6 +106,7 @@ struct intel_vgpu_pci_bar {
 struct intel_vgpu_cfg_space {
        unsigned char virtual_cfg_space[PCI_CFG_SPACE_EXP_SIZE];
        struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
+       u32 pmcsr_off;
 };
 
 #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
@@ -198,6 +199,8 @@ struct intel_vgpu {
        struct intel_vgpu_submission submission;
        struct radix_tree_root page_track_tree;
        u32 hws_pga[I915_NUM_ENGINES];
+       /* Set on PCI_D3, reset on DMLR, not reflecting the actual PM state */
+       bool d3_entered;
 
        struct dentry *debugfs;
 
index 7d36162..fb12448 100644 (file)
@@ -257,6 +257,7 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
        intel_gvt_deactivate_vgpu(vgpu);
 
        mutex_lock(&vgpu->vgpu_lock);
+       vgpu->d3_entered = false;
        intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
        intel_vgpu_dmabuf_cleanup(vgpu);
        mutex_unlock(&vgpu->vgpu_lock);
@@ -393,6 +394,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
        idr_init(&vgpu->object_idr);
        intel_vgpu_init_cfg_space(vgpu, param->primary);
+       vgpu->d3_entered = false;
 
        ret = intel_vgpu_init_mmio(vgpu);
        if (ret)
@@ -557,10 +559,15 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
        /* full GPU reset or device model level reset */
        if (engine_mask == ALL_ENGINES || dmlr) {
                intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
-               intel_vgpu_invalidate_ppgtt(vgpu);
+               if (engine_mask == ALL_ENGINES)
+                       intel_vgpu_invalidate_ppgtt(vgpu);
                /*fence will not be reset during virtual reset */
                if (dmlr) {
-                       intel_vgpu_reset_gtt(vgpu);
+                       if(!vgpu->d3_entered) {
+                               intel_vgpu_invalidate_ppgtt(vgpu);
+                               intel_vgpu_destroy_all_ppgtt_mm(vgpu);
+                       }
+                       intel_vgpu_reset_ggtt(vgpu, true);
                        intel_vgpu_reset_resource(vgpu);
                }
 
@@ -573,6 +580,12 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
                        /* only reset the failsafe mode when dmlr reset */
                        vgpu->failsafe = false;
                        vgpu->pv_notified = false;
+                       /*
+                        * PCI_D0 is set before dmlr, so reset d3_entered here
+                        * after done using.
+                        */
+                       if(vgpu->d3_entered)
+                               vgpu->d3_entered = false;
                }
        }