This patch saves/restores the necessary GVT state during i915 suspend/resume
so that a GVT-enabled QEMU VM can continue running.
Only the GGTT, fence regs and MMIOs flagged F_PM_SAVE are saved/restored
for now. GVT saves GGTT entries on each host_entry update, then restores the
saved dirty entries and re-initializes fence regs in the resume routine.
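
On resume, the i915 driver is expected to call the intel_gvt_resume() wrapper,
which replays the saved state in the order fence regs, F_PM_SAVE MMIOs, then
GGTT entries. A minimal sketch of the assumed call site follows (illustrative
only; the exact hook location and the i915_drm_resume() placement are
assumptions, not part of this patch):

  /* Illustrative only: assumed placement in the i915 resume path */
  static int i915_drm_resume(struct drm_device *dev)
  {
          struct drm_i915_private *dev_priv = to_i915(dev);

          /* ... regular i915 resume steps (power, GGTT, MMIO, display) ... */

          /* Replay saved GVT state; no-op unless GVT is active */
          intel_gvt_resume(dev_priv);

          return 0;
  }
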
V2:
- Change kzalloc/kfree to vzalloc/vfree since the space allocated
from kmalloc may not be enough for all saved GGTT entries.
- Keep gvt suspend/resume wrapper in intel_gvt.h/intel_gvt.c and
move the actual implementation to gvt.h/gvt.c. (zhenyu)
- Check that gvt is configured on and active with intel_gvt_active(). (zhenyu)
V3: (zhenyu)
- Fix incorrect copy length: it should be num entries * entry size.
- Use memcpy_toio()/memcpy_fromio() instead of memcpy for iomem.
- Add F_PM_SAVE flags to indicate which MMIOs to save/restore for PM.
V4:
Rebase.
V5:
Fail intel_gvt_save_ggtt with -ENOMEM if allocating memory to save the
ggtt fails. Free the allocated ggtt_entries on failure.
V6:
Save host entry to per-vGPU gtt.ggtt_mm on each host_entry update.
V7:
Restore GGTT entries based on the present bit.
Split fence restore and mmio restore into separate functions.
Reviewed-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Signed-off-by: Hang Yuan <hang.yuan@linux.intel.com>
Signed-off-by: Colin Xu <colin.xu@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20201027045308.158955-1-colin.xu@intel.com
struct intel_gvt_gtt_entry *entry, unsigned long index)
{
struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
+ unsigned long offset = index;
GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
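+ /* Mirror the host entry so it can be restored on resume */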
+ if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
+ offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
+ mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64;
+ } else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
+ offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
+ mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64;
+ }
+
pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}
return ERR_PTR(-ENOMEM);
}
+ mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
+ if (!mm->ggtt_mm.host_ggtt_aperture) {
+ vfree(mm->ggtt_mm.virtual_ggtt);
+ vgpu_free_mm(mm);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
+ if (!mm->ggtt_mm.host_ggtt_hidden) {
+ vfree(mm->ggtt_mm.host_ggtt_aperture);
+ vfree(mm->ggtt_mm.virtual_ggtt);
+ vgpu_free_mm(mm);
+ return ERR_PTR(-ENOMEM);
+ }
+
return mm;
}
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
+ vfree(mm->ggtt_mm.host_ggtt_aperture);
+ vfree(mm->ggtt_mm.host_ggtt_hidden);
}
vgpu_free_mm(mm);
intel_vgpu_destroy_all_ppgtt_mm(vgpu);
intel_vgpu_reset_ggtt(vgpu, true);
}
+
+/**
+ * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
+ * @gvt: intel gvt device
+ *
+ * This function is called at the driver resume stage to restore the
+ * GGTT entries of every vGPU.
+ *
+ */
+void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
+{
+ struct intel_vgpu *vgpu;
+ struct intel_vgpu_mm *mm;
+ int id;
+ gen8_pte_t pte;
+ u32 idx, num_low, num_hi, offset;
+
+ /* Restore dirty host ggtt for all vGPUs */
+ idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
+ mm = vgpu->gtt.ggtt_mm;
+
+ num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
+ offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
+ for (idx = 0; idx < num_low; idx++) {
+ pte = mm->ggtt_mm.host_ggtt_aperture[idx];
+ if (pte & _PAGE_PRESENT)
+ write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
+ }
+
+ num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
+ offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
+ for (idx = 0; idx < num_hi; idx++) {
+ pte = mm->ggtt_mm.host_ggtt_hidden[idx];
+ if (pte & _PAGE_PRESENT)
+ write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
+ }
+ }
+}
} ppgtt_mm;
struct {
void *virtual_ggtt;
+ /* Save/restore for PM */
+ u64 *host_ggtt_aperture;
+ u64 *host_ggtt_hidden;
struct list_head partial_pte_list;
} ggtt_mm;
};
unsigned int off, void *p_data, unsigned int bytes);
void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
+void intel_gvt_restore_ggtt(struct intel_gvt *gvt);
#endif /* _GVT_GTT_H_ */
return ret;
}
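+/* Restore GVT-owned HW state (fence regs, F_PM_SAVE MMIOs, GGTT) on host resume */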
+int
+intel_gvt_pm_resume(struct intel_gvt *gvt)
+{
+ intel_gvt_restore_fence(gvt);
+ intel_gvt_restore_mmio(gvt);
+ intel_gvt_restore_ggtt(gvt);
+ return 0;
+}
+
int
intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
{
#define F_CMD_ACCESS (1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED (1 << 4)
+/* This reg requires save & restore during host PM suspend/resume */
+#define F_PM_SAVE (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
/* This reg is in GVT's mmio save-restor list and in hardware
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
+int intel_gvt_pm_resume(struct intel_gvt *gvt);
#include "trace.h"
#include "mpt.h"
MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
- NULL, gen9_trtte_write);
- MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
+ MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
+ NULL, gen9_trtte_write);
+ MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
+ NULL, gen9_trtt_chicken_write);
MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
}
+
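+/* Re-program every vGPU's fence registers from its vreg copies */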
+void intel_gvt_restore_fence(struct intel_gvt *gvt)
+{
+ struct intel_vgpu *vgpu;
+ int i, id;
+
+ idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
+ mmio_hw_access_pre(gvt->gt);
+ for (i = 0; i < vgpu_fence_sz(vgpu); i++)
+ intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
+ mmio_hw_access_post(gvt->gt);
+ }
+}
+
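+/* Per-MMIO handler: write back the vreg value if the reg is tagged F_PM_SAVE */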
+static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
+ u32 offset, void *data)
+{
+ struct intel_vgpu *vgpu = data;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
+
+ if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
+ I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
+
+ return 0;
+}
+
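+/* Write back all F_PM_SAVE-tagged MMIO registers for every vGPU */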
+void intel_gvt_restore_mmio(struct intel_gvt *gvt)
+{
+ struct intel_vgpu *vgpu;
+ int id;
+
+ idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
+ mmio_hw_access_pre(gvt->gt);
+ intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
+ mmio_hw_access_post(gvt->gt);
+ }
+}
int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
+
+void intel_gvt_restore_fence(struct intel_gvt *gvt);
+void intel_gvt_restore_mmio(struct intel_gvt *gvt);
+
#endif
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_gvt.h"
+#include "gvt/gvt.h"
/**
* DOC: Intel GVT-g host support
intel_gvt_clean_device(dev_priv);
}
+
+/**
+ * intel_gvt_resume - GVT resume routine wrapper
+ *
+ * @dev_priv: drm i915 private *
+ *
+ * This function is called at the i915 driver resume stage to restore the HW
+ * status required by GVT so that vGPUs can continue running after resume.
+ */
+void intel_gvt_resume(struct drm_i915_private *dev_priv)
+{
+ if (intel_gvt_active(dev_priv))
+ intel_gvt_pm_resume(dev_priv->gvt);
+}
void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
int intel_gvt_init_host(void);
void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
+void intel_gvt_resume(struct drm_i915_private *dev_priv);
#else
static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
{
static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
{
}
+
+static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
+{
+}
#endif
#endif /* _INTEL_GVT_H_ */