/*
 * GTT virtualization
 *
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"
#if defined(VERBOSE_DEBUG)
#define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
#else
#define gvt_vdbg_mm(fmt, args...)
#endif
static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
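
/*
 * enable_out_of_sync gates the out-of-sync (OOS) optimization implemented
 * below: a write-protected PTE page that takes repeated trapped writes can
 * be temporarily un-protected and re-synced from a preallocated snapshot
 * page at workload submission time. preallocated_oos_pages bounds that
 * snapshot pool, which is shared by all vGPUs of a GVT device.
 */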
/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
		gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n",
				addr, size);
		return false;
	}
	return true;
}
/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
		 "invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}
/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
		 "invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			  + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			  + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> I915_GTT_PAGE_SHIFT;
	return 0;
}
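
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * translate a guest GGTT entry index to its host index before touching
 * the physical GGTT.
 *
 *	unsigned long h_index;
 *
 *	if (!intel_gvt_ggtt_index_g2h(vgpu, g_index, &h_index))
 *		write_pte64(vgpu->gvt->dev_priv, h_index, pte.val64);
 */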
#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)
/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be looked up for a given type:
 * - type of next level page table
 * - type of entry inside this level page table
 * - type of entry with PSE set
 *
 * If the given type doesn't carry such information, GTT_TYPE_INVALID is
 * returned: e.g. an l4 root entry has no PSE bit, so asking for its PSE
 * type yields GTT_TYPE_INVALID, and a PTE page table has no next level
 * page table, so asking for its next-level type also yields
 * GTT_TYPE_INVALID. This is useful when traversing a page table.
 */
struct gtt_type_table_entry {
	int entry_type;
	int pt_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.pt_type = cpt_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}
static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	/* We take IPS bit as 'PSE' for PTE level. */
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_64K_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};
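
/*
 * Example lookups against the table above:
 *   get_next_pt_type(GTT_TYPE_PPGTT_PDE_PT)    == GTT_TYPE_PPGTT_PTE_PT
 *   get_entry_type(GTT_TYPE_PPGTT_PDP_PT)      == GTT_TYPE_PPGTT_PDP_ENTRY
 *   get_pse_type(GTT_TYPE_PPGTT_PDE_ENTRY)     == GTT_TYPE_PPGTT_PTE_2M_ENTRY
 *   get_pse_type(GTT_TYPE_PPGTT_ROOT_L4_ENTRY) == GTT_TYPE_INVALID
 */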
static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_pt_type(int type)
{
	return gtt_type_table[type].pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	return readq(addr);
}

static void ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	mmio_hw_access_pre(dev_priv);
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	mmio_hw_access_post(dev_priv);
}

static void write_pte64(struct drm_i915_private *dev_priv,
		unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	writeq(pte, addr);
}
static inline int gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return 0;
}

static inline int gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return -EINVAL;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		if (WARN_ON(ret))
			return ret;
	} else if (!pt) {
		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return 0;
}
#define GTT_HAW 46

#define ADDR_1G_MASK	GENMASK_ULL(GTT_HAW - 1, 30)
#define ADDR_2M_MASK	GENMASK_ULL(GTT_HAW - 1, 21)
#define ADDR_64K_MASK	GENMASK_ULL(GTT_HAW - 1, 16)
#define ADDR_4K_MASK	GENMASK_ULL(GTT_HAW - 1, 12)

#define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
#define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* split 64K gtt entry */
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
	else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
		pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
		e->val64 &= ~ADDR_64K_MASK;
		pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
	}

	e->val64 |= (pfn << PAGE_SHIFT);
}
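
/*
 * Worked example: with GTT_HAW == 46, ADDR_4K_MASK keeps address bits
 * [45:12], so for a 4K entry get_pfn() returns bits [45:12] shifted down
 * by PAGE_SHIFT, and set_pfn() masks the new pfn to the same width before
 * merging it back into val64. The wider mappings (2M, 1G) simply start
 * the field at bit 21 or 30 instead.
 */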
static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	return !!(e->val64 & _PAGE_PSE);
}

static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
{
	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
		return false;

	return !!(e->val64 & GEN8_PDE_IPS_64K);
}

static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
{
	if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
		return;

	e->val64 &= ~GEN8_PDE_IPS_64K;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes PDP root pointer registers without the present bit
	 * set and that still works, so root pointer entries need to be
	 * treated specially.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & _PAGE_PRESENT);
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~_PAGE_PRESENT;
}

static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= _PAGE_PRESENT;
}

static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
{
	return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
}

static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
{
	e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
}

static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
}
/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
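
/*
 * Together these implement the gen8 4-level decomposition of a graphics
 * memory address:
 *   pml4 index = gma[47:39], l4 pdp index = gma[38:30],
 *   pde index  = gma[29:21], pte index    = gma[20:12],
 * while the legacy 3-level (L3) mode only uses gma[31:30] to pick a PDP.
 */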
static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.set_present = gtt_entry_set_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.clear_ips = gen8_gtt_clear_ips,
	.test_ips = gen8_gtt_test_ips,
	.clear_64k_splited = gen8_gtt_clear_64k_splited,
	.set_64k_splited = gen8_gtt_set_64k_splited,
	.test_64k_splited = gen8_gtt_test_64k_splited,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};
/* Update entry type per pse and ips bit. */
static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
	struct intel_gvt_gtt_entry *entry, bool ips)
{
	switch (entry->type) {
	case GTT_TYPE_PPGTT_PDE_ENTRY:
	case GTT_TYPE_PPGTT_PDP_ENTRY:
		if (pte_ops->test_pse(entry))
			entry->type = get_pse_type(entry->type);
		break;
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		if (ips)
			entry->type = get_pse_type(entry->type);
		break;
	default:
		GEM_BUG_ON(!gtt_type_is_entry(entry->type));
	}

	GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
}
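
/*
 * e.g. a guest PDE entry with the PSE bit set is retyped from
 * GTT_TYPE_PPGTT_PDE_ENTRY to GTT_TYPE_PPGTT_PTE_2M_ENTRY, and a 4K PTE
 * read under an IPS-enabled PDE is retyped to GTT_TYPE_PPGTT_PTE_64K_ENTRY,
 * per the gtt_type_table mapping above.
 */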
static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);

	entry->type = mm->ppgtt_mm.root_entry_type;
	pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
	update_entry_type_for_real(pte_ops, entry, false);
}

static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, true);
}

static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_get_root_entry(mm, entry, index, false);
}

static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index,
		bool guest)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
			   mm->ppgtt_mm.shadow_pdps,
			   entry, index, false, 0, mm->vgpu);
}

static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, true);
}

static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	_ppgtt_set_root_entry(mm, entry, index, false);
}
static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	entry->type = GTT_TYPE_GGTT_PTE;
	pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
			   false, 0, mm->vgpu);
}

static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
}

static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *entry, unsigned long index)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);

	pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}
/*
 * PPGTT shadow page table helpers.
 */
static inline int ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	int ret;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	ret = ops->get_entry(page_table, e, index, guest,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
	if (ret)
		return ret;

	update_entry_type_for_real(ops, e, guest ?
				   spt->guest_page.pde_ips : false);

	gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);
	return 0;
}

static inline int ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return -EINVAL;

	gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
		    type, e->type, index, e->val64);

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			spt->vgpu);
}
#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page.type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)
static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}
static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;

	trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);

	dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
		       PCI_DMA_BIDIRECTIONAL);

	radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);

	if (spt->guest_page.oos_page)
		detach_oos_page(spt->vgpu, spt->guest_page.oos_page);

	intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);

	list_del_init(&spt->post_shadow_list);
	free_spt(spt);
}
static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_ppgtt_spt *spt;
	struct radix_tree_iter iter;
	void __rcu **slot;

	radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
		spt = radix_tree_deref_slot(slot);
		ppgtt_free_spt(spt);
	}
}
static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(
		struct intel_vgpu_page_track *page_track,
		u64 gpa, void *data, int bytes)
{
	struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
	if (ret)
		return ret;
	return 0;
}
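
/*
 * Guest page-table pages are write-protected, so every guest CPU write to
 * a tracked page lands here, letting the shadow page table be updated
 * before the guest can observe the write as complete.
 */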
/* Find a spt by guest gfn. */
static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_page_track *track;

	track = intel_vgpu_find_page_track(vgpu, gfn);
	if (track && track->handler == ppgtt_write_protection_handler)
		return track->priv_data;

	return NULL;
}

/* Find the spt by shadow page mfn. */
static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
}
static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);

static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
		struct intel_vgpu *vgpu, int type, unsigned long gfn,
		bool guest_pde_ips)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	dma_addr_t daddr;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_ppgtt_mm(vgpu->gvt))
			goto retry;

		gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * Init shadow_page.
	 */
	spt->shadow_page.type = type;
	daddr = dma_map_page(kdev, spt->shadow_page.page,
			     0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_vgpu_err("fail to map dma addr\n");
		ret = -EINVAL;
		goto err_free_spt;
	}
	spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
	spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;

	/*
	 * Init guest_page.
	 */
	spt->guest_page.type = type;
	spt->guest_page.gfn = gfn;
	spt->guest_page.pde_ips = guest_pde_ips;

	ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
					ppgtt_write_protection_handler, spt);
	if (ret)
		goto err_unmap_dma;

	ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
	if (ret)
		goto err_unreg_page_track;

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
	return spt;

err_unreg_page_track:
	intel_vgpu_unregister_page_track(vgpu, spt->guest_page.gfn);
err_unmap_dma:
	dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
err_free_spt:
	free_spt(spt);
	return ERR_PTR(ret);
}
#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_guest_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (!ppgtt_get_shadow_entry(spt, e, i) && \
		    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
static void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
	atomic_inc(&spt->refcount);
}
static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	intel_gvt_gtt_type_t cur_pt_type;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
		&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type) + 1;
		if (ops->get_pfn(e) ==
			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
				ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_spt(s);
}
static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;
	int type;

	pfn = ops->get_pfn(entry);
	type = spt->shadow_page.type;

	if (pfn == vgpu->gtt.scratch_pt[type].page_mfn)
		return;

	intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
}
static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;
	int v = atomic_read(&spt->refcount);

	trace_spt_change(spt->vgpu->id, "die", spt,
			spt->guest_page.gfn, spt->shadow_page.type);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

	if (atomic_dec_return(&spt->refcount) > 0)
		return 0;

	for_each_present_shadow_entry(spt, &e, index) {
		switch (e.type) {
		case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
			gvt_vdbg_mm("invalidate 4K entry\n");
			ppgtt_invalidate_pte(spt, &e);
			break;
		case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
		case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
		case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
			WARN(1, "GVT doesn't support 64K/2M/1GB page\n");
			continue;
		case GTT_TYPE_PPGTT_PML4_ENTRY:
		case GTT_TYPE_PPGTT_PDP_ENTRY:
		case GTT_TYPE_PPGTT_PDE_ENTRY:
			gvt_vdbg_mm("invalidate PML4/PDP/PDE entry\n");
			ret = ppgtt_invalidate_spt_by_shadow_entry(
					spt->vgpu, &e);
			if (ret)
				goto fail;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}

	trace_spt_change(spt->vgpu->id, "release", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);
	ppgtt_free_spt(spt);
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
			spt, e.val64, e.type);
	return ret;
}
static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

	if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
		u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
			GAMW_ECO_ENABLE_64K_IPS_FIELD;

		return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
	} else if (INTEL_GEN(dev_priv) >= 11) {
		/* 64K paging only controlled by IPS bit in PTE now. */
		return true;
	} else
		return false;
}
static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	bool ips = false;
	int ret;

	GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));

	spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
	if (spt)
		ppgtt_get_spt(spt);
	else {
		int type = get_next_pt_type(we->type);

		if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
			ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);

		spt = ppgtt_alloc_spt(vgpu, type, ops->get_pfn(we), ips);
		if (IS_ERR(spt)) {
			ret = PTR_ERR(spt);
			goto fail;
		}

		ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
		if (ret)
			goto fail;

		ret = ppgtt_populate_spt(spt);
		if (ret)
			goto fail;

		trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
				 spt->shadow_page.type);
	}
	return spt;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
		     spt, we->val64, we->type);
	return ERR_PTR(ret);
}
static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	ops->set_pfn(se, s->shadow_page.mfn);
}
static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
	struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
	struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry se = *ge;
	unsigned long gfn;
	dma_addr_t dma_addr;
	int ret;

	if (!pte_ops->test_present(ge))
		return 0;

	gfn = pte_ops->get_pfn(ge);

	switch (ge->type) {
	case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
		gvt_vdbg_mm("shadow 4K gtt entry\n");
		break;
	case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
	case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
	case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
		gvt_vgpu_err("GVT doesn't support 64K/2M/1GB entry\n");
		return -EINVAL;
	default:
		GEM_BUG_ON(1);
	}

	/* direct shadow */
	ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, &dma_addr);
	if (ret)
		return -ENXIO;

	pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
	ppgtt_set_shadow_entry(spt, &se, index);
	return 0;
}
static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long gfn, i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			 spt->guest_page.gfn, spt->shadow_page.type);

	for_each_present_guest_entry(spt, &ge, i) {
		if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
			s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
			if (IS_ERR(s)) {
				ret = PTR_ERR(s);
				goto fail;
			}
			ppgtt_get_shadow_entry(spt, &se, i);
			ppgtt_generate_shadow_entry(&se, s, &ge);
			ppgtt_set_shadow_entry(spt, &se, i);
		} else {
			gfn = ops->get_pfn(&ge);
			if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
				ops->set_pfn(&se, gvt->gtt.scratch_mfn);
				ppgtt_set_shadow_entry(spt, &se, i);
				continue;
			}

			ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
			if (ret)
				goto fail;
		}
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, ge.val64, ge.type);
	return ret;
}
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *se, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "remove", spt,
			       spt->shadow_page.type, se->val64, index);

	gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
		    se->type, index, se->val64);

	if (!ops->test_present(se))
		return 0;

	if (ops->get_pfn(se) ==
	    vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(se->type))) {
		struct intel_vgpu_ppgtt_spt *s =
			intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
		if (!s) {
			gvt_vgpu_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_spt(s);
		if (ret)
			goto fail;
	} else
		ppgtt_invalidate_pte(spt, se);

	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
			spt, se->val64, se->type);
	return ret;
}
static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
			       we->val64, index);

	gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
		    we->type, index, we->val64);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
		spt, we->val64, we->type);
	return ret;
}
static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
	struct intel_gvt_gtt_entry old, new;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			 spt, spt->guest_page.type);

	old.type = new.type = get_entry_type(spt->guest_page.type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (I915_GTT_PAGE_SIZE >>
				info->gtt_entry_size_shift); index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			       spt->guest_page.gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
				spt, spt->guest_page.type,
				new.val64, index);

		ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
	}

	spt->guest_page.write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}
static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			 spt, spt->guest_page.type);

	spt->guest_page.write_cnt = 0;
	spt->guest_page.oos_page = NULL;
	oos_page->spt = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}
static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
			spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
			oos_page->mem, I915_GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->spt = spt;
	spt->guest_page.oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
			 spt, spt->guest_page.type);
	return 0;
}
static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
	if (ret)
		return ret;

	trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_del_init(&oos_page->vm_list);
	return sync_oos_page(spt->vgpu, oos_page);
}
static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page already has an oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		oos_page = container_of(gtt->oos_page_use_list_head.next,
			struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
		ret = detach_oos_page(spt->vgpu, oos_page);
		if (ret)
			return ret;
	} else
		oos_page = container_of(gtt->oos_page_free_list_head.next,
			struct intel_vgpu_oos_page, list);
	return attach_oos_page(oos_page, spt);
}
static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
		return -EINVAL;

	trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
			 spt, spt->guest_page.type);

	list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
	return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
}
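
/*
 * OOS life cycle: attach_oos_page() snapshots the guest page into
 * oos_page->mem, ppgtt_set_guest_page_oos() drops the write protection so
 * the guest can update the page without trapping, and
 * ppgtt_set_guest_page_sync() re-arms the protection and replays only the
 * entries that changed relative to the snapshot.
 */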
/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages of a vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to sync all the out-of-sync shadow pages of the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(oos_page->spt);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * The heart of the PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_ppgtt_spt *spt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	int type = spt->shadow_page.type;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry old_se;
	int new_present;
	int ret;

	new_present = ops->test_present(we);

	/*
	 * Add the new entry first and then remove the old one; this
	 * guarantees the ppgtt table stays valid during the window
	 * between the add and the removal.
	 */
	ppgtt_get_shadow_entry(spt, &old_se, index);

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(spt, we, index);
		if (ret)
			goto fail;
	}

	ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
	if (ret)
		goto fail;

	if (!new_present) {
		ops->set_pfn(&old_se, vgpu->gtt.scratch_pt[type].page_mfn);
		ppgtt_set_shadow_entry(spt, &old_se, index);
	}
	return 0;
fail:
	gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
			spt, we->val64, we->type);
	return ret;
}
static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
{
	return enable_out_of_sync
		&& gtt_type_is_pte_pt(spt->guest_page.type)
		&& spt->guest_page.write_cnt >= 2;
}
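
/*
 * Heuristic: only leaf (PTE-level) pages that have taken at least two
 * trapped writes are worth taking out of sync; higher-level tables change
 * rarely and must stay tracked so the shadow structure remains correct.
 */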
static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
			&spt->vgpu->gtt.post_shadow_list_head);
}
/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to host,
 * to flush all the post shadows for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(spt,
							&ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}
static int ppgtt_handle_guest_write_page_table_bytes(
		struct intel_vgpu_ppgtt_spt *spt,
		u64 pa, void *p_data, int bytes)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt_gtt_entry we, se;
	unsigned long index;
	int ret;

	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

	ppgtt_get_guest_entry(spt, &we, index);

	if (bytes == info->gtt_entry_size) {
		ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
		if (ret)
			return ret;
	} else {
		if (!test_bit(index, spt->post_shadow_bitmap)) {
			int type = spt->shadow_page.type;

			ppgtt_get_shadow_entry(spt, &se, index);
			ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
			if (ret)
				return ret;
			ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
			ppgtt_set_shadow_entry(spt, &se, index);
		}
		ppgtt_set_post_shadow(spt, index);
	}

	if (!enable_out_of_sync)
		return 0;

	spt->guest_page.write_cnt++;

	if (spt->guest_page.oos_page)
		ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
				false, 0, vgpu);

	if (can_do_out_of_sync(spt)) {
		if (!spt->guest_page.oos_page)
			ppgtt_allocate_oos_page(spt);

		ret = ppgtt_set_guest_page_oos(spt);
		if (ret)
			return ret;
	}
	return 0;
}
static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_gvt_gtt_entry se;
	int index;

	if (!mm->ppgtt_mm.shadowed)
		return;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
		ppgtt_get_shadow_root_entry(mm, &se, index);

		if (!ops->test_present(&se))
			continue;

		ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
		se.val64 = 0;
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_spt_guest_change(vgpu->id, "destroy root pointer",
				       NULL, se.type, se.val64, index);
	}

	mm->ppgtt_mm.shadowed = false;
}
static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge, se;
	int index, ret;

	if (mm->ppgtt_mm.shadowed)
		return 0;

	mm->ppgtt_mm.shadowed = true;

	for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
		ppgtt_get_guest_root_entry(mm, &ge, index);

		if (!ops->test_present(&ge))
			continue;

		trace_spt_guest_change(vgpu->id, __func__, NULL,
				       ge.type, ge.val64, index);

		spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
		if (IS_ERR(spt)) {
			gvt_vgpu_err("fail to populate guest root pointer\n");
			ret = PTR_ERR(spt);
			goto fail;
		}
		ppgtt_generate_shadow_entry(&se, spt, &ge);
		ppgtt_set_shadow_root_entry(mm, &se, index);

		trace_spt_guest_change(vgpu->id, "populate root pointer",
				       NULL, se.type, se.val64, index);
	}

	return 0;
fail:
	invalidate_ppgtt_mm(mm);
	return ret;
}
static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm)
		return NULL;

	mm->vgpu = vgpu;
	kref_init(&mm->ref);
	atomic_set(&mm->pincount, 0);

	return mm;
}

static void vgpu_free_mm(struct intel_vgpu_mm *mm)
{
	kfree(mm);
}
/**
 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps.
 *
 * This function is used to create a ppgtt mm object for a vGPU.
 *
 * Returns:
 * Pointer to the mm object on success, ERR_PTR(-errno) if failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_mm *mm;
	int ret;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_PPGTT;

	GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
		   root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
	mm->ppgtt_mm.root_entry_type = root_entry_type;

	INIT_LIST_HEAD(&mm->ppgtt_mm.list);
	INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);

	if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		mm->ppgtt_mm.guest_pdps[0] = pdps[0];
	else
		memcpy(mm->ppgtt_mm.guest_pdps, pdps,
		       sizeof(mm->ppgtt_mm.guest_pdps));

	ret = shadow_ppgtt_mm(mm);
	if (ret) {
		gvt_vgpu_err("failed to shadow ppgtt mm\n");
		vgpu_free_mm(mm);
		return ERR_PTR(ret);
	}

	list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
	list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
	return mm;
}
static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_mm *mm;
	unsigned long nr_entries;

	mm = vgpu_alloc_mm(vgpu);
	if (!mm)
		return ERR_PTR(-ENOMEM);

	mm->type = INTEL_GVT_MM_GGTT;

	nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
	mm->ggtt_mm.virtual_ggtt = vzalloc(nr_entries *
					vgpu->gvt->device_info.gtt_entry_size);
	if (!mm->ggtt_mm.virtual_ggtt) {
		vgpu_free_mm(mm);
		return ERR_PTR(-ENOMEM);
	}

	return mm;
}
/**
 * _intel_vgpu_mm_release - destroy a mm object
 * @mm_ref: a kref object
 *
 * This function is used to destroy a mm object for vGPU
 *
 */
void _intel_vgpu_mm_release(struct kref *mm_ref)
{
	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);

	if (GEM_WARN_ON(atomic_read(&mm->pincount)))
		gvt_err("vgpu mm pin count bug detected\n");

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		list_del(&mm->ppgtt_mm.list);
		list_del(&mm->ppgtt_mm.lru_list);
		invalidate_ppgtt_mm(mm);
	} else {
		vfree(mm->ggtt_mm.virtual_ggtt);
	}

	vgpu_free_mm(mm);
}
/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user finishes using a vGPU mm object.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
	atomic_dec(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when a user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	atomic_inc(&mm->pincount);

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		ret = shadow_ppgtt_mm(mm);
		if (ret)
			return ret;

		list_move_tail(&mm->ppgtt_mm.lru_list,
			       &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
	}

	return 0;
}
static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);

		if (atomic_read(&mm->pincount))
			continue;

		list_del_init(&mm->ppgtt_mm.lru_list);
		invalidate_ppgtt_mm(mm);
		return 1;
	}
	return 0;
}
/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;

	s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
	if (!s)
		return -ENXIO;

	if (!guest)
		ppgtt_get_shadow_entry(s, e, index);
	else
		ppgtt_get_guest_entry(s, e, index);
	return 0;
}
/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object. could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a specific
 * graphics memory space to a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
	unsigned long gma_index[4];
	struct intel_gvt_gtt_entry e;
	int i, levels = 0;
	int ret;

	GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
		   mm->type != INTEL_GVT_MM_PPGTT);

	if (mm->type == INTEL_GVT_MM_GGTT) {
		if (!vgpu_gmadr_is_valid(vgpu, gma))
			goto err;

		ggtt_get_guest_entry(mm, &e,
			gma_ops->gma_to_ggtt_pte_index(gma));

		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
			+ (gma & ~I915_GTT_PAGE_MASK);

		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
	} else {
		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			ppgtt_get_shadow_root_entry(mm, &e, 0);

			gma_index[0] = gma_ops->gma_to_pml4_index(gma);
			gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
			gma_index[2] = gma_ops->gma_to_pde_index(gma);
			gma_index[3] = gma_ops->gma_to_pte_index(gma);
			levels = 4;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			ppgtt_get_shadow_root_entry(mm, &e,
					gma_ops->gma_to_l3_pdp_index(gma));

			gma_index[0] = gma_ops->gma_to_pde_index(gma);
			gma_index[1] = gma_ops->gma_to_pte_index(gma);
			levels = 2;
			break;
		default:
			GEM_BUG_ON(1);
		}

		/* walk the shadow page table and get gpa from guest entry */
		for (i = 0; i < levels; i++) {
			ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
				(i == levels - 1));
			if (ret)
				goto err;

			if (!pte_ops->test_present(&e)) {
				gvt_dbg_core("GMA 0x%lx is not present\n", gma);
				goto err;
			}
		}

		gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
			(gma & ~I915_GTT_PAGE_MASK);
		trace_gma_translate(vgpu->id, "ppgtt", 0,
				    mm->ppgtt_mm.root_entry_type, gma, gpa);
	}

	return gpa;
err:
	gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
	return INTEL_GVT_INVALID_ADDR;
}
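
/*
 * Example: translating a PPGTT gma through an L4 mm walks four levels,
 * PML4 -> PDP -> PDE -> PTE, using gma_index[0..3], and the final gpa is
 * (pfn << 12) | (gma & 0xfff). A hypothetical caller:
 *
 *	gpa = intel_vgpu_gma_to_gpa(mm, gma);
 *	if (gpa == INTEL_GVT_INVALID_ADDR)
 *		return -EFAULT;
 */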
static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	unsigned long index = off >> info->gtt_entry_size_shift;
	struct intel_gvt_gtt_entry e;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	ggtt_get_guest_entry(ggtt_mm, &e, index);
	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
			bytes);
	return 0;
}
/**
 * intel_vgpu_emulate_ggtt_mmio_read - emulate GGTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
 * @bytes: data length
 *
 * This function is used to emulate the GGTT MMIO register read
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
	return ret;
}
static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *entry)
{
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	unsigned long pfn;

	pfn = pte_ops->get_pfn(entry);
	if (pfn != vgpu->gvt->gtt.scratch_mfn)
		intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
						pfn << PAGE_SHIFT);
}
static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
	unsigned long gma, gfn;
	struct intel_gvt_gtt_entry e, m;
	dma_addr_t dma_addr;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = g_gtt_index << I915_GTT_PAGE_SHIFT;

	/* the VM may configure the whole GM space when ballooning is used */
	if (!vgpu_gmadr_is_valid(vgpu, gma))
		return 0;

	ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);

	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
			bytes);

	if (ops->test_present(&e)) {
		gfn = ops->get_pfn(&e);
		m = e;

		/* one PTE update may be issued in multiple writes and the
		 * first write may not construct a valid gfn
		 */
		if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
			goto out;
		}

		ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
							      &dma_addr);
		if (ret) {
			gvt_vgpu_err("fail to populate guest ggtt entry\n");
			/* guest driver may read/write the entry when partially
			 * updating it; in this situation the p2m mapping will
			 * fail, so set the shadow entry to point to a scratch
			 * page
			 */
			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
		} else
			ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
	} else {
		ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
		ggtt_invalidate_pte(vgpu, &m);
		ops->set_pfn(&m, gvt->gtt.scratch_mfn);
		ops->clear_present(&m);
	}

out:
	ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
	ggtt_invalidate(gvt->dev_priv);
	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
	return 0;
}
/**
 * intel_vgpu_emulate_ggtt_mmio_write - emulate GGTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GGTT MMIO register write
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
	return ret;
}
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t type)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = I915_GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	void *scratch_pt;
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!scratch_pt) {
		gvt_vgpu_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_vgpu_err("fail to dmamap scratch_pt\n");
		__free_page(virt_to_page(scratch_pt));
		return -ENOMEM;
	}
	gtt->scratch_pt[type].page_mfn =
		(unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
			vgpu->id, type, gtt->scratch_pt[type].page_mfn);

	/* Build the tree by filling the scratch pt with entries that point
	 * to the next level scratch pt or scratch page. scratch_pt[type]
	 * indicates the scratch pt/scratch page used by the shadow page
	 * tables of that level.
	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
	 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
	 * is GTT_TYPE_PPGTT_PTE_PT, and is filled with scratch page mfns.
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/* The entry parameters like present/writeable/cache type
		 * are set to the same as i915's scratch page tree.
		 */
		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
	}

	return 0;
}
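
/*
 * Since create_scratch_page_tree() below calls this for every level in
 * order, the scratch tables end up chained like a real page table: the
 * PML4-level scratch pt points at the PDP-level one, which points at the
 * PDE-level one, which points at the PTE-level scratch pt; the PTE-level
 * page itself stays zeroed.
 */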
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
					I915_GTT_PAGE_SHIFT);
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}

	return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}

	return 0;

err:
	release_scratch_page_tree(vgpu);
	return ret;
}
/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;

	INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);

	INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
	if (IS_ERR(gtt->ggtt_mm)) {
		gvt_vgpu_err("fail to create mm for ggtt.\n");
		return PTR_ERR(gtt->ggtt_mm);
	}

	intel_vgpu_reset_ggtt(vgpu, false);

	return create_scratch_page_tree(vgpu);
}
static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		intel_vgpu_destroy_mm(mm);
	}

	if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
		gvt_err("vgpu ppgtt mm is not fully destroyed\n");

	if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
		gvt_err("Why do we still have spt not freed?\n");
		ppgtt_free_all_spt(vgpu);
	}
}

static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
	intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
	vgpu->gtt.ggtt_mm = NULL;
}
/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 *
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_destroy_ggtt_mm(vgpu);
	release_scratch_page_tree(vgpu);
}
static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
		"someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		kfree(oos_page);
	}
}
static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int i;
	int ret;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		oos_page->id = i;
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}
/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @pdps: guest PPGTT page table root pointers
 *
 * This function is used to find a PPGTT mm object from the mm object pool
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		u64 pdps[])
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos;

	list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);

		switch (mm->ppgtt_mm.root_entry_type) {
		case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
			if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
				return mm;
			break;
		case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
			if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
				    sizeof(mm->ppgtt_mm.guest_pdps)))
				return mm;
			break;
		default:
			GEM_BUG_ON(1);
		}
	}
	return NULL;
}
/**
 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
 * @vgpu: a vGPU
 * @root_entry_type: ppgtt root entry type
 * @pdps: guest pdps
 *
 * This function is used to find or create a PPGTT mm object from a guest.
 *
 * Returns:
 * Pointer to the mm object on success, ERR_PTR(-errno) if failed.
 */
struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t root_entry_type, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (mm) {
		intel_vgpu_mm_get(mm);
	} else {
		mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
		if (IS_ERR(mm))
			gvt_vgpu_err("fail to create mm\n");
	}
	return mm;
}
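
/*
 * A minimal usage sketch (hypothetical caller, e.g. a PV notification
 * handler) creating or looking up the shadow for a guest-announced PPGTT:
 *
 *	mm = intel_vgpu_get_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 */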
/**
 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
 * @vgpu: a vGPU
 * @pdps: guest pdps
 *
 * This function is used to find a PPGTT mm object from a guest and destroy it.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
{
	struct intel_vgpu_mm *mm;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
	if (!mm) {
		gvt_vgpu_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_vgpu_mm_put(mm);
	return 0;
}
/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;
	void *page;
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	gvt_dbg_core("init gtt\n");

	gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
	gvt->gtt.gma_ops = &gen8_gtt_gma_ops;

	page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!page) {
		gvt_err("fail to allocate scratch ggtt page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(page), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch ggtt page\n");
		__free_page(virt_to_page(page));
		return -ENOMEM;
	}

	gvt->gtt.scratch_page = virt_to_page(page);
	gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(gvt->gtt.scratch_page);
			return ret;
		}
	}
	INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
	return 0;
}
/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up
 * the mm components of a GVT device.
 *
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
					I915_GTT_PAGE_SHIFT);

	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

	__free_page(gvt->gtt.scratch_page);

	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}
/**
 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
 * @vgpu: a vGPU
 *
 * This function is called when invalidating all PPGTT instances of a vGPU.
 *
 */
void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
		if (mm->type == INTEL_GVT_MM_PPGTT) {
			list_del_init(&mm->ppgtt_mm.lru_list);
			if (mm->ppgtt_mm.shadowed)
				invalidate_ppgtt_mm(mm);
		}
	}
}
/**
 * intel_vgpu_reset_ggtt - reset the GGTT entry
 * @vgpu: a vGPU
 * @invalidate_old: invalidate old entries
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 *
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct drm_i915_private *dev_priv = gvt->dev_priv;
	struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
	struct intel_gvt_gtt_entry old_entry;
	u32 index;
	u32 num_entries;

	pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
	pte_ops->set_present(&entry);

	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
	while (num_entries--) {
		if (invalidate_old) {
			ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
			ggtt_invalidate_pte(vgpu, &old_entry);
		}
		ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
	}

	ggtt_invalidate(dev_priv);
}
/**
 * intel_vgpu_reset_gtt - reset all GTT related status
 * @vgpu: a vGPU
 *
 * This function is called from the vfio core to reset all
 * GTT related status, including GGTT, PPGTT and scratch page.
 *
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
{
	/* Shadow pages are only created when there is no page
	 * table tracking data, so remove page tracking data after
	 * removing the shadow pages.
	 */
	intel_vgpu_destroy_all_ppgtt_mm(vgpu);
	intel_vgpu_reset_ggtt(vgpu, true);
}