1 /*
2  * GTT virtualization
3  *
4  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23  * SOFTWARE.
24  *
25  * Authors:
26  *    Zhi Wang <zhi.a.wang@intel.com>
27  *    Zhenyu Wang <zhenyuw@linux.intel.com>
28  *    Xiao Zheng <xiao.zheng@intel.com>
29  *
30  * Contributors:
31  *    Min He <min.he@intel.com>
32  *    Bing Niu <bing.niu@intel.com>
33  *
34  */
35
36 #include "i915_drv.h"
37 #include "gvt.h"
38 #include "i915_pvinfo.h"
39 #include "trace.h"
40
41 #if defined(VERBOSE_DEBUG)
42 #define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
43 #else
44 #define gvt_vdbg_mm(fmt, args...)
45 #endif
46
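/*
 * Out-of-sync (oos) shadow page support: a guest PPGTT page may be taken
 * out of write protection and its cached copy re-synced later (see
 * sync_oos_page() and friends below). preallocated_oos_pages sizes the
 * pool of pages used to cache guest page-table contents.
 */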
47 static bool enable_out_of_sync = false;
48 static int preallocated_oos_pages = 8192;
49
50 /*
51  * Validate a gm address and the related range size: the range must lie
52  * entirely within the vGPU's aperture or hidden gm space.
53  */
54 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
55 {
56         if (size == 0)
57                 return vgpu_gmadr_is_valid(vgpu, addr);
58
59         if (vgpu_gmadr_is_aperture(vgpu, addr) &&
60             vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
61                 return true;
62         else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
63                  vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
64                 return true;
65
66         gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
67                      addr, size);
68         return false;
69 }
70
71 /* translate a guest gmadr to host gmadr */
72 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
73 {
74         struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
75
76         if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
77                      "invalid guest gmadr %llx\n", g_addr))
78                 return -EACCES;
79
80         if (vgpu_gmadr_is_aperture(vgpu, g_addr))
81                 *h_addr = vgpu_aperture_gmadr_base(vgpu)
82                           + (g_addr - vgpu_aperture_offset(vgpu));
83         else
84                 *h_addr = vgpu_hidden_gmadr_base(vgpu)
85                           + (g_addr - vgpu_hidden_offset(vgpu));
86         return 0;
87 }
88
89 /* translate a host gmadr to guest gmadr */
90 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
91 {
92         struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
93
94         if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
95                      "invalid host gmadr %llx\n", h_addr))
96                 return -EACCES;
97
98         if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
99                 *g_addr = vgpu_aperture_gmadr_base(vgpu)
100                         + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
101         else
102                 *g_addr = vgpu_hidden_gmadr_base(vgpu)
103                         + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
104         return 0;
105 }
106
107 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
108                              unsigned long *h_index)
109 {
110         u64 h_addr;
111         int ret;
112
113         ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
114                                        &h_addr);
115         if (ret)
116                 return ret;
117
118         *h_index = h_addr >> I915_GTT_PAGE_SHIFT;
119         return 0;
120 }
121
122 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
123                              unsigned long *g_index)
124 {
125         u64 g_addr;
126         int ret;
127
128         ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
129                                        &g_addr);
130         if (ret)
131                 return ret;
132
133         *g_index = g_addr >> I915_GTT_PAGE_SHIFT;
134         return 0;
135 }
136
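/*
 * Predicates over the GTT_TYPE_* enumeration: whether a value names a page
 * table entry, a page table, a PTE-level page table or a root entry.
 */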
137 #define gtt_type_is_entry(type) \
138         (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
139          && type != GTT_TYPE_PPGTT_PTE_ENTRY \
140          && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
141
142 #define gtt_type_is_pt(type) \
143         (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
144
145 #define gtt_type_is_pte_pt(type) \
146         (type == GTT_TYPE_PPGTT_PTE_PT)
147
148 #define gtt_type_is_root_pointer(type) \
149         (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
150
151 #define gtt_init_entry(e, t, p, v) do { \
152         (e)->type = t; \
153         (e)->pdev = p; \
154         memcpy(&(e)->val64, &v, sizeof(v)); \
155 } while (0)
156
157 /*
158  * Mappings between GTT_TYPE* enumerations.
159  * The following information can be looked up for a given type:
160  * - type of the next level page table
161  * - type of entry inside this level of page table
162  * - type of entry with the PSE bit set
163  *
164  * If the given type doesn't carry such information, GTT_TYPE_INVALID is
165  * returned. For example, an l4 root entry has no PSE bit, so asking for
166  * its PSE type returns GTT_TYPE_INVALID; likewise, a PTE page table has
167  * no next level page table, so asking for its next level page table
168  * type also returns GTT_TYPE_INVALID.
169  *
170  * This is useful when traversing a page table.
171  */
172
173 struct gtt_type_table_entry {
174         int entry_type;
175         int pt_type;
176         int next_pt_type;
177         int pse_entry_type;
178 };
179
180 #define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
181         [type] = { \
182                 .entry_type = e_type, \
183                 .pt_type = cpt_type, \
184                 .next_pt_type = npt_type, \
185                 .pse_entry_type = pse_type, \
186         }
187
188 static struct gtt_type_table_entry gtt_type_table[] = {
189         GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
190                         GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
191                         GTT_TYPE_INVALID,
192                         GTT_TYPE_PPGTT_PML4_PT,
193                         GTT_TYPE_INVALID),
194         GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
195                         GTT_TYPE_PPGTT_PML4_ENTRY,
196                         GTT_TYPE_PPGTT_PML4_PT,
197                         GTT_TYPE_PPGTT_PDP_PT,
198                         GTT_TYPE_INVALID),
199         GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
200                         GTT_TYPE_PPGTT_PML4_ENTRY,
201                         GTT_TYPE_PPGTT_PML4_PT,
202                         GTT_TYPE_PPGTT_PDP_PT,
203                         GTT_TYPE_INVALID),
204         GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
205                         GTT_TYPE_PPGTT_PDP_ENTRY,
206                         GTT_TYPE_PPGTT_PDP_PT,
207                         GTT_TYPE_PPGTT_PDE_PT,
208                         GTT_TYPE_PPGTT_PTE_1G_ENTRY),
209         GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
210                         GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
211                         GTT_TYPE_INVALID,
212                         GTT_TYPE_PPGTT_PDE_PT,
213                         GTT_TYPE_PPGTT_PTE_1G_ENTRY),
214         GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
215                         GTT_TYPE_PPGTT_PDP_ENTRY,
216                         GTT_TYPE_PPGTT_PDP_PT,
217                         GTT_TYPE_PPGTT_PDE_PT,
218                         GTT_TYPE_PPGTT_PTE_1G_ENTRY),
219         GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
220                         GTT_TYPE_PPGTT_PDE_ENTRY,
221                         GTT_TYPE_PPGTT_PDE_PT,
222                         GTT_TYPE_PPGTT_PTE_PT,
223                         GTT_TYPE_PPGTT_PTE_2M_ENTRY),
224         GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
225                         GTT_TYPE_PPGTT_PDE_ENTRY,
226                         GTT_TYPE_PPGTT_PDE_PT,
227                         GTT_TYPE_PPGTT_PTE_PT,
228                         GTT_TYPE_PPGTT_PTE_2M_ENTRY),
229         /* We take IPS bit as 'PSE' for PTE level. */
230         GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
231                         GTT_TYPE_PPGTT_PTE_4K_ENTRY,
232                         GTT_TYPE_PPGTT_PTE_PT,
233                         GTT_TYPE_INVALID,
234                         GTT_TYPE_PPGTT_PTE_64K_ENTRY),
235         GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
236                         GTT_TYPE_PPGTT_PTE_4K_ENTRY,
237                         GTT_TYPE_PPGTT_PTE_PT,
238                         GTT_TYPE_INVALID,
239                         GTT_TYPE_PPGTT_PTE_64K_ENTRY),
240         GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
241                         GTT_TYPE_PPGTT_PTE_4K_ENTRY,
242                         GTT_TYPE_PPGTT_PTE_PT,
243                         GTT_TYPE_INVALID,
244                         GTT_TYPE_PPGTT_PTE_64K_ENTRY),
245         GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
246                         GTT_TYPE_PPGTT_PDE_ENTRY,
247                         GTT_TYPE_PPGTT_PDE_PT,
248                         GTT_TYPE_INVALID,
249                         GTT_TYPE_PPGTT_PTE_2M_ENTRY),
250         GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
251                         GTT_TYPE_PPGTT_PDP_ENTRY,
252                         GTT_TYPE_PPGTT_PDP_PT,
253                         GTT_TYPE_INVALID,
254                         GTT_TYPE_PPGTT_PTE_1G_ENTRY),
255         GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
256                         GTT_TYPE_GGTT_PTE,
257                         GTT_TYPE_INVALID,
258                         GTT_TYPE_INVALID,
259                         GTT_TYPE_INVALID),
260 };
261
262 static inline int get_next_pt_type(int type)
263 {
264         return gtt_type_table[type].next_pt_type;
265 }
266
267 static inline int get_pt_type(int type)
268 {
269         return gtt_type_table[type].pt_type;
270 }
271
272 static inline int get_entry_type(int type)
273 {
274         return gtt_type_table[type].entry_type;
275 }
276
277 static inline int get_pse_type(int type)
278 {
279         return gtt_type_table[type].pse_entry_type;
280 }
281
282 static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
283 {
284         void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
285
286         return readq(addr);
287 }
288
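/* Flush the GGTT TLB by writing the GFX flush control register. */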
289 static void ggtt_invalidate(struct drm_i915_private *dev_priv)
290 {
291         mmio_hw_access_pre(dev_priv);
292         I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
293         mmio_hw_access_post(dev_priv);
294 }
295
296 static void write_pte64(struct drm_i915_private *dev_priv,
297                 unsigned long index, u64 pte)
298 {
299         void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
300
301         writeq(pte, addr);
302 }
303
304 static inline int gtt_get_entry64(void *pt,
305                 struct intel_gvt_gtt_entry *e,
306                 unsigned long index, bool hypervisor_access, unsigned long gpa,
307                 struct intel_vgpu *vgpu)
308 {
309         const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
310         int ret;
311
312         if (WARN_ON(info->gtt_entry_size != 8))
313                 return -EINVAL;
314
315         if (hypervisor_access) {
316                 ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
317                                 (index << info->gtt_entry_size_shift),
318                                 &e->val64, 8);
319                 if (WARN_ON(ret))
320                         return ret;
321         } else if (!pt) {
322                 e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
323         } else {
324                 e->val64 = *((u64 *)pt + index);
325         }
326         return 0;
327 }
328
329 static inline int gtt_set_entry64(void *pt,
330                 struct intel_gvt_gtt_entry *e,
331                 unsigned long index, bool hypervisor_access, unsigned long gpa,
332                 struct intel_vgpu *vgpu)
333 {
334         const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
335         int ret;
336
337         if (WARN_ON(info->gtt_entry_size != 8))
338                 return -EINVAL;
339
340         if (hypervisor_access) {
341                 ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
342                                 (index << info->gtt_entry_size_shift),
343                                 &e->val64, 8);
344                 if (WARN_ON(ret))
345                         return ret;
346         } else if (!pt) {
347                 write_pte64(vgpu->gvt->dev_priv, index, e->val64);
348         } else {
349                 *((u64 *)pt + index) = e->val64;
350         }
351         return 0;
352 }
353
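/*
 * GTT_HAW is the address width covered by a GTT entry; the ADDR_*_MASK
 * values select the address bits of an entry for each supported page size
 * (1G/2M/64K/4K). GTT_SPTE_FLAG_64K_SPLITED marks a shadow PTE that was
 * produced by splitting a guest 64K entry.
 */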
354 #define GTT_HAW 46
355
356 #define ADDR_1G_MASK    GENMASK_ULL(GTT_HAW - 1, 30)
357 #define ADDR_2M_MASK    GENMASK_ULL(GTT_HAW - 1, 21)
358 #define ADDR_64K_MASK   GENMASK_ULL(GTT_HAW - 1, 16)
359 #define ADDR_4K_MASK    GENMASK_ULL(GTT_HAW - 1, 12)
360
361 #define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
362 #define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* splited 64K gtt entry */
363
364 #define GTT_64K_PTE_STRIDE 16
365
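/* Extract or update the pfn field of an entry according to its page size. */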
366 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
367 {
368         unsigned long pfn;
369
370         if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
371                 pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
372         else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
373                 pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
374         else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
375                 pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
376         else
377                 pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
378         return pfn;
379 }
380
381 static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
382 {
383         if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
384                 e->val64 &= ~ADDR_1G_MASK;
385                 pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
386         } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
387                 e->val64 &= ~ADDR_2M_MASK;
388                 pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
389         } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
390                 e->val64 &= ~ADDR_64K_MASK;
391                 pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
392         } else {
393                 e->val64 &= ~ADDR_4K_MASK;
394                 pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
395         }
396
397         e->val64 |= (pfn << PAGE_SHIFT);
398 }
399
400 static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
401 {
402         return !!(e->val64 & _PAGE_PSE);
403 }
404
405 static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
406 {
407         if (gen8_gtt_test_pse(e)) {
408                 switch (e->type) {
409                 case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
410                         e->val64 &= ~_PAGE_PSE;
411                         e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
412                         break;
413                 case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
414                         e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
415                         e->val64 &= ~_PAGE_PSE;
416                         break;
417                 default:
418                         WARN_ON(1);
419                 }
420         }
421 }
422
423 static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
424 {
425         if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
426                 return false;
427
428         return !!(e->val64 & GEN8_PDE_IPS_64K);
429 }
430
431 static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
432 {
433         if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
434                 return;
435
436         e->val64 &= ~GEN8_PDE_IPS_64K;
437 }
438
439 static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
440 {
441         /*
442          * i915 writes the PDP root pointer registers without the present
443          * bit set and that still works, so root pointer entries need to
444          * be treated specially.
445          */
446         if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
447                         || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
448                 return (e->val64 != 0);
449         else
450                 return (e->val64 & _PAGE_PRESENT);
451 }
452
453 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
454 {
455         e->val64 &= ~_PAGE_PRESENT;
456 }
457
458 static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
459 {
460         e->val64 |= _PAGE_PRESENT;
461 }
462
463 static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
464 {
465         return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
466 }
467
468 static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
469 {
470         e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
471 }
472
473 static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
474 {
475         e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
476 }
477
478 /*
479  * Per-platform GMA routines.
480  */
481 static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
482 {
483         unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);
484
485         trace_gma_index(__func__, gma, x);
486         return x;
487 }
488
489 #define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
490 static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
491 { \
492         unsigned long x = (exp); \
493         trace_gma_index(__func__, gma, x); \
494         return x; \
495 }
496
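/*
 * Index extractors for the gen8 page-table layout of a graphics memory
 * address: PTE index in bits 20:12, PDE in 29:21, PDP in 38:30 (4-level)
 * or 31:30 (3-level), PML4 in 47:39.
 */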
497 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
498 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
499 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
500 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
501 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
502
503 static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
504         .get_entry = gtt_get_entry64,
505         .set_entry = gtt_set_entry64,
506         .clear_present = gtt_entry_clear_present,
507         .set_present = gtt_entry_set_present,
508         .test_present = gen8_gtt_test_present,
509         .test_pse = gen8_gtt_test_pse,
510         .clear_pse = gen8_gtt_clear_pse,
511         .clear_ips = gen8_gtt_clear_ips,
512         .test_ips = gen8_gtt_test_ips,
513         .clear_64k_splited = gen8_gtt_clear_64k_splited,
514         .set_64k_splited = gen8_gtt_set_64k_splited,
515         .test_64k_splited = gen8_gtt_test_64k_splited,
516         .get_pfn = gen8_gtt_get_pfn,
517         .set_pfn = gen8_gtt_set_pfn,
518 };
519
520 static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
521         .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
522         .gma_to_pte_index = gen8_gma_to_pte_index,
523         .gma_to_pde_index = gen8_gma_to_pde_index,
524         .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
525         .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
526         .gma_to_pml4_index = gen8_gma_to_pml4_index,
527 };
528
529 /* Update the entry type according to the PSE and IPS bits. */
530 static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops,
531         struct intel_gvt_gtt_entry *entry, bool ips)
532 {
533         switch (entry->type) {
534         case GTT_TYPE_PPGTT_PDE_ENTRY:
535         case GTT_TYPE_PPGTT_PDP_ENTRY:
536                 if (pte_ops->test_pse(entry))
537                         entry->type = get_pse_type(entry->type);
538                 break;
539         case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
540                 if (ips)
541                         entry->type = get_pse_type(entry->type);
542                 break;
543         default:
544                 GEM_BUG_ON(!gtt_type_is_entry(entry->type));
545         }
546
547         GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
548 }
549
550 /*
551  * MM helpers.
552  */
553 static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
554                 struct intel_gvt_gtt_entry *entry, unsigned long index,
555                 bool guest)
556 {
557         struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
558
559         GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
560
561         entry->type = mm->ppgtt_mm.root_entry_type;
562         pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
563                            mm->ppgtt_mm.shadow_pdps,
564                            entry, index, false, 0, mm->vgpu);
565         update_entry_type_for_real(pte_ops, entry, false);
566 }
567
568 static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
569                 struct intel_gvt_gtt_entry *entry, unsigned long index)
570 {
571         _ppgtt_get_root_entry(mm, entry, index, true);
572 }
573
574 static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
575                 struct intel_gvt_gtt_entry *entry, unsigned long index)
576 {
577         _ppgtt_get_root_entry(mm, entry, index, false);
578 }
579
580 static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
581                 struct intel_gvt_gtt_entry *entry, unsigned long index,
582                 bool guest)
583 {
584         struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
585
586         pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
587                            mm->ppgtt_mm.shadow_pdps,
588                            entry, index, false, 0, mm->vgpu);
589 }
590
591 static inline void ppgtt_set_guest_root_entry(struct intel_vgpu_mm *mm,
592                 struct intel_gvt_gtt_entry *entry, unsigned long index)
593 {
594         _ppgtt_set_root_entry(mm, entry, index, true);
595 }
596
597 static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
598                 struct intel_gvt_gtt_entry *entry, unsigned long index)
599 {
600         _ppgtt_set_root_entry(mm, entry, index, false);
601 }
602
603 static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
604                 struct intel_gvt_gtt_entry *entry, unsigned long index)
605 {
606         struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
607
608         GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
609
610         entry->type = GTT_TYPE_GGTT_PTE;
611         pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
612                            false, 0, mm->vgpu);
613 }
614
615 static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
616                 struct intel_gvt_gtt_entry *entry, unsigned long index)
617 {
618         struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
619
620         GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
621
622         pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
623                            false, 0, mm->vgpu);
624 }
625
626 static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
627                 struct intel_gvt_gtt_entry *entry, unsigned long index)
628 {
629         struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
630
631         GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
632
633         pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
634 }
635
636 static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
637                 struct intel_gvt_gtt_entry *entry, unsigned long index)
638 {
639         struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
640
641         GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
642
643         pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
644 }
645
646 /*
647  * PPGTT shadow page table helpers.
648  */
649 static inline int ppgtt_spt_get_entry(
650                 struct intel_vgpu_ppgtt_spt *spt,
651                 void *page_table, int type,
652                 struct intel_gvt_gtt_entry *e, unsigned long index,
653                 bool guest)
654 {
655         struct intel_gvt *gvt = spt->vgpu->gvt;
656         struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
657         int ret;
658
659         e->type = get_entry_type(type);
660
661         if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
662                 return -EINVAL;
663
664         ret = ops->get_entry(page_table, e, index, guest,
665                         spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
666                         spt->vgpu);
667         if (ret)
668                 return ret;
669
670         update_entry_type_for_real(ops, e, guest ?
671                                    spt->guest_page.pde_ips : false);
672
673         gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
674                     type, e->type, index, e->val64);
675         return 0;
676 }
677
678 static inline int ppgtt_spt_set_entry(
679                 struct intel_vgpu_ppgtt_spt *spt,
680                 void *page_table, int type,
681                 struct intel_gvt_gtt_entry *e, unsigned long index,
682                 bool guest)
683 {
684         struct intel_gvt *gvt = spt->vgpu->gvt;
685         struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
686
687         if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
688                 return -EINVAL;
689
690         gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
691                     type, e->type, index, e->val64);
692
693         return ops->set_entry(page_table, e, index, guest,
694                         spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
695                         spt->vgpu);
696 }
697
698 #define ppgtt_get_guest_entry(spt, e, index) \
699         ppgtt_spt_get_entry(spt, NULL, \
700                 spt->guest_page.type, e, index, true)
701
702 #define ppgtt_set_guest_entry(spt, e, index) \
703         ppgtt_spt_set_entry(spt, NULL, \
704                 spt->guest_page.type, e, index, true)
705
706 #define ppgtt_get_shadow_entry(spt, e, index) \
707         ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
708                 spt->shadow_page.type, e, index, false)
709
710 #define ppgtt_set_shadow_entry(spt, e, index) \
711         ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
712                 spt->shadow_page.type, e, index, false)
713
714 static void *alloc_spt(gfp_t gfp_mask)
715 {
716         struct intel_vgpu_ppgtt_spt *spt;
717
718         spt = kzalloc(sizeof(*spt), gfp_mask);
719         if (!spt)
720                 return NULL;
721
722         spt->shadow_page.page = alloc_page(gfp_mask);
723         if (!spt->shadow_page.page) {
724                 kfree(spt);
725                 return NULL;
726         }
727         return spt;
728 }
729
730 static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
731 {
732         __free_page(spt->shadow_page.page);
733         kfree(spt);
734 }
735
736 static int detach_oos_page(struct intel_vgpu *vgpu,
737                 struct intel_vgpu_oos_page *oos_page);
738
739 static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
740 {
741         struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
742
743         trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
744
745         dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
746                        PCI_DMA_BIDIRECTIONAL);
747
748         radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
749
750         if (spt->guest_page.gfn) {
751                 if (spt->guest_page.oos_page)
752                         detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
753
754                 intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
755         }
756
757         list_del_init(&spt->post_shadow_list);
758         free_spt(spt);
759 }
760
761 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
762 {
763         struct intel_vgpu_ppgtt_spt *spt, *spn;
764         struct radix_tree_iter iter;
765         LIST_HEAD(all_spt);
766         void __rcu **slot;
767
768         rcu_read_lock();
769         radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
770                 spt = radix_tree_deref_slot(slot);
771                 list_move(&spt->post_shadow_list, &all_spt);
772         }
773         rcu_read_unlock();
774
775         list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
776                 ppgtt_free_spt(spt);
777 }
778
779 static int ppgtt_handle_guest_write_page_table_bytes(
780                 struct intel_vgpu_ppgtt_spt *spt,
781                 u64 pa, void *p_data, int bytes);
782
783 static int ppgtt_write_protection_handler(
784                 struct intel_vgpu_page_track *page_track,
785                 u64 gpa, void *data, int bytes)
786 {
787         struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
788         int ret;
789
790         if (bytes != 4 && bytes != 8)
791                 return -EINVAL;
792
793         ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
794         if (ret)
795                 return ret;
796
797         return 0;
798 }
799
800 /* Find a spt by guest gfn. */
801 static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
802                 struct intel_vgpu *vgpu, unsigned long gfn)
803 {
804         struct intel_vgpu_page_track *track;
805
806         track = intel_vgpu_find_page_track(vgpu, gfn);
807         if (track && track->handler == ppgtt_write_protection_handler)
808                 return track->priv_data;
809
810         return NULL;
811 }
812
813 /* Find the spt by shadow page mfn. */
814 static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
815                 struct intel_vgpu *vgpu, unsigned long mfn)
816 {
817         return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
818 }
819
820 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
821
822 /* Allocate shadow page table without guest page. */
823 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
824                 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
825 {
826         struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
827         struct intel_vgpu_ppgtt_spt *spt = NULL;
828         dma_addr_t daddr;
829         int ret;
830
831 retry:
832         spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
833         if (!spt) {
834                 if (reclaim_one_ppgtt_mm(vgpu->gvt))
835                         goto retry;
836
837                 gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
838                 return ERR_PTR(-ENOMEM);
839         }
840
841         spt->vgpu = vgpu;
842         atomic_set(&spt->refcount, 1);
843         INIT_LIST_HEAD(&spt->post_shadow_list);
844
845         /*
846          * Init shadow_page.
847          */
848         spt->shadow_page.type = type;
849         daddr = dma_map_page(kdev, spt->shadow_page.page,
850                              0, 4096, PCI_DMA_BIDIRECTIONAL);
851         if (dma_mapping_error(kdev, daddr)) {
852                 gvt_vgpu_err("fail to map dma addr\n");
853                 ret = -EINVAL;
854                 goto err_free_spt;
855         }
856         spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
857         spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
858
859         ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
860         if (ret)
861                 goto err_unmap_dma;
862
863         return spt;
864
865 err_unmap_dma:
866         dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
867 err_free_spt:
868         free_spt(spt);
869         return ERR_PTR(ret);
870 }
871
872 /* Allocate shadow page table associated with specific gfn. */
873 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
874                 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
875                 unsigned long gfn, bool guest_pde_ips)
876 {
877         struct intel_vgpu_ppgtt_spt *spt;
878         int ret;
879
880         spt = ppgtt_alloc_spt(vgpu, type);
881         if (IS_ERR(spt))
882                 return spt;
883
884         /*
885          * Init guest_page.
886          */
887         ret = intel_vgpu_register_page_track(vgpu, gfn,
888                         ppgtt_write_protection_handler, spt);
889         if (ret) {
890                 ppgtt_free_spt(spt);
891                 return ERR_PTR(ret);
892         }
893
894         spt->guest_page.type = type;
895         spt->guest_page.gfn = gfn;
896         spt->guest_page.pde_ips = guest_pde_ips;
897
898         trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
899
900         return spt;
901 }
902
903 #define pt_entry_size_shift(spt) \
904         ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
905
906 #define pt_entries(spt) \
907         (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
908
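/*
 * Entry iterators. When a page table holds 64K entries (its parent PDE has
 * IPS set), valid entries sit at a stride of GTT_64K_PTE_STRIDE (16), so
 * the walkers step 16 slots at a time in that case.
 */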
909 #define for_each_present_guest_entry(spt, e, i) \
910         for (i = 0; i < pt_entries(spt); \
911              i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
912                 if (!ppgtt_get_guest_entry(spt, e, i) && \
913                     spt->vgpu->gvt->gtt.pte_ops->test_present(e))
914
915 #define for_each_present_shadow_entry(spt, e, i) \
916         for (i = 0; i < pt_entries(spt); \
917              i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
918                 if (!ppgtt_get_shadow_entry(spt, e, i) && \
919                     spt->vgpu->gvt->gtt.pte_ops->test_present(e))
920
921 #define for_each_shadow_entry(spt, e, i) \
922         for (i = 0; i < pt_entries(spt); \
923              i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
924                 if (!ppgtt_get_shadow_entry(spt, e, i))
925
926 static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
927 {
928         int v = atomic_read(&spt->refcount);
929
930         trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
931         atomic_inc(&spt->refcount);
932 }
933
934 static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
935 {
936         int v = atomic_read(&spt->refcount);
937
938         trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
939         return atomic_dec_return(&spt->refcount);
940 }
941
942 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
943
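/*
 * Given a non-leaf shadow entry, find the shadow page table it points to
 * and drop a reference to it; entries pointing at a scratch page table are
 * ignored.
 */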
944 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
945                 struct intel_gvt_gtt_entry *e)
946 {
947         struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
948         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
949         struct intel_vgpu_ppgtt_spt *s;
950         enum intel_gvt_gtt_type cur_pt_type;
951
952         GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
953
954         if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
955                 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
956                 cur_pt_type = get_next_pt_type(e->type);
957
958                 if (!gtt_type_is_pt(cur_pt_type) ||
959                                 !gtt_type_is_pt(cur_pt_type + 1)) {
960                         drm_WARN(&i915->drm, 1,
961                                  "Invalid page table type, cur_pt_type is: %d\n",
962                                  cur_pt_type);
963                         return -EINVAL;
964                 }
965
966                 cur_pt_type += 1;
967
968                 if (ops->get_pfn(e) ==
969                         vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
970                         return 0;
971         }
972         s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
973         if (!s) {
974                 gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
975                                 ops->get_pfn(e));
976                 return -ENXIO;
977         }
978         return ppgtt_invalidate_spt(s);
979 }
980
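/*
 * Release the DMA mapping behind a shadow PTE, unless it points at the
 * scratch page or is uninitialized.
 */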
981 static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
982                 struct intel_gvt_gtt_entry *entry)
983 {
984         struct intel_vgpu *vgpu = spt->vgpu;
985         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
986         unsigned long pfn;
987         int type;
988
989         pfn = ops->get_pfn(entry);
990         type = spt->shadow_page.type;
991
992         /* Uninitialized spte or unshadowed spte. */
993         if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
994                 return;
995
996         intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
997 }
998
999 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
1000 {
1001         struct intel_vgpu *vgpu = spt->vgpu;
1002         struct intel_gvt_gtt_entry e;
1003         unsigned long index;
1004         int ret;
1005
1006         trace_spt_change(spt->vgpu->id, "die", spt,
1007                         spt->guest_page.gfn, spt->shadow_page.type);
1008
1009         if (ppgtt_put_spt(spt) > 0)
1010                 return 0;
1011
1012         for_each_present_shadow_entry(spt, &e, index) {
1013                 switch (e.type) {
1014                 case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
1015                         gvt_vdbg_mm("invalidate 4K entry\n");
1016                         ppgtt_invalidate_pte(spt, &e);
1017                         break;
1018                 case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
1019                         /* We don't set up 64K shadow entries so far. */
1020                         WARN(1, "suspicious 64K gtt entry\n");
1021                         continue;
1022                 case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
1023                         gvt_vdbg_mm("invalidate 2M entry\n");
1024                         continue;
1025                 case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
1026                         WARN(1, "GVT doesn't support 1GB page\n");
1027                         continue;
1028                 case GTT_TYPE_PPGTT_PML4_ENTRY:
1029                 case GTT_TYPE_PPGTT_PDP_ENTRY:
1030                 case GTT_TYPE_PPGTT_PDE_ENTRY:
1031                         gvt_vdbg_mm("invalidate PML4/PDP/PDE entry\n");
1032                         ret = ppgtt_invalidate_spt_by_shadow_entry(
1033                                         spt->vgpu, &e);
1034                         if (ret)
1035                                 goto fail;
1036                         break;
1037                 default:
1038                         GEM_BUG_ON(1);
1039                 }
1040         }
1041
1042         trace_spt_change(spt->vgpu->id, "release", spt,
1043                          spt->guest_page.gfn, spt->shadow_page.type);
1044         ppgtt_free_spt(spt);
1045         return 0;
1046 fail:
1047         gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
1048                         spt, e.val64, e.type);
1049         return ret;
1050 }
1051
1052 static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
1053 {
1054         struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
1055
1056         if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
1057                 u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
1058                         GAMW_ECO_ENABLE_64K_IPS_FIELD;
1059
1060                 return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
1061         } else if (INTEL_GEN(dev_priv) >= 11) {
1062                 /* 64K paging is now controlled only by the IPS bit in the PTE. */
1063                 return true;
1064         } else
1065                 return false;
1066 }
1067
1068 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
1069
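/*
 * Find or create the shadow page table behind a guest page-table entry.
 * An existing SPT gains a reference and is re-shadowed if its IPS setting
 * changed; a new SPT is write-protected and populated before it is
 * returned.
 */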
1070 static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
1071                 struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
1072 {
1073         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1074         struct intel_vgpu_ppgtt_spt *spt = NULL;
1075         bool ips = false;
1076         int ret;
1077
1078         GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
1079
1080         if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
1081                 ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
1082
1083         spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
1084         if (spt) {
1085                 ppgtt_get_spt(spt);
1086
1087                 if (ips != spt->guest_page.pde_ips) {
1088                         spt->guest_page.pde_ips = ips;
1089
1090                         gvt_dbg_mm("reshadow PDE since ips changed\n");
1091                         clear_page(spt->shadow_page.vaddr);
1092                         ret = ppgtt_populate_spt(spt);
1093                         if (ret) {
1094                                 ppgtt_put_spt(spt);
1095                                 goto err;
1096                         }
1097                 }
1098         } else {
1099                 int type = get_next_pt_type(we->type);
1100
1101                 if (!gtt_type_is_pt(type)) {
1102                         ret = -EINVAL;
1103                         goto err;
1104                 }
1105
1106                 spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
1107                 if (IS_ERR(spt)) {
1108                         ret = PTR_ERR(spt);
1109                         goto err;
1110                 }
1111
1112                 ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
1113                 if (ret)
1114                         goto err_free_spt;
1115
1116                 ret = ppgtt_populate_spt(spt);
1117                 if (ret)
1118                         goto err_free_spt;
1119
1120                 trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
1121                                  spt->shadow_page.type);
1122         }
1123         return spt;
1124
1125 err_free_spt:
1126         ppgtt_free_spt(spt);
1127         spt = NULL;
1128 err:
1129         gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1130                      spt, we->val64, we->type);
1131         return ERR_PTR(ret);
1132 }
1133
1134 static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
1135                 struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
1136 {
1137         struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
1138
1139         se->type = ge->type;
1140         se->val64 = ge->val64;
1141
1142         /* We always split 64KB pages, so clear IPS in the shadow PDE. */
1143         if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
1144                 ops->clear_ips(se);
1145
1146         ops->set_pfn(se, s->shadow_page.mfn);
1147 }
1148
1149 /**
1150  * is_2MB_gtt_possible - check whether a guest 2MB page can be shadowed
1151  * @vgpu: target vgpu
1152  * @entry: target pfn's gtt entry
1153  *
1154  * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions
1155  * are not met, negative on error.
1156  */
1157 static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
1158         struct intel_gvt_gtt_entry *entry)
1159 {
1160         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1161         unsigned long pfn;
1162
1163         if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
1164                 return 0;
1165
1166         pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
1167         if (pfn == INTEL_GVT_INVALID_ADDR)
1168                 return -EINVAL;
1169
1170         return PageTransHuge(pfn_to_page(pfn));
1171 }
1172
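/*
 * Shadow a guest 2MB entry with a sub page table of 4K mappings when a
 * host 2MB mapping cannot be used; the sub table's mfn replaces the pfn
 * in the shadow PDE.
 */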
1173 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
1174         struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1175         struct intel_gvt_gtt_entry *se)
1176 {
1177         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1178         struct intel_vgpu_ppgtt_spt *sub_spt;
1179         struct intel_gvt_gtt_entry sub_se;
1180         unsigned long start_gfn;
1181         dma_addr_t dma_addr;
1182         unsigned long sub_index;
1183         int ret;
1184
1185         gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
1186
1187         start_gfn = ops->get_pfn(se);
1188
1189         sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
1190         if (IS_ERR(sub_spt))
1191                 return PTR_ERR(sub_spt);
1192
1193         for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
1194                 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
1195                                 start_gfn + sub_index, PAGE_SIZE, &dma_addr);
1196                 if (ret) {
1197                         ppgtt_invalidate_spt(spt);
1198                         return ret;
1199                 }
1200                 sub_se.val64 = se->val64;
1201
1202                 /* Copy the PAT field from PDE. */
1203                 sub_se.val64 &= ~_PAGE_PAT;
1204                 sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
1205
1206                 ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
1207                 ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
1208         }
1209
1210         /* Clear dirty field. */
1211         se->val64 &= ~_PAGE_DIRTY;
1212
1213         ops->clear_pse(se);
1214         ops->clear_ips(se);
1215         ops->set_pfn(se, sub_spt->shadow_page.mfn);
1216         ppgtt_set_shadow_entry(spt, se, index);
1217         return 0;
1218 }
1219
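/*
 * Shadow a guest 64K entry as GTT_64K_PTE_STRIDE (16) consecutive 4K
 * shadow entries, each flagged as part of a split 64K page.
 */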
1220 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
1221         struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1222         struct intel_gvt_gtt_entry *se)
1223 {
1224         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1225         struct intel_gvt_gtt_entry entry = *se;
1226         unsigned long start_gfn;
1227         dma_addr_t dma_addr;
1228         int i, ret;
1229
1230         gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
1231
1232         GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
1233
1234         start_gfn = ops->get_pfn(se);
1235
1236         entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
1237         ops->set_64k_splited(&entry);
1238
1239         for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
1240                 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu,
1241                                         start_gfn + i, PAGE_SIZE, &dma_addr);
1242                 if (ret)
1243                         return ret;
1244
1245                 ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
1246                 ppgtt_set_shadow_entry(spt, &entry, index + i);
1247         }
1248         return 0;
1249 }
1250
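/*
 * Shadow a single present guest leaf entry: DMA-map the guest page
 * (splitting 64K entries, and 2M entries that can't be mapped as huge
 * pages) and write the host address into the shadow entry.
 */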
1251 static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
1252         struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1253         struct intel_gvt_gtt_entry *ge)
1254 {
1255         struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
1256         struct intel_gvt_gtt_entry se = *ge;
1257         unsigned long gfn, page_size = PAGE_SIZE;
1258         dma_addr_t dma_addr;
1259         int ret;
1260
1261         if (!pte_ops->test_present(ge))
1262                 return 0;
1263
1264         gfn = pte_ops->get_pfn(ge);
1265
1266         switch (ge->type) {
1267         case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
1268                 gvt_vdbg_mm("shadow 4K gtt entry\n");
1269                 break;
1270         case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
1271                 gvt_vdbg_mm("shadow 64K gtt entry\n");
1272                 /*
1273                  * The layout of a 64K page is special: the page size is
1274                  * controlled by the upper PDE. To keep it simple, we always
1275                  * split a 64K page into smaller 4K pages in the shadow PT.
1276                  */
1277                 return split_64KB_gtt_entry(vgpu, spt, index, &se);
1278         case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
1279                 gvt_vdbg_mm("shadow 2M gtt entry\n");
1280                 ret = is_2MB_gtt_possible(vgpu, ge);
1281                 if (ret == 0)
1282                         return split_2MB_gtt_entry(vgpu, spt, index, &se);
1283                 else if (ret < 0)
1284                         return ret;
1285                 page_size = I915_GTT_PAGE_SIZE_2M;
1286                 break;
1287         case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
1288                 gvt_vgpu_err("GVT doesn't support 1GB entry\n");
1289                 return -EINVAL;
1290         default:
1291                 GEM_BUG_ON(1);
1292         }
1293
1294         /* direct shadow */
1295         ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn, page_size,
1296                                                       &dma_addr);
1297         if (ret)
1298                 return -ENXIO;
1299
1300         pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
1301         ppgtt_set_shadow_entry(spt, &se, index);
1302         return 0;
1303 }
1304
1305 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
1306 {
1307         struct intel_vgpu *vgpu = spt->vgpu;
1308         struct intel_gvt *gvt = vgpu->gvt;
1309         struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1310         struct intel_vgpu_ppgtt_spt *s;
1311         struct intel_gvt_gtt_entry se, ge;
1312         unsigned long gfn, i;
1313         int ret;
1314
1315         trace_spt_change(spt->vgpu->id, "born", spt,
1316                          spt->guest_page.gfn, spt->shadow_page.type);
1317
1318         for_each_present_guest_entry(spt, &ge, i) {
1319                 if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
1320                         s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1321                         if (IS_ERR(s)) {
1322                                 ret = PTR_ERR(s);
1323                                 goto fail;
1324                         }
1325                         ppgtt_get_shadow_entry(spt, &se, i);
1326                         ppgtt_generate_shadow_entry(&se, s, &ge);
1327                         ppgtt_set_shadow_entry(spt, &se, i);
1328                 } else {
1329                         gfn = ops->get_pfn(&ge);
1330                         if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
1331                                 ops->set_pfn(&se, gvt->gtt.scratch_mfn);
1332                                 ppgtt_set_shadow_entry(spt, &se, i);
1333                                 continue;
1334                         }
1335
1336                         ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
1337                         if (ret)
1338                                 goto fail;
1339                 }
1340         }
1341         return 0;
1342 fail:
1343         gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1344                         spt, ge.val64, ge.type);
1345         return ret;
1346 }
1347
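/*
 * Tear down the shadow state behind a guest entry that is being removed:
 * non-leaf entries drop their child shadow page table, leaf entries have
 * their DMA mapping released. Non-present and scratch entries are ignored.
 */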
1348 static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
1349                 struct intel_gvt_gtt_entry *se, unsigned long index)
1350 {
1351         struct intel_vgpu *vgpu = spt->vgpu;
1352         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1353         int ret;
1354
1355         trace_spt_guest_change(spt->vgpu->id, "remove", spt,
1356                                spt->shadow_page.type, se->val64, index);
1357
1358         gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
1359                     se->type, index, se->val64);
1360
1361         if (!ops->test_present(se))
1362                 return 0;
1363
1364         if (ops->get_pfn(se) ==
1365             vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
1366                 return 0;
1367
1368         if (gtt_type_is_pt(get_next_pt_type(se->type))) {
1369                 struct intel_vgpu_ppgtt_spt *s =
1370                         intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
1371                 if (!s) {
1372                         gvt_vgpu_err("fail to find guest page\n");
1373                         ret = -ENXIO;
1374                         goto fail;
1375                 }
1376                 ret = ppgtt_invalidate_spt(s);
1377                 if (ret)
1378                         goto fail;
1379         } else {
1380                 /* We don't setup 64K shadow entry so far. */
1381                 /* We don't set up 64K shadow entries so far. */
1382                      "suspicious 64K entry\n");
1383                 ppgtt_invalidate_pte(spt, se);
1384         }
1385
1386         return 0;
1387 fail:
1388         gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1389                         spt, se->val64, se->type);
1390         return ret;
1391 }
1392
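/*
 * Shadow a guest entry that is being added: non-leaf entries get a child
 * shadow page table (found or newly created), leaf entries are populated
 * directly.
 */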
1393 static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
1394                 struct intel_gvt_gtt_entry *we, unsigned long index)
1395 {
1396         struct intel_vgpu *vgpu = spt->vgpu;
1397         struct intel_gvt_gtt_entry m;
1398         struct intel_vgpu_ppgtt_spt *s;
1399         int ret;
1400
1401         trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
1402                                we->val64, index);
1403
1404         gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
1405                     we->type, index, we->val64);
1406
1407         if (gtt_type_is_pt(get_next_pt_type(we->type))) {
1408                 s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
1409                 if (IS_ERR(s)) {
1410                         ret = PTR_ERR(s);
1411                         goto fail;
1412                 }
1413                 ppgtt_get_shadow_entry(spt, &m, index);
1414                 ppgtt_generate_shadow_entry(&m, s, we);
1415                 ppgtt_set_shadow_entry(spt, &m, index);
1416         } else {
1417                 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
1418                 if (ret)
1419                         goto fail;
1420         }
1421         return 0;
1422 fail:
1423         gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
1424                 spt, we->val64, we->type);
1425         return ret;
1426 }
1427
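/*
 * Bring an out-of-sync page back in sync: compare the cached copy in
 * oos_page->mem with the current guest page contents and re-shadow every
 * entry that changed or was marked in the post-shadow bitmap.
 */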
1428 static int sync_oos_page(struct intel_vgpu *vgpu,
1429                 struct intel_vgpu_oos_page *oos_page)
1430 {
1431         const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1432         struct intel_gvt *gvt = vgpu->gvt;
1433         struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1434         struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1435         struct intel_gvt_gtt_entry old, new;
1436         int index;
1437         int ret;
1438
1439         trace_oos_change(vgpu->id, "sync", oos_page->id,
1440                          spt, spt->guest_page.type);
1441
1442         old.type = new.type = get_entry_type(spt->guest_page.type);
1443         old.val64 = new.val64 = 0;
1444
1445         for (index = 0; index < (I915_GTT_PAGE_SIZE >>
1446                                 info->gtt_entry_size_shift); index++) {
1447                 ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
1448                 ops->get_entry(NULL, &new, index, true,
1449                                spt->guest_page.gfn << PAGE_SHIFT, vgpu);
1450
1451                 if (old.val64 == new.val64
1452                         && !test_and_clear_bit(index, spt->post_shadow_bitmap))
1453                         continue;
1454
1455                 trace_oos_sync(vgpu->id, oos_page->id,
1456                                 spt, spt->guest_page.type,
1457                                 new.val64, index);
1458
1459                 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
1460                 if (ret)
1461                         return ret;
1462
1463                 ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
1464         }
1465
1466         spt->guest_page.write_cnt = 0;
1467         list_del_init(&spt->post_shadow_list);
1468         return 0;
1469 }
1470
1471 static int detach_oos_page(struct intel_vgpu *vgpu,
1472                 struct intel_vgpu_oos_page *oos_page)
1473 {
1474         struct intel_gvt *gvt = vgpu->gvt;
1475         struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1476
1477         trace_oos_change(vgpu->id, "detach", oos_page->id,
1478                          spt, spt->guest_page.type);
1479
1480         spt->guest_page.write_cnt = 0;
1481         spt->guest_page.oos_page = NULL;
1482         oos_page->spt = NULL;
1483
1484         list_del_init(&oos_page->vm_list);
1485         list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
1486
1487         return 0;
1488 }
1489
1490 static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
1491                 struct intel_vgpu_ppgtt_spt *spt)
1492 {
1493         struct intel_gvt *gvt = spt->vgpu->gvt;
1494         int ret;
1495
1496         ret = intel_gvt_hypervisor_read_gpa(spt->vgpu,
1497                         spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
1498                         oos_page->mem, I915_GTT_PAGE_SIZE);
1499         if (ret)
1500                 return ret;
1501
1502         oos_page->spt = spt;
1503         spt->guest_page.oos_page = oos_page;
1504
1505         list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
1506
1507         trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
1508                          spt, spt->guest_page.type);
1509         return 0;
1510 }
1511
1512 static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
1513 {
1514         struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1515         int ret;
1516
1517         ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
1518         if (ret)
1519                 return ret;
1520
1521         trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
1522                          spt, spt->guest_page.type);
1523
1524         list_del_init(&oos_page->vm_list);
1525         return sync_oos_page(spt->vgpu, oos_page);
1526 }
1527
1528 static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
1529 {
1530         struct intel_gvt *gvt = spt->vgpu->gvt;
1531         struct intel_gvt_gtt *gtt = &gvt->gtt;
1532         struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1533         int ret;
1534
1535                 WARN(oos_page, "shadow PPGTT page already has an oos page\n");
1536
1537         if (list_empty(&gtt->oos_page_free_list_head)) {
1538                 oos_page = container_of(gtt->oos_page_use_list_head.next,
1539                         struct intel_vgpu_oos_page, list);
1540                 ret = ppgtt_set_guest_page_sync(oos_page->spt);
1541                 if (ret)
1542                         return ret;
1543                 ret = detach_oos_page(spt->vgpu, oos_page);
1544                 if (ret)
1545                         return ret;
1546         } else
1547                 oos_page = container_of(gtt->oos_page_free_list_head.next,
1548                         struct intel_vgpu_oos_page, list);
1549         return attach_oos_page(oos_page, spt);
1550 }
1551
1552 static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
1553 {
1554         struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1555
1556         if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
1557                 return -EINVAL;
1558
1559         trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
1560                          spt, spt->guest_page.type);
1561
1562         list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
1563         return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
1564 }
1565
1566 /**
1567  * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages for a vGPU
1568  * @vgpu: a vGPU
1569  *
1570  * This function is called before submitting a guest workload to the host,
1571  * to sync all the out-of-sync shadow pages of the vGPU.
1572  *
1573  * Returns:
1574  * Zero on success, negative error code if failed.
1575  */
1576 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
1577 {
1578         struct list_head *pos, *n;
1579         struct intel_vgpu_oos_page *oos_page;
1580         int ret;
1581
1582         if (!enable_out_of_sync)
1583                 return 0;
1584
1585         list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
1586                 oos_page = container_of(pos,
1587                                 struct intel_vgpu_oos_page, vm_list);
1588                 ret = ppgtt_set_guest_page_sync(oos_page->spt);
1589                 if (ret)
1590                         return ret;
1591         }
1592         return 0;
1593 }
1594
1595 /*
1596  * The heart of PPGTT shadow page table.
1597  */
1598 static int ppgtt_handle_guest_write_page_table(
1599                 struct intel_vgpu_ppgtt_spt *spt,
1600                 struct intel_gvt_gtt_entry *we, unsigned long index)
1601 {
1602         struct intel_vgpu *vgpu = spt->vgpu;
1603         int type = spt->shadow_page.type;
1604         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1605         struct intel_gvt_gtt_entry old_se;
1606         int new_present;
1607         int i, ret;
1608
1609         new_present = ops->test_present(we);
1610
1611         /*
1612          * Add the new entry first and then remove the old one, which
1613          * guarantees the ppgtt table stays valid during the window between
1614          * addition and removal.
1615          */
1616         ppgtt_get_shadow_entry(spt, &old_se, index);
1617
1618         if (new_present) {
1619                 ret = ppgtt_handle_guest_entry_add(spt, we, index);
1620                 if (ret)
1621                         goto fail;
1622         }
1623
1624         ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
1625         if (ret)
1626                 goto fail;
1627
1628         if (!new_present) {
1629                 /* For 64KB split entries, we need to clear them all. */
1630                 if (ops->test_64k_splited(&old_se) &&
1631                     !(index % GTT_64K_PTE_STRIDE)) {
1632                         gvt_vdbg_mm("remove split 64K shadow entries\n");
1633                         for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
1634                                 ops->clear_64k_splited(&old_se);
1635                                 ops->set_pfn(&old_se,
1636                                         vgpu->gtt.scratch_pt[type].page_mfn);
1637                                 ppgtt_set_shadow_entry(spt, &old_se, index + i);
1638                         }
1639                 } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
1640                            old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
1641                         ops->clear_pse(&old_se);
1642                         ops->set_pfn(&old_se,
1643                                      vgpu->gtt.scratch_pt[type].page_mfn);
1644                         ppgtt_set_shadow_entry(spt, &old_se, index);
1645                 } else {
1646                         ops->set_pfn(&old_se,
1647                                      vgpu->gtt.scratch_pt[type].page_mfn);
1648                         ppgtt_set_shadow_entry(spt, &old_se, index);
1649                 }
1650         }
1651
1652         return 0;
1653 fail:
1654         gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
1655                         spt, we->val64, we->type);
1656         return ret;
1657 }
1658
1659
1660
1661 static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
1662 {
1663         return enable_out_of_sync
1664                 && gtt_type_is_pte_pt(spt->guest_page.type)
1665                 && spt->guest_page.write_cnt >= 2;
1666 }
1667
1668 static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
1669                 unsigned long index)
1670 {
1671         set_bit(index, spt->post_shadow_bitmap);
1672         if (!list_empty(&spt->post_shadow_list))
1673                 return;
1674
1675         list_add_tail(&spt->post_shadow_list,
1676                         &spt->vgpu->gtt.post_shadow_list_head);
1677 }
1678
1679 /**
1680  * intel_vgpu_flush_post_shadow - flush the post shadow transactions
1681  * @vgpu: a vGPU
1682  *
1683  * This function is called before submitting a guest workload to host,
1684  * to flush all the post shadows for a vGPU.
1685  *
1686  * Returns:
1687  * Zero on success, negative error code if failed.
1688  */
1689 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1690 {
1691         struct list_head *pos, *n;
1692         struct intel_vgpu_ppgtt_spt *spt;
1693         struct intel_gvt_gtt_entry ge;
1694         unsigned long index;
1695         int ret;
1696
1697         list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
1698                 spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
1699                                 post_shadow_list);
1700
1701                 for_each_set_bit(index, spt->post_shadow_bitmap,
1702                                 GTT_ENTRY_NUM_IN_ONE_PAGE) {
1703                         ppgtt_get_guest_entry(spt, &ge, index);
1704
1705                         ret = ppgtt_handle_guest_write_page_table(spt,
1706                                                         &ge, index);
1707                         if (ret)
1708                                 return ret;
1709                         clear_bit(index, spt->post_shadow_bitmap);
1710                 }
1711                 list_del_init(&spt->post_shadow_list);
1712         }
1713         return 0;
1714 }
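
/*
 * Illustrative sketch (not part of the driver): how the two write-back
 * helpers above are meant to be paired before a guest workload is
 * submitted to the host.  The wrapper name prepare_vgpu_ppgtt() is
 * hypothetical; the real call sites live in the GVT workload scheduler.
 *
 *	static int prepare_vgpu_ppgtt(struct intel_vgpu *vgpu)
 *	{
 *		int ret;
 *
 *		// bring the out-of-sync guest PTE pages back in sync
 *		ret = intel_vgpu_sync_oos_pages(vgpu);
 *		if (ret)
 *			return ret;
 *
 *		// replay the guest writes deferred as post shadows
 *		return intel_vgpu_flush_post_shadow(vgpu);
 *	}
 */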
1715
1716 static int ppgtt_handle_guest_write_page_table_bytes(
1717                 struct intel_vgpu_ppgtt_spt *spt,
1718                 u64 pa, void *p_data, int bytes)
1719 {
1720         struct intel_vgpu *vgpu = spt->vgpu;
1721         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1722         const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1723         struct intel_gvt_gtt_entry we, se;
1724         unsigned long index;
1725         int ret;
1726
1727         index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
1728
1729         ppgtt_get_guest_entry(spt, &we, index);
1730
1731         /*
1732          * For a page table that uses 64K GTT entries, only PTE#0, PTE#16,
1733          * PTE#32, ..., PTE#496 are used. Writes to the unused PTEs should
1734          * be ignored.
1735          */
1736         if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
1737             (index % GTT_64K_PTE_STRIDE)) {
1738                 gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
1739                             index);
1740                 return 0;
1741         }
1742
1743         if (bytes == info->gtt_entry_size) {
1744                 ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
1745                 if (ret)
1746                         return ret;
1747         } else {
1748                 if (!test_bit(index, spt->post_shadow_bitmap)) {
1749                         int type = spt->shadow_page.type;
1750
1751                         ppgtt_get_shadow_entry(spt, &se, index);
1752                         ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
1753                         if (ret)
1754                                 return ret;
1755                         ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1756                         ppgtt_set_shadow_entry(spt, &se, index);
1757                 }
1758                 ppgtt_set_post_shadow(spt, index);
1759         }
1760
1761         if (!enable_out_of_sync)
1762                 return 0;
1763
1764         spt->guest_page.write_cnt++;
1765
1766         if (spt->guest_page.oos_page)
1767                 ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
1768                                 false, 0, vgpu);
1769
1770         if (can_do_out_of_sync(spt)) {
1771                 if (!spt->guest_page.oos_page)
1772                         ppgtt_allocate_oos_page(spt);
1773
1774                 ret = ppgtt_set_guest_page_oos(spt);
1775                 if (ret < 0)
1776                         return ret;
1777         }
1778         return 0;
1779 }
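
/*
 * Worked example for the handler above (assuming an 8-byte gtt_entry_size,
 * i.e. gtt_entry_size_shift == 3, as on gen8+): a 4-byte guest write at
 * page offset 0x10 maps to entry index 0x10 >> 3 = 2.  Since
 * bytes < gtt_entry_size, the shadow entry for index 2 is pointed at the
 * scratch page and index 2 is marked in post_shadow_bitmap; the complete
 * entry is re-shadowed later by intel_vgpu_flush_post_shadow().  A full
 * 8-byte write at the same offset would instead be shadowed immediately
 * through ppgtt_handle_guest_write_page_table().
 */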
1780
1781 static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
1782 {
1783         struct intel_vgpu *vgpu = mm->vgpu;
1784         struct intel_gvt *gvt = vgpu->gvt;
1785         struct intel_gvt_gtt *gtt = &gvt->gtt;
1786         struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1787         struct intel_gvt_gtt_entry se;
1788         int index;
1789
1790         if (!mm->ppgtt_mm.shadowed)
1791                 return;
1792
1793         for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
1794                 ppgtt_get_shadow_root_entry(mm, &se, index);
1795
1796                 if (!ops->test_present(&se))
1797                         continue;
1798
1799                 ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
1800                 se.val64 = 0;
1801                 ppgtt_set_shadow_root_entry(mm, &se, index);
1802
1803                 trace_spt_guest_change(vgpu->id, "destroy root pointer",
1804                                        NULL, se.type, se.val64, index);
1805         }
1806
1807         mm->ppgtt_mm.shadowed = false;
1808 }
1809
1810
1811 static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
1812 {
1813         struct intel_vgpu *vgpu = mm->vgpu;
1814         struct intel_gvt *gvt = vgpu->gvt;
1815         struct intel_gvt_gtt *gtt = &gvt->gtt;
1816         struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1817         struct intel_vgpu_ppgtt_spt *spt;
1818         struct intel_gvt_gtt_entry ge, se;
1819         int index, ret;
1820
1821         if (mm->ppgtt_mm.shadowed)
1822                 return 0;
1823
1824         mm->ppgtt_mm.shadowed = true;
1825
1826         for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
1827                 ppgtt_get_guest_root_entry(mm, &ge, index);
1828
1829                 if (!ops->test_present(&ge))
1830                         continue;
1831
1832                 trace_spt_guest_change(vgpu->id, __func__, NULL,
1833                                        ge.type, ge.val64, index);
1834
1835                 spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1836                 if (IS_ERR(spt)) {
1837                         gvt_vgpu_err("fail to populate guest root pointer\n");
1838                         ret = PTR_ERR(spt);
1839                         goto fail;
1840                 }
1841                 ppgtt_generate_shadow_entry(&se, spt, &ge);
1842                 ppgtt_set_shadow_root_entry(mm, &se, index);
1843
1844                 trace_spt_guest_change(vgpu->id, "populate root pointer",
1845                                        NULL, se.type, se.val64, index);
1846         }
1847
1848         return 0;
1849 fail:
1850         invalidate_ppgtt_mm(mm);
1851         return ret;
1852 }
1853
1854 static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
1855 {
1856         struct intel_vgpu_mm *mm;
1857
1858         mm = kzalloc(sizeof(*mm), GFP_KERNEL);
1859         if (!mm)
1860                 return NULL;
1861
1862         mm->vgpu = vgpu;
1863         kref_init(&mm->ref);
1864         atomic_set(&mm->pincount, 0);
1865
1866         return mm;
1867 }
1868
1869 static void vgpu_free_mm(struct intel_vgpu_mm *mm)
1870 {
1871         kfree(mm);
1872 }
1873
1874 /**
1875  * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
1876  * @vgpu: a vGPU
1877  * @root_entry_type: ppgtt root entry type
1878  * @pdps: guest pdps.
1879  *
1880  * This function is used to create a ppgtt mm object for a vGPU.
1881  *
1882  * Returns:
1883  * Zero on success, negative error code in pointer if failed.
1884  */
1885 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
1886                 enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
1887 {
1888         struct intel_gvt *gvt = vgpu->gvt;
1889         struct intel_vgpu_mm *mm;
1890         int ret;
1891
1892         mm = vgpu_alloc_mm(vgpu);
1893         if (!mm)
1894                 return ERR_PTR(-ENOMEM);
1895
1896         mm->type = INTEL_GVT_MM_PPGTT;
1897
1898         GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
1899                    root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
1900         mm->ppgtt_mm.root_entry_type = root_entry_type;
1901
1902         INIT_LIST_HEAD(&mm->ppgtt_mm.list);
1903         INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
1904
1905         if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
1906                 mm->ppgtt_mm.guest_pdps[0] = pdps[0];
1907         else
1908                 memcpy(mm->ppgtt_mm.guest_pdps, pdps,
1909                        sizeof(mm->ppgtt_mm.guest_pdps));
1910
1911         ret = shadow_ppgtt_mm(mm);
1912         if (ret) {
1913                 gvt_vgpu_err("failed to shadow ppgtt mm\n");
1914                 vgpu_free_mm(mm);
1915                 return ERR_PTR(ret);
1916         }
1917
1918         list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1919
1920         mutex_lock(&gvt->gtt.ppgtt_mm_lock);
1921         list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
1922         mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
1923
1924         return mm;
1925 }
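
/*
 * Illustrative sketch (not part of the driver): shadowing a 4-level guest
 * PPGTT whose PML4 root is advertised through a pdps array.  The pdps
 * value below is hypothetical guest data; real callers receive it from
 * the guest (e.g. via the g2v notification handlers).
 *
 *	u64 pdps[8] = { 0x1a2b3000 };	// pdps[0] holds the guest PML4 GPA
 *	struct intel_vgpu_mm *mm;
 *
 *	mm = intel_vgpu_create_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
 *					pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 */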
1926
1927 static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1928 {
1929         struct intel_vgpu_mm *mm;
1930         unsigned long nr_entries;
1931
1932         mm = vgpu_alloc_mm(vgpu);
1933         if (!mm)
1934                 return ERR_PTR(-ENOMEM);
1935
1936         mm->type = INTEL_GVT_MM_GGTT;
1937
1938         nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
1939         mm->ggtt_mm.virtual_ggtt =
1940                 vzalloc(array_size(nr_entries,
1941                                    vgpu->gvt->device_info.gtt_entry_size));
1942         if (!mm->ggtt_mm.virtual_ggtt) {
1943                 vgpu_free_mm(mm);
1944                 return ERR_PTR(-ENOMEM);
1945         }
1946
1947         return mm;
1948 }
1949
1950 /**
1951  * _intel_vgpu_mm_release - destroy a mm object
1952  * @mm_ref: a kref object
1953  *
1954  * This function is used to destroy a mm object for vGPU
1955  *
1956  */
1957 void _intel_vgpu_mm_release(struct kref *mm_ref)
1958 {
1959         struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
1960
1961         if (GEM_WARN_ON(atomic_read(&mm->pincount)))
1962                 gvt_err("vgpu mm pin count bug detected\n");
1963
1964         if (mm->type == INTEL_GVT_MM_PPGTT) {
1965                 list_del(&mm->ppgtt_mm.list);
1966
1967                 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1968                 list_del(&mm->ppgtt_mm.lru_list);
1969                 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1970
1971                 invalidate_ppgtt_mm(mm);
1972         } else {
1973                 vfree(mm->ggtt_mm.virtual_ggtt);
1974         }
1975
1976         vgpu_free_mm(mm);
1977 }
1978
1979 /**
1980  * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
1981  * @mm: a vGPU mm object
1982  *
1983  * This function is called when a user no longer needs a vGPU mm object.
1984  */
1985 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
1986 {
1987         atomic_dec_if_positive(&mm->pincount);
1988 }
1989
1990 /**
1991  * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
1992  * @mm: target vgpu mm
1993  *
1994  * This function is called when a user wants to use a vGPU mm object. If this
1995  * mm object hasn't been shadowed yet, the shadow will be populated at this
1996  * time.
1997  *
1998  * Returns:
1999  * Zero on success, negative error code if failed.
2000  */
2001 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
2002 {
2003         int ret;
2004
2005         atomic_inc(&mm->pincount);
2006
2007         if (mm->type == INTEL_GVT_MM_PPGTT) {
2008                 ret = shadow_ppgtt_mm(mm);
2009                 if (ret)
2010                         return ret;
2011
2012                 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2013                 list_move_tail(&mm->ppgtt_mm.lru_list,
2014                                &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
2015                 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2016         }
2017
2018         return 0;
2019 }
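
/*
 * Illustrative sketch (not part of the driver): the pin/unpin pairing a
 * dispatcher would use so that a PPGTT shadow cannot be reclaimed by
 * reclaim_one_ppgtt_mm() while hardware may still walk it.  The function
 * name dispatch_one_workload() is hypothetical.
 *
 *	static int dispatch_one_workload(struct intel_vgpu_mm *mm)
 *	{
 *		int ret;
 *
 *		ret = intel_vgpu_pin_mm(mm);	// shadows the PPGTT if needed
 *		if (ret)
 *			return ret;
 *
 *		// ... submit the workload that uses this address space ...
 *
 *		intel_vgpu_unpin_mm(mm);	// allow LRU reclaim again
 *		return 0;
 *	}
 */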
2020
2021 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
2022 {
2023         struct intel_vgpu_mm *mm;
2024         struct list_head *pos, *n;
2025
2026         mutex_lock(&gvt->gtt.ppgtt_mm_lock);
2027
2028         list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
2029                 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
2030
2031                 if (atomic_read(&mm->pincount))
2032                         continue;
2033
2034                 list_del_init(&mm->ppgtt_mm.lru_list);
2035                 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
2036                 invalidate_ppgtt_mm(mm);
2037                 return 1;
2038         }
2039         mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
2040         return 0;
2041 }
2042
2043 /*
2044  * GMA translation APIs.
2045  */
2046 static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
2047                 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
2048 {
2049         struct intel_vgpu *vgpu = mm->vgpu;
2050         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2051         struct intel_vgpu_ppgtt_spt *s;
2052
2053         s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
2054         if (!s)
2055                 return -ENXIO;
2056
2057         if (!guest)
2058                 ppgtt_get_shadow_entry(s, e, index);
2059         else
2060                 ppgtt_get_guest_entry(s, e, index);
2061         return 0;
2062 }
2063
2064 /**
2065  * intel_vgpu_gma_to_gpa - translate a gma to GPA
2066  * @mm: mm object. could be a PPGTT or GGTT mm object
2067  * @gma: graphics memory address in this mm object
2068  *
2069  * This function is used to translate a graphics memory address in specific
2070  * This function is used to translate a graphics memory address in a specific
2071  * graphics memory space into a guest physical address.
2072  * Returns:
2073  * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
2074  */
2075 unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
2076 {
2077         struct intel_vgpu *vgpu = mm->vgpu;
2078         struct intel_gvt *gvt = vgpu->gvt;
2079         struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
2080         struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
2081         unsigned long gpa = INTEL_GVT_INVALID_ADDR;
2082         unsigned long gma_index[4];
2083         struct intel_gvt_gtt_entry e;
2084         int i, levels = 0;
2085         int ret;
2086
2087         GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
2088                    mm->type != INTEL_GVT_MM_PPGTT);
2089
2090         if (mm->type == INTEL_GVT_MM_GGTT) {
2091                 if (!vgpu_gmadr_is_valid(vgpu, gma))
2092                         goto err;
2093
2094                 ggtt_get_guest_entry(mm, &e,
2095                         gma_ops->gma_to_ggtt_pte_index(gma));
2096
2097                 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
2098                         + (gma & ~I915_GTT_PAGE_MASK);
2099
2100                 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
2101         } else {
2102                 switch (mm->ppgtt_mm.root_entry_type) {
2103                 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2104                         ppgtt_get_shadow_root_entry(mm, &e, 0);
2105
2106                         gma_index[0] = gma_ops->gma_to_pml4_index(gma);
2107                         gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
2108                         gma_index[2] = gma_ops->gma_to_pde_index(gma);
2109                         gma_index[3] = gma_ops->gma_to_pte_index(gma);
2110                         levels = 4;
2111                         break;
2112                 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2113                         ppgtt_get_shadow_root_entry(mm, &e,
2114                                         gma_ops->gma_to_l3_pdp_index(gma));
2115
2116                         gma_index[0] = gma_ops->gma_to_pde_index(gma);
2117                         gma_index[1] = gma_ops->gma_to_pte_index(gma);
2118                         levels = 2;
2119                         break;
2120                 default:
2121                         GEM_BUG_ON(1);
2122                 }
2123
2124                 /* walk the shadow page table and get gpa from guest entry */
2125                 for (i = 0; i < levels; i++) {
2126                         ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
2127                                 (i == levels - 1));
2128                         if (ret)
2129                                 goto err;
2130
2131                         if (!pte_ops->test_present(&e)) {
2132                                 gvt_dbg_core("GMA 0x%lx is not present\n", gma);
2133                                 goto err;
2134                         }
2135                 }
2136
2137                 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
2138                                         (gma & ~I915_GTT_PAGE_MASK);
2139                 trace_gma_translate(vgpu->id, "ppgtt", 0,
2140                                     mm->ppgtt_mm.root_entry_type, gma, gpa);
2141         }
2142
2143         return gpa;
2144 err:
2145         gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
2146         return INTEL_GVT_INVALID_ADDR;
2147 }
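
/*
 * Illustrative sketch (not part of the driver): translating a guest
 * graphics memory address through the vGPU's GGTT mm and checking for
 * failure.  The gma value is assumed to come from guest-provided data
 * (for example a batch buffer address scanned by the command parser).
 *
 *	unsigned long gpa;
 *
 *	gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, gma);
 *	if (gpa == INTEL_GVT_INVALID_ADDR)
 *		return -EFAULT;
 *	// gpa can now be used with intel_gvt_hypervisor_read_gpa() etc.
 */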
2148
2149 static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
2150         unsigned int off, void *p_data, unsigned int bytes)
2151 {
2152         struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2153         const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2154         unsigned long index = off >> info->gtt_entry_size_shift;
2155         unsigned long gma;
2156         struct intel_gvt_gtt_entry e;
2157
2158         if (bytes != 4 && bytes != 8)
2159                 return -EINVAL;
2160
2161         gma = index << I915_GTT_PAGE_SHIFT;
2162         if (!intel_gvt_ggtt_validate_range(vgpu,
2163                                            gma, 1 << I915_GTT_PAGE_SHIFT)) {
2164                 gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
2165                 memset(p_data, 0, bytes);
2166                 return 0;
2167         }
2168
2169         ggtt_get_guest_entry(ggtt_mm, &e, index);
2170         memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
2171                         bytes);
2172         return 0;
2173 }
2174
2175 /**
2176  * intel_vgpu_emulate_ggtt_mmio_read - emulate GGTT MMIO register read
2177  * @vgpu: a vGPU
2178  * @off: register offset
2179  * @p_data: data will be returned to guest
2180  * @bytes: data length
2181  *
2182  * This function is used to emulate the GTT MMIO register read
2183  *
2184  * Returns:
2185  * Zero on success, error code if failed.
2186  */
2187 int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
2188         void *p_data, unsigned int bytes)
2189 {
2190         const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2191         int ret;
2192
2193         if (bytes != 4 && bytes != 8)
2194                 return -EINVAL;
2195
2196         off -= info->gtt_start_offset;
2197         ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
2198         return ret;
2199 }
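
/*
 * Worked example for the read path above (values assumed for
 * illustration): with an 8-byte GGTT entry (gtt_entry_size_shift == 3),
 * an 8-byte guest read at gtt_start_offset + 0x20 is rebased to
 * off = 0x20 and served from guest entry index 0x20 >> 3 = 4 of the
 * virtual GGTT.  A 4-byte read at gtt_start_offset + 0x24 hits the same
 * entry, and only the upper half of the cached value (off & 7 == 4) is
 * copied back to the guest.
 */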
2200
2201 static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
2202                 struct intel_gvt_gtt_entry *entry)
2203 {
2204         struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2205         unsigned long pfn;
2206
2207         pfn = pte_ops->get_pfn(entry);
2208         if (pfn != vgpu->gvt->gtt.scratch_mfn)
2209                 intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
2210                                                 pfn << PAGE_SHIFT);
2211 }
2212
2213 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2214         void *p_data, unsigned int bytes)
2215 {
2216         struct intel_gvt *gvt = vgpu->gvt;
2217         const struct intel_gvt_device_info *info = &gvt->device_info;
2218         struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2219         struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
2220         unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
2221         unsigned long gma, gfn;
2222         struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2223         struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2224         dma_addr_t dma_addr;
2225         int ret;
2226         struct intel_gvt_partial_pte *partial_pte, *pos, *n;
2227         bool partial_update = false;
2228
2229         if (bytes != 4 && bytes != 8)
2230                 return -EINVAL;
2231
2232         gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
2233
2234         /* the VM may configure the whole GM space when ballooning is used */
2235         if (!vgpu_gmadr_is_valid(vgpu, gma))
2236                 return 0;
2237
2238         e.type = GTT_TYPE_GGTT_PTE;
2239         memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
2240                         bytes);
2241
2242         /* If the ggtt entry size is 8 bytes and it is split into two 4-byte
2243          * writes, save the first 4 bytes in a list and update only the virtual
2244          * PTE. Update the shadow PTE when the second 4 bytes arrive.
2245          */
2246         if (bytes < info->gtt_entry_size) {
2247                 bool found = false;
2248
2249                 list_for_each_entry_safe(pos, n,
2250                                 &ggtt_mm->ggtt_mm.partial_pte_list, list) {
2251                         if (g_gtt_index == pos->offset >>
2252                                         info->gtt_entry_size_shift) {
2253                                 if (off != pos->offset) {
2254                                         /* the second partial part*/
2255                                         int last_off = pos->offset &
2256                                                 (info->gtt_entry_size - 1);
2257
2258                                         memcpy((void *)&e.val64 + last_off,
2259                                                 (void *)&pos->data + last_off,
2260                                                 bytes);
2261
2262                                         list_del(&pos->list);
2263                                         kfree(pos);
2264                                         found = true;
2265                                         break;
2266                                 }
2267
2268                                 /* update of the first partial part */
2269                                 pos->data = e.val64;
2270                                 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2271                                 return 0;
2272                         }
2273                 }
2274
2275                 if (!found) {
2276                         /* the first partial part */
2277                         partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
2278                         if (!partial_pte)
2279                                 return -ENOMEM;
2280                         partial_pte->offset = off;
2281                         partial_pte->data = e.val64;
2282                         list_add_tail(&partial_pte->list,
2283                                 &ggtt_mm->ggtt_mm.partial_pte_list);
2284                         partial_update = true;
2285                 }
2286         }
2287
2288         if (!partial_update && (ops->test_present(&e))) {
2289                 gfn = ops->get_pfn(&e);
2290                 m.val64 = e.val64;
2291                 m.type = e.type;
2292
2293                 /* one PTE update may be issued in multiple writes and the
2294                  * first write may not construct a valid gfn
2295                  */
2296                 if (!intel_gvt_hypervisor_is_valid_gfn(vgpu, gfn)) {
2297                         ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2298                         goto out;
2299                 }
2300
2301                 ret = intel_gvt_hypervisor_dma_map_guest_page(vgpu, gfn,
2302                                                         PAGE_SIZE, &dma_addr);
2303                 if (ret) {
2304                         gvt_vgpu_err("fail to populate guest ggtt entry\n");
2305                         /* The guest driver may read/write the entry while it is
2306                          * only partially updated; the p2m mapping can fail in this
2307                          * case, so point the shadow entry to a scratch page.
2308                          */
2309                         ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2310                 } else
2311                         ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
2312         } else {
2313                 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2314                 ops->clear_present(&m);
2315         }
2316
2317 out:
2318         ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2319
2320         ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
2321         ggtt_invalidate_pte(vgpu, &e);
2322
2323         ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
2324         ggtt_invalidate(gvt->dev_priv);
2325         return 0;
2326 }
2327
2328 /**
2329  * intel_vgpu_emulate_ggtt_mmio_write - emulate GGTT MMIO register write
2330  * @vgpu: a vGPU
2331  * @off: register offset
2332  * @p_data: data from guest write
2333  * @bytes: data length
2334  *
2335  * This function is used to emulate the GTT MMIO register write
2336  *
2337  * Returns:
2338  * Zero on success, error code if failed.
2339  */
2340 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
2341                 unsigned int off, void *p_data, unsigned int bytes)
2342 {
2343         const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2344         int ret;
2345
2346         if (bytes != 4 && bytes != 8)
2347                 return -EINVAL;
2348
2349         off -= info->gtt_start_offset;
2350         ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
2351         return ret;
2352 }
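
/*
 * Worked example for the write path above (values assumed for
 * illustration): a 32-bit guest writes one 8-byte GGTT PTE as two 4-byte
 * MMIO writes.  The first write only updates the virtual PTE and is
 * parked on partial_pte_list; no DMA mapping is attempted because the
 * gfn may still be incomplete, so the shadow PTE points at the scratch
 * page.  When the second 4-byte write arrives, the saved half is merged
 * in, the partial entry is removed from the list, the guest page is
 * mapped with intel_gvt_hypervisor_dma_map_guest_page() and the shadow
 * PTE is written, followed by a GGTT invalidation.
 */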
2353
2354 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
2355                 enum intel_gvt_gtt_type type)
2356 {
2357         struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
2358         struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2359         struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2360         int page_entry_num = I915_GTT_PAGE_SIZE >>
2361                                 vgpu->gvt->device_info.gtt_entry_size_shift;
2362         void *scratch_pt;
2363         int i;
2364         struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
2365         dma_addr_t daddr;
2366
2367         if (drm_WARN_ON(&i915->drm,
2368                         type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
2369                 return -EINVAL;
2370
2371         scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
2372         if (!scratch_pt) {
2373                 gvt_vgpu_err("fail to allocate scratch page\n");
2374                 return -ENOMEM;
2375         }
2376
2377         daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
2378                         4096, PCI_DMA_BIDIRECTIONAL);
2379         if (dma_mapping_error(dev, daddr)) {
2380                 gvt_vgpu_err("fail to dmamap scratch_pt\n");
2381                 __free_page(virt_to_page(scratch_pt));
2382                 return -ENOMEM;
2383         }
2384         gtt->scratch_pt[type].page_mfn =
2385                 (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2386         gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
2387         gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
2388                         vgpu->id, type, gtt->scratch_pt[type].page_mfn);
2389
2390         /* Build the tree by filling the scratch pt with entries that point
2391          * to the next-level scratch pt or the scratch page. The
2392          * scratch_pt[type] indicates the scratch pt/scratch page used by the
2393          * 'type' page table.
2394          * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by the
2395          * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
2396          * is of GTT_TYPE_PPGTT_PTE_PT and is filled with the scratch page mfn.
2397          */
2398         if (type > GTT_TYPE_PPGTT_PTE_PT) {
2399                 struct intel_gvt_gtt_entry se;
2400
2401                 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
2402                 se.type = get_entry_type(type - 1);
2403                 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
2404
2405                 /* The entry parameters like present/writeable/cache type are
2406                  * set to the same values as in i915's scratch page tree.
2407                  */
2408                 se.val64 |= _PAGE_PRESENT | _PAGE_RW;
2409                 if (type == GTT_TYPE_PPGTT_PDE_PT)
2410                         se.val64 |= PPAT_CACHED;
2411
2412                 for (i = 0; i < page_entry_num; i++)
2413                         ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
2414         }
2415
2416         return 0;
2417 }
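
/*
 * Worked illustration of the scratch hierarchy built above: the
 * GTT_TYPE_PPGTT_PTE_PT scratch table stays zeroed (no entry is present),
 * the GTT_TYPE_PPGTT_PDE_PT scratch table is filled with present entries
 * whose pfn is the PTE-level scratch table, and so on up through the
 * PDP/PML4 levels.  A shadow entry that must read as "not present" can
 * therefore always be pointed at scratch_pt[type].page_mfn of its own
 * level, and any stray walk terminates in zeroed scratch tables.
 */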
2418
2419 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
2420 {
2421         int i;
2422         struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
2423         dma_addr_t daddr;
2424
2425         for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2426                 if (vgpu->gtt.scratch_pt[i].page != NULL) {
2427                         daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
2428                                         I915_GTT_PAGE_SHIFT);
2429                         dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
2430                         __free_page(vgpu->gtt.scratch_pt[i].page);
2431                         vgpu->gtt.scratch_pt[i].page = NULL;
2432                         vgpu->gtt.scratch_pt[i].page_mfn = 0;
2433                 }
2434         }
2435
2436         return 0;
2437 }
2438
2439 static int create_scratch_page_tree(struct intel_vgpu *vgpu)
2440 {
2441         int i, ret;
2442
2443         for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2444                 ret = alloc_scratch_pages(vgpu, i);
2445                 if (ret)
2446                         goto err;
2447         }
2448
2449         return 0;
2450
2451 err:
2452         release_scratch_page_tree(vgpu);
2453         return ret;
2454 }
2455
2456 /**
2457  * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
2458  * @vgpu: a vGPU
2459  *
2460  * This function is used to initialize per-vGPU graphics memory virtualization
2461  * components.
2462  *
2463  * Returns:
2464  * Zero on success, error code if failed.
2465  */
2466 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2467 {
2468         struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2469
2470         INIT_RADIX_TREE(&gtt->spt_tree, GFP_KERNEL);
2471
2472         INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
2473         INIT_LIST_HEAD(&gtt->oos_page_list_head);
2474         INIT_LIST_HEAD(&gtt->post_shadow_list_head);
2475
2476         gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
2477         if (IS_ERR(gtt->ggtt_mm)) {
2478                 gvt_vgpu_err("fail to create mm for ggtt.\n");
2479                 return PTR_ERR(gtt->ggtt_mm);
2480         }
2481
2482         intel_vgpu_reset_ggtt(vgpu, false);
2483
2484         INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
2485
2486         return create_scratch_page_tree(vgpu);
2487 }
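
/*
 * Illustrative sketch (not part of the driver): pairing the per-vGPU GTT
 * setup and teardown around the vGPU life cycle.  create_one_vgpu() is a
 * hypothetical wrapper; the real call sites are the vGPU create/destroy
 * paths.
 *
 *	static int create_one_vgpu(struct intel_vgpu *vgpu)
 *	{
 *		int ret;
 *
 *		ret = intel_vgpu_init_gtt(vgpu);	// GGTT mm + scratch tree
 *		if (ret)
 *			return ret;
 *
 *		// ... remaining per-vGPU setup ...
 *		// the destroy path calls intel_vgpu_clean_gtt(vgpu)
 *		return 0;
 *	}
 */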
2488
2489 static void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
2490 {
2491         struct list_head *pos, *n;
2492         struct intel_vgpu_mm *mm;
2493
2494         list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2495                 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2496                 intel_vgpu_destroy_mm(mm);
2497         }
2498
2499         if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
2500                 gvt_err("vgpu ppgtt mm is not fully destroyed\n");
2501
2502         if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
2503                 gvt_err("Why do we still have SPTs not freed?\n");
2504                 ppgtt_free_all_spt(vgpu);
2505         }
2506 }
2507
2508 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2509 {
2510         struct intel_gvt_partial_pte *pos, *next;
2511
2512         list_for_each_entry_safe(pos, next,
2513                                  &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
2514                                  list) {
2515                 gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
2516                         pos->offset, pos->data);
2517                 kfree(pos);
2518         }
2519         intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
2520         vgpu->gtt.ggtt_mm = NULL;
2521 }
2522
2523 /**
2524  * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2525  * @vgpu: a vGPU
2526  *
2527  * This function is used to clean up per-vGPU graphics memory virtualization
2528  * components.
2529  *
2530  * This is the counterpart of intel_vgpu_init_gtt() and releases all the
2531  * per-vGPU GTT resources allocated there.
2532  */
2533 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2534 {
2535         intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2536         intel_vgpu_destroy_ggtt_mm(vgpu);
2537         release_scratch_page_tree(vgpu);
2538 }
2539
2540 static void clean_spt_oos(struct intel_gvt *gvt)
2541 {
2542         struct intel_gvt_gtt *gtt = &gvt->gtt;
2543         struct list_head *pos, *n;
2544         struct intel_vgpu_oos_page *oos_page;
2545
2546         WARN(!list_empty(&gtt->oos_page_use_list_head),
2547                 "someone is still using oos page\n");
2548
2549         list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
2550                 oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
2551                 list_del(&oos_page->list);
2552                 free_page((unsigned long)oos_page->mem);
2553                 kfree(oos_page);
2554         }
2555 }
2556
2557 static int setup_spt_oos(struct intel_gvt *gvt)
2558 {
2559         struct intel_gvt_gtt *gtt = &gvt->gtt;
2560         struct intel_vgpu_oos_page *oos_page;
2561         int i;
2562         int ret;
2563
2564         INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
2565         INIT_LIST_HEAD(&gtt->oos_page_use_list_head);
2566
2567         for (i = 0; i < preallocated_oos_pages; i++) {
2568                 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2569                 if (!oos_page) {
2570                         ret = -ENOMEM;
2571                         goto fail;
2572                 }
2573                 oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
2574                 if (!oos_page->mem) {
2575                         ret = -ENOMEM;
2576                         kfree(oos_page);
2577                         goto fail;
2578                 }
2579
2580                 INIT_LIST_HEAD(&oos_page->list);
2581                 INIT_LIST_HEAD(&oos_page->vm_list);
2582                 oos_page->id = i;
2583                 list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
2584         }
2585
2586         gvt_dbg_mm("%d oos pages preallocated\n", i);
2587
2588         return 0;
2589 fail:
2590         clean_spt_oos(gvt);
2591         return ret;
2592 }
2593
2594 /**
2595  * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
2596  * @vgpu: a vGPU
2597  * @pdps: pdp root array
2598  *
2599  * This function is used to find a PPGTT mm object from mm object pool
2600  *
2601  * Returns:
2602  * pointer to mm object on success, NULL if failed.
2603  */
2604 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
2605                 u64 pdps[])
2606 {
2607         struct intel_vgpu_mm *mm;
2608         struct list_head *pos;
2609
2610         list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
2611                 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2612
2613                 switch (mm->ppgtt_mm.root_entry_type) {
2614                 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2615                         if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
2616                                 return mm;
2617                         break;
2618                 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2619                         if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
2620                                     sizeof(mm->ppgtt_mm.guest_pdps)))
2621                                 return mm;
2622                         break;
2623                 default:
2624                         GEM_BUG_ON(1);
2625                 }
2626         }
2627         return NULL;
2628 }
2629
2630 /**
2631  * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
2632  * @vgpu: a vGPU
2633  * @root_entry_type: ppgtt root entry type
2634  * @pdps: guest pdps
2635  *
2636  * This function is used to find or create a PPGTT mm object from a guest.
2637  *
2638  * Returns:
2639  * Pointer to the mm object on success, ERR_PTR-encoded error code if failed.
2640  */
2641 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
2642                 enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
2643 {
2644         struct intel_vgpu_mm *mm;
2645
2646         mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2647         if (mm) {
2648                 intel_vgpu_mm_get(mm);
2649         } else {
2650                 mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
2651                 if (IS_ERR(mm))
2652                         gvt_vgpu_err("fail to create mm\n");
2653         }
2654         return mm;
2655 }
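
/*
 * Illustrative sketch (not part of the driver): the get/put pairing used
 * around a guest-advertised page table.  When the guest announces a new
 * PPGTT (e.g. through a g2v notification), the handler looks the mm up or
 * creates it; when the guest tears the page table down, the matching put
 * drops the reference.  The pdps array is guest data.
 *
 *	struct intel_vgpu_mm *mm;
 *
 *	mm = intel_vgpu_get_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 *
 *	// ... later, on the destroy notification ...
 *	return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
 */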
2656
2657 /**
2658  * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
2659  * @vgpu: a vGPU
2660  * @pdps: guest pdps
2661  *
2662  * This function is used to find a guest's PPGTT mm object and put the reference held on it.
2663  *
2664  * Returns:
2665  * Zero on success, negative error code if failed.
2666  */
2667 int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
2668 {
2669         struct intel_vgpu_mm *mm;
2670
2671         mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2672         if (!mm) {
2673                 gvt_vgpu_err("fail to find ppgtt instance.\n");
2674                 return -EINVAL;
2675         }
2676         intel_vgpu_mm_put(mm);
2677         return 0;
2678 }
2679
2680 /**
2681  * intel_gvt_init_gtt - initialize mm components of a GVT device
2682  * @gvt: GVT device
2683  *
2684  * This function is called at the initialization stage, to initialize
2685  * the mm components of a GVT device.
2686  *
2687  * Returns:
2688  * zero on success, negative error code if failed.
2689  */
2690 int intel_gvt_init_gtt(struct intel_gvt *gvt)
2691 {
2692         int ret;
2693         void *page;
2694         struct device *dev = &gvt->dev_priv->drm.pdev->dev;
2695         dma_addr_t daddr;
2696
2697         gvt_dbg_core("init gtt\n");
2698
2699         gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2700         gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
2701
2702         page = (void *)get_zeroed_page(GFP_KERNEL);
2703         if (!page) {
2704                 gvt_err("fail to allocate scratch ggtt page\n");
2705                 return -ENOMEM;
2706         }
2707
2708         daddr = dma_map_page(dev, virt_to_page(page), 0,
2709                         4096, PCI_DMA_BIDIRECTIONAL);
2710         if (dma_mapping_error(dev, daddr)) {
2711                 gvt_err("fail to dmamap scratch ggtt page\n");
2712                 __free_page(virt_to_page(page));
2713                 return -ENOMEM;
2714         }
2715
2716         gvt->gtt.scratch_page = virt_to_page(page);
2717         gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2718
2719         if (enable_out_of_sync) {
2720                 ret = setup_spt_oos(gvt);
2721                 if (ret) {
2722                         gvt_err("fail to initialize SPT oos\n");
2723                         dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
2724                         __free_page(gvt->gtt.scratch_page);
2725                         return ret;
2726                 }
2727         }
2728         INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
2729         mutex_init(&gvt->gtt.ppgtt_mm_lock);
2730         return 0;
2731 }
2732
2733 /**
2734  * intel_gvt_clean_gtt - clean up mm components of a GVT device
2735  * @gvt: GVT device
2736  *
2737  * This function is called at the driver unloading stage, to clean up the
2738  * mm components of a GVT device.
2739  *
2740  */
2741 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2742 {
2743         struct device *dev = &gvt->dev_priv->drm.pdev->dev;
2744         dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
2745                                         I915_GTT_PAGE_SHIFT);
2746
2747         dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
2748
2749         __free_page(gvt->gtt.scratch_page);
2750
2751         if (enable_out_of_sync)
2752                 clean_spt_oos(gvt);
2753 }
2754
2755 /**
2756  * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
2757  * @vgpu: a vGPU
2758  *
2759  * This function is called to invalidate all PPGTT instances of a vGPU.
2760  *
2761  */
2762 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
2763 {
2764         struct list_head *pos, *n;
2765         struct intel_vgpu_mm *mm;
2766
2767         list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2768                 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2769                 if (mm->type == INTEL_GVT_MM_PPGTT) {
2770                         mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2771                         list_del_init(&mm->ppgtt_mm.lru_list);
2772                         mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2773                         if (mm->ppgtt_mm.shadowed)
2774                                 invalidate_ppgtt_mm(mm);
2775                 }
2776         }
2777 }
2778
2779 /**
2780  * intel_vgpu_reset_ggtt - reset the GGTT entry
2781  * @vgpu: a vGPU
2782  * @invalidate_old: invalidate old entries
2783  *
2784  * This function is called at the vGPU create stage
2785  * to reset all the GGTT entries.
2786  *
2787  */
2788 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
2789 {
2790         struct intel_gvt *gvt = vgpu->gvt;
2791         struct drm_i915_private *dev_priv = gvt->dev_priv;
2792         struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2793         struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
2794         struct intel_gvt_gtt_entry old_entry;
2795         u32 index;
2796         u32 num_entries;
2797
2798         pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
2799         pte_ops->set_present(&entry);
2800
2801         index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2802         num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2803         while (num_entries--) {
2804                 if (invalidate_old) {
2805                         ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2806                         ggtt_invalidate_pte(vgpu, &old_entry);
2807                 }
2808                 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2809         }
2810
2811         index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2812         num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2813         while (num_entries--) {
2814                 if (invalidate_old) {
2815                         ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2816                         ggtt_invalidate_pte(vgpu, &old_entry);
2817                 }
2818                 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2819         }
2820
2821         ggtt_invalidate(dev_priv);
2822 }
2823
2824 /**
2825  * intel_vgpu_reset_gtt - reset the all GTT related status
2826  * @vgpu: a vGPU
2827  *
2828  * This function is called from the vfio core to reset all
2829  * GTT related status, including GGTT, PPGTT, scratch page.
2830  *
2831  */
2832 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
2833 {
2834         /* Shadow pages are only created when there is no page
2835          * table tracking data, so remove page tracking data after
2836          * removing the shadow pages.
2837          */
2838         intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2839         intel_vgpu_reset_ggtt(vgpu, true);
2840 }