// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gen6_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_gt.h"

/* Write pde (index) from the page directory @pd to the page table @pt */
static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
				  const unsigned int pde,
				  const struct i915_page_table *pt)
{
	/* Caller needs to make sure the write completes if necessary */
	iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
		  ppgtt->pd_addr + pde);
}

void gen7_ppgtt_enable(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 ecochk;

	intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);

	ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
	if (IS_HASWELL(i915)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	intel_uncore_write(uncore, GAM_ECOCHK, ecochk);

	for_each_engine(engine, gt, id) {
		/* GFX_MODE is per-ring on gen7+ */
		ENGINE_WRITE(engine,
			     RING_MODE_GEN7,
			     _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

void gen6_ppgtt_enable(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_rmw(uncore,
			 GAC_ECO_BITS,
			 0,
			 ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);

	intel_uncore_rmw(uncore,
			 GAB_CTL,
			 0,
			 GAB_CTL_CONT_AFTER_PAGEFAULT);

	intel_uncore_rmw(uncore,
			 GAM_ECOCHK,
			 0,
			 ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
		intel_uncore_write(uncore,
				   GFX_MODE,
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   u64 start, u64 length)
{
	struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
	const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	const gen6_pte_t scratch_pte = vm->scratch[0].encode;
	unsigned int pde = first_entry / GEN6_PTES;
	unsigned int pte = first_entry % GEN6_PTES;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;

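	/*
	 * Walk the range one page table at a time, clamping each clear to
	 * the PTEs remaining in the current table.
	 */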
	while (num_entries) {
		struct i915_page_table * const pt =
			i915_pt_entry(ppgtt->base.pd, pde++);
		const unsigned int count = min(num_entries, GEN6_PTES - pte);
		gen6_pte_t *vaddr;

		GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));

		num_entries -= count;

		GEM_BUG_ON(count > atomic_read(&pt->used));
		if (!atomic_sub_return(count, &pt->used))
			ppgtt->scan_for_unused_pt = true;

		/*
		 * Note that the hw doesn't support removing PDE on the fly
		 * (they are cached inside the context with no means to
		 * invalidate the cache), so we can only reset the PTE
		 * entries back to scratch.
		 */

		vaddr = kmap_atomic_px(pt);
		memset32(vaddr + pte, scratch_pte, count);
		kunmap_atomic(vaddr);

		pte = 0;
	}
}

static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct i915_vma *vma,
				      enum i915_cache_level cache_level,
				      u32 flags)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory * const pd = ppgtt->pd;
	unsigned int first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
	unsigned int act_pt = first_entry / GEN6_PTES;
	unsigned int act_pte = first_entry % GEN6_PTES;
	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
	struct sgt_dma iter = sgt_dma(vma);
	gen6_pte_t *vaddr;

	GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);

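	/*
	 * Write one PTE per 4K page of the scatterlist, hopping to the
	 * next page table whenever act_pte wraps at GEN6_PTES.
	 */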
	vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
	do {
		GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE);
		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);

		iter.dma += I915_GTT_PAGE_SIZE;
		if (iter.dma == iter.max) {
			iter.sg = __sg_next(iter.sg);
			if (!iter.sg)
				break;

			iter.dma = sg_dma_address(iter.sg);
			iter.max = iter.dma + iter.sg->length;
		}

		if (++act_pte == GEN6_PTES) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
			act_pte = 0;
		}
	} while (1);
	kunmap_atomic(vaddr);

	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}

static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
{
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt;
	unsigned int pde;

	start = round_down(start, SZ_64K);
	end = round_up(end, SZ_64K) - start;

	mutex_lock(&ppgtt->flush);

	gen6_for_each_pde(pt, pd, start, end, pde)
		gen6_write_pde(ppgtt, pde, pt);

	mb();
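	/*
	 * Reading back the last PDE written acts as a posting read, forcing
	 * the iowrite32()s above to land before the GGTT invalidation below.
	 */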
	ioread32(ppgtt->pd_addr + pde - 1);
	gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
	mb();

	mutex_unlock(&ppgtt->flush);
}

static void gen6_alloc_va_range(struct i915_address_space *vm,
				struct i915_vm_pt_stash *stash,
				u64 start, u64 length)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt;
	bool flush = false;
	u64 from = start;
	unsigned int pde;

	spin_lock(&pd->lock);
	gen6_for_each_pde(pt, pd, start, length, pde) {
		const unsigned int count = gen6_pte_count(start, length);

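		/*
		 * The PDE still points at the scratch page table: take a
		 * preallocated table from the stash. The lock is dropped
		 * while initialising it, so recheck the entry afterwards in
		 * case someone else installed a table meanwhile.
		 */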
		if (px_base(pt) == px_base(&vm->scratch[1])) {
			spin_unlock(&pd->lock);

			pt = stash->pt[0];
			GEM_BUG_ON(!pt);

			fill32_px(pt, vm->scratch[0].encode);

			spin_lock(&pd->lock);
			if (pd->entry[pde] == &vm->scratch[1]) {
				stash->pt[0] = pt->stash;
				atomic_set(&pt->used, 0);
				pd->entry[pde] = pt;
			} else {
				pt = pd->entry[pde];
			}

			flush = true;
		}

		atomic_add(count, &pt->used);
	}
	spin_unlock(&pd->lock);

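	/*
	 * New PDEs only need writing to the hw if the PD is currently bound
	 * in the GGTT; if not, pd_vma_bind() will flush the whole range
	 * when the PD is pinned.
	 */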
	if (flush && i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
			gen6_flush_pd(ppgtt, from, start);
	}
}

static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
{
	struct i915_address_space * const vm = &ppgtt->base.vm;
	struct i915_page_directory * const pd = ppgtt->base.pd;
	int ret;

	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
	if (ret)
		return ret;

	vm->scratch[0].encode =
		vm->pte_encode(px_dma(&vm->scratch[0]),
			       I915_CACHE_NONE, PTE_READ_ONLY);

	if (unlikely(setup_page_dma(vm, px_base(&vm->scratch[1])))) {
		cleanup_scratch_page(vm);
		return -ENOMEM;
	}

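	/*
	 * scratch[0] is the scratch page itself; scratch[1] is a page of
	 * PTEs that all point at it. Seed every PDE with the scratch page
	 * table so the whole address space initially reads as scratch.
	 */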
	fill32_px(&vm->scratch[1], vm->scratch[0].encode);
	memset_p(pd->entry, &vm->scratch[1], I915_PDES);

	return 0;
}

static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
{
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_dma * const scratch =
		px_base(&ppgtt->base.vm.scratch[1]);
	struct i915_page_table *pt;
	u32 pde;

	gen6_for_all_pdes(pt, pd, pde)
		if (px_base(pt) != scratch)
			free_px(&ppgtt->base.vm, pt);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));

	__i915_vma_put(ppgtt->vma);

	gen6_ppgtt_free_pd(ppgtt);
	free_scratch(vm);

	mutex_destroy(&ppgtt->flush);
	mutex_destroy(&ppgtt->pin_mutex);
	kfree(ppgtt->base.pd);
}

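/*
 * The PD vma is not backed by a GEM object, so there are no real pages to
 * set up; the error pointer keeps vma->pages non-NULL while marking it as
 * having no page backing.
 */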
static int pd_vma_set_pages(struct i915_vma *vma)
{
	vma->pages = ERR_PTR(-ENODEV);
	return 0;
}

static void pd_vma_clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

	vma->pages = NULL;
}

static void pd_vma_bind(struct i915_address_space *vm,
			struct i915_vm_pt_stash *stash,
			struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct gen6_ppgtt *ppgtt = vma->private;
	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;

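	/*
	 * The page directory itself lives in the GGTT, so its PDEs are
	 * written through the same ioremapped gsm window as the GGTT PTEs.
	 */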
	px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;

	gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
}

static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
	struct gen6_ppgtt *ppgtt = vma->private;
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_dma * const scratch =
		px_base(&ppgtt->base.vm.scratch[1]);
	struct i915_page_table *pt;
	unsigned int pde;

	if (!ppgtt->scan_for_unused_pt)
		return;

	/* Free all no longer used page tables */
	gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
		if (px_base(pt) == scratch || atomic_read(&pt->used))
			continue;

		free_px(&ppgtt->base.vm, pt);
		pd->entry[pde] = scratch;
	}

	ppgtt->scan_for_unused_pt = false;
}

static const struct i915_vma_ops pd_vma_ops = {
	.set_pages = pd_vma_set_pages,
	.clear_pages = pd_vma_clear_pages,
	.bind_vma = pd_vma_bind,
	.unbind_vma = pd_vma_unbind,
};

static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
{
	struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
	struct i915_vma *vma;

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(size > ggtt->vm.total);

	vma = i915_vma_alloc();
	if (!vma)
		return ERR_PTR(-ENOMEM);

	i915_active_init(&vma->active, NULL, NULL);

	kref_init(&vma->ref);
	mutex_init(&vma->pages_mutex);
	vma->vm = i915_vm_get(&ggtt->vm);
	vma->ops = &pd_vma_ops;
	vma->private = ppgtt;

	vma->size = size;
	vma->fence_size = size;
	atomic_set(&vma->flags, I915_VMA_GGTT);
	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */

	INIT_LIST_HEAD(&vma->obj_link);
	INIT_LIST_HEAD(&vma->closed_link);

	return vma;
}

int gen6_ppgtt_pin(struct i915_ppgtt *base)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
	int err;

	GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));

	/*
	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
	 * which will be pinned into every active context.
	 * (When vma->pin_count becomes atomic, I expect we will naturally
	 * need a larger, unpacked, type and kill this redundancy.)
	 */
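	/* Fast path: already pinned, just take another reference. */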
	if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
		return 0;

	if (mutex_lock_interruptible(&ppgtt->pin_mutex))
		return -EINTR;

	/*
	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	err = 0;
	if (!atomic_read(&ppgtt->pin_count))
		err = i915_ggtt_pin(ppgtt->vma, GEN6_PD_ALIGN, PIN_HIGH);
	if (!err)
		atomic_inc(&ppgtt->pin_count);
	mutex_unlock(&ppgtt->pin_mutex);

	return err;
}

void gen6_ppgtt_unpin(struct i915_ppgtt *base)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);

	GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
	if (atomic_dec_and_test(&ppgtt->pin_count))
		i915_vma_unpin(ppgtt->vma);
}

void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);

	if (!atomic_read(&ppgtt->pin_count))
		return;

	i915_vma_unpin(ppgtt->vma);
	atomic_set(&ppgtt->pin_count, 0);
}

struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ggtt * const ggtt = gt->ggtt;
	struct gen6_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	mutex_init(&ppgtt->flush);
	mutex_init(&ppgtt->pin_mutex);

	ppgtt_init(&ppgtt->base, gt);
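	/*
	 * One page of PTEs holds SZ_4K / sizeof(gen6_pte_t) = 1024 entries,
	 * each mapping a 4K page, so a single PDE covers 4M of address
	 * space: pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t)) = 22.
	 */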
	ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t));
	ppgtt->base.vm.top = 1;

	ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;

	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;

	ppgtt->base.pd = __alloc_pd(sizeof(*ppgtt->base.pd));
	if (!ppgtt->base.pd) {
		err = -ENOMEM;
		goto err_free;
	}

	err = gen6_ppgtt_init_scratch(ppgtt);
	if (err)
		goto err_pd;

	ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
	if (IS_ERR(ppgtt->vma)) {
		err = PTR_ERR(ppgtt->vma);
		goto err_scratch;
	}

	return &ppgtt->base;

err_scratch:
	free_scratch(&ppgtt->base.vm);
err_pd:
	kfree(ppgtt->base.pd);
err_free:
	mutex_destroy(&ppgtt->pin_mutex);
	kfree(ppgtt);
	return ERR_PTR(err);
}