// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/log2.h>

#include "gen6_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
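
/*
 * Gen6/gen7 PPGTT: one page directory of I915_PDES entries, each pointing
 * to a 4K page table of GEN6_PTES gen6_pte_t entries. The PDEs occupy a
 * stolen slice of the GGTT PTE range and are written through the gsm io
 * mapping, see gen6_write_pde().
 */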
/* Write pde (index) from the page directory @pd to the page table @pt */
static void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
			   const unsigned int pde,
			   const struct i915_page_table *pt)
{
	dma_addr_t addr = pt ? px_dma(pt) : px_dma(ppgtt->base.vm.scratch[1]);

	/* Caller needs to make sure the write completes if necessary */
	iowrite32(GEN6_PDE_ADDR_ENCODE(addr) | GEN6_PDE_VALID,
		  ppgtt->pd_addr + pde);
}
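
/*
 * Going by the register bit names: gen7 wants cacheable PPGTT structure
 * walks, with HSW able to use writeback caching while IVB is limited to
 * LLC and must have the GFDT bit cleared.
 */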
void gen7_ppgtt_enable(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_uncore *uncore = gt->uncore;
	u32 ecochk;

	intel_uncore_rmw(uncore, GAC_ECO_BITS, 0, ECOBITS_PPGTT_CACHE64B);

	ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
	if (IS_HASWELL(i915)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	intel_uncore_write(uncore, GAM_ECOCHK, ecochk);
}
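
/*
 * Same idea for SNB: set up cacheable PPGTT walks, let the GPU continue
 * after a page fault, and turn on PPGTT in GFX_MODE (which may be left
 * disabled when VT-d is active).
 */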
void gen6_ppgtt_enable(struct intel_gt *gt)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_rmw(uncore,
			 GAC_ECO_BITS,
			 0,
			 ECOBITS_SNB_BIT | ECOBITS_PPGTT_CACHE64B);

	intel_uncore_rmw(uncore,
			 GAB_CTL,
			 0,
			 GAB_CTL_CONT_AFTER_PAGEFAULT);

	intel_uncore_rmw(uncore,
			 GAM_ECOCHK,
			 0,
			 ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
		intel_uncore_write(uncore,
				   GFX_MODE,
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   u64 start, u64 length)
{
	struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
	const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	const gen6_pte_t scratch_pte = vm->scratch[0]->encode;
	unsigned int pde = first_entry / GEN6_PTES;
	unsigned int pte = first_entry % GEN6_PTES;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;

	while (num_entries) {
		struct i915_page_table * const pt =
			i915_pt_entry(ppgtt->base.pd, pde++);
		const unsigned int count = min(num_entries, GEN6_PTES - pte);
		gen6_pte_t *vaddr;

		num_entries -= count;

		GEM_BUG_ON(count > atomic_read(&pt->used));
		if (!atomic_sub_return(count, &pt->used))
			ppgtt->scan_for_unused_pt = true;

		/*
		 * Note that the hw doesn't support removing PDE on the fly
		 * (they are cached inside the context with no means to
		 * invalidate the cache), so we can only reset the PTE
		 * entries back to scratch.
		 */

		vaddr = px_vaddr(pt);
		memset32(vaddr + pte, scratch_pte, count);

		pte = 0;
	}
}
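
/*
 * Write out the PTEs for @vma, walking its backing store's sg list one
 * dma segment at a time and stepping into the next page table whenever
 * act_pte wraps.
 */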
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct i915_vma *vma,
				      enum i915_cache_level cache_level,
				      u32 flags)
{
	struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory * const pd = ppgtt->pd;
	unsigned int first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
	unsigned int act_pt = first_entry / GEN6_PTES;
	unsigned int act_pte = first_entry % GEN6_PTES;
	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
	struct sgt_dma iter = sgt_dma(vma);
	gen6_pte_t *vaddr;

	GEM_BUG_ON(!pd->entry[act_pt]);

	vaddr = px_vaddr(i915_pt_entry(pd, act_pt));
	do {
		GEM_BUG_ON(sg_dma_len(iter.sg) < I915_GTT_PAGE_SIZE);
		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);

		iter.dma += I915_GTT_PAGE_SIZE;
		if (iter.dma == iter.max) {
			iter.sg = __sg_next(iter.sg);
			if (!iter.sg || sg_dma_len(iter.sg) == 0)
				break;

			iter.dma = sg_dma_address(iter.sg);
			iter.max = iter.dma + sg_dma_len(iter.sg);
		}

		if (++act_pte == GEN6_PTES) {
			vaddr = px_vaddr(i915_pt_entry(pd, ++act_pt));
			act_pte = 0;
		}
	} while (1);

	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}
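
/*
 * Rewrite the PDEs covering [start, end) through the gsm mapping. The
 * trailing ioread32() of the last PDE written serves as a posting read,
 * making sure the PDE writes have landed before the GGTT is invalidated.
 */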
static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
{
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt;
	unsigned int pde;

	start = round_down(start, SZ_64K);
	end = round_up(end, SZ_64K) - start;

	mutex_lock(&ppgtt->flush);

	gen6_for_each_pde(pt, pd, start, end, pde)
		gen6_write_pde(ppgtt, pde, pt);

	mb();
	ioread32(ppgtt->pd_addr + pde - 1);
	gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
	mb();

	mutex_unlock(&ppgtt->flush);
}
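
/*
 * Populate the range with page tables, taking preallocated ones from
 * @stash. pd->lock is dropped around the object pinning and scratch
 * fill, so pd->entry[pde] must be rechecked for a concurrent allocation
 * before installing the new table.
 */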
static void gen6_alloc_va_range(struct i915_address_space *vm,
				struct i915_vm_pt_stash *stash,
				u64 start, u64 length)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt;
	bool flush = false;
	u64 from = start;
	unsigned int pde;

	spin_lock(&pd->lock);
	gen6_for_each_pde(pt, pd, start, length, pde) {
		const unsigned int count = gen6_pte_count(start, length);

		if (!pt) {
			spin_unlock(&pd->lock);

			pt = stash->pt[0];
			__i915_gem_object_pin_pages(pt->base);

			fill32_px(pt, vm->scratch[0]->encode);

			spin_lock(&pd->lock);
			if (!pd->entry[pde]) {
				stash->pt[0] = pt->stash;
				atomic_set(&pt->used, 0);
				pd->entry[pde] = pt;
			} else {
				pt = pd->entry[pde];
			}

			flush = true;
		}

		atomic_add(count, &pt->used);
	}
	spin_unlock(&pd->lock);

	if (flush && i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
			gen6_flush_pd(ppgtt, from, start);
	}
}
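
/*
 * Set up the two scratch entries: scratch[0] is the shared scratch page
 * encoded as a read-only PTE, scratch[1] a page table with every slot
 * pointing at it, used to back unallocated PDEs.
 */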
static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
{
	struct i915_address_space * const vm = &ppgtt->base.vm;
	int ret;

	ret = setup_scratch_page(vm);
	if (ret)
		return ret;

	vm->scratch[0]->encode =
		vm->pte_encode(px_dma(vm->scratch[0]),
			       I915_CACHE_NONE, PTE_READ_ONLY);

	vm->scratch[1] = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(vm->scratch[1])) {
		ret = PTR_ERR(vm->scratch[1]);
		goto err_scratch0;
	}

	ret = map_pt_dma(vm, vm->scratch[1]);
	if (ret)
		goto err_scratch1;

	fill32_px(vm->scratch[1], vm->scratch[0]->encode);

	return 0;

err_scratch1:
	i915_gem_object_put(vm->scratch[1]);
err_scratch0:
	i915_gem_object_put(vm->scratch[0]);
	return ret;
}
static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
{
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt;
	u32 pde;

	gen6_for_all_pdes(pt, pd, pde)
		if (pt)
			free_pt(&ppgtt->base.vm, pt);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));

	gen6_ppgtt_free_pd(ppgtt);
	free_scratch(vm);

	mutex_destroy(&ppgtt->flush);

	free_pd(&ppgtt->base.vm, ppgtt->base.pd);
}
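
/*
 * Binding the PD vma into the GGTT fixes where the hw will look for the
 * page directory: record its offset and the CPU-visible address of the
 * first PDE, then write out the PDEs for the full range.
 */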
static void pd_vma_bind(struct i915_address_space *vm,
			struct i915_vm_pt_stash *stash,
			struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct gen6_ppgtt *ppgtt = vma->private;
	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;

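	/*
	 * pp_dir is presumably the PP_DIR_BASE descriptor later programmed
	 * into the ring; pd_addr is where the CPU can write the PDEs,
	 * inside the GGTT PTE array (gsm).
	 */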
	ppgtt->pp_dir = ggtt_offset * sizeof(gen6_pte_t) << 10;
	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;

	gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);
}
static void pd_vma_unbind(struct i915_address_space *vm, struct i915_vma *vma)
{
	struct gen6_ppgtt *ppgtt = vma->private;
	struct i915_page_directory * const pd = ppgtt->base.pd;
	struct i915_page_table *pt;
	unsigned int pde;

	if (!ppgtt->scan_for_unused_pt)
		return;

	/* Free all no longer used page tables */
	gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
		if (!pt || atomic_read(&pt->used))
			continue;

		free_pt(&ppgtt->base.vm, pt);
		pd->entry[pde] = NULL;
	}

	ppgtt->scan_for_unused_pt = false;
}
static const struct i915_vma_ops pd_vma_ops = {
	.bind_vma = pd_vma_bind,
	.unbind_vma = pd_vma_unbind,
};
int gen6_ppgtt_pin(struct i915_ppgtt *base, struct i915_gem_ww_ctx *ww)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
	int err;

	GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open));

	/*
	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
	 * which will be pinned into every active context.
	 * (When vma->pin_count becomes atomic, I expect we will naturally
	 * need a larger, unpacked, type and kill this redundancy.)
	 */
	if (atomic_add_unless(&ppgtt->pin_count, 1, 0))
		return 0;

	/* grab the ppgtt resv to pin the object */
	err = i915_vm_lock_objects(&ppgtt->base.vm, ww);
	if (err)
		return err;

	/*
	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	if (!atomic_read(&ppgtt->pin_count)) {
		err = i915_ggtt_pin(ppgtt->vma, ww, GEN6_PD_ALIGN, PIN_HIGH);

		GEM_BUG_ON(ppgtt->vma->fence);
		clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(ppgtt->vma));
	}
	if (!err)
		atomic_inc(&ppgtt->pin_count);

	return err;
}
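
/*
 * The PD is stored in GGTT PTE slots rather than in memory of its own,
 * so its backing object carries no real pages: get_pages hands back
 * ZERO_SIZE_PTR as a non-NULL "no pages" token.
 */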
static int pd_dummy_obj_get_pages(struct drm_i915_gem_object *obj)
{
	obj->mm.pages = ZERO_SIZE_PTR;
	return 0;
}

static void pd_dummy_obj_put_pages(struct drm_i915_gem_object *obj,
				   struct sg_table *pages)
{
}
static const struct drm_i915_gem_object_ops pd_dummy_obj_ops = {
	.name = "pd_dummy_obj",
	.get_pages = pd_dummy_obj_get_pages,
	.put_pages = pd_dummy_obj_put_pages,
};
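
/*
 * Allocate the top-level page directory and wrap it in a GGTT vma backed
 * by the dummy object above, sharing the vm's reservation lock, so it
 * can be pinned into the GGTT like any other vma.
 */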
static struct i915_page_directory *
gen6_alloc_top_pd(struct gen6_ppgtt *ppgtt)
{
	struct i915_ggtt * const ggtt = ppgtt->base.vm.gt->ggtt;
	struct i915_page_directory *pd;
	int err;

	pd = __alloc_pd(I915_PDES);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = __i915_gem_object_create_internal(ppgtt->base.vm.gt->i915,
							&pd_dummy_obj_ops,
							I915_PDES * SZ_4K);
	if (IS_ERR(pd->pt.base)) {
		err = PTR_ERR(pd->pt.base);
		pd->pt.base = NULL;
		goto err_pd;
	}

	pd->pt.base->base.resv = i915_vm_resv_get(&ppgtt->base.vm);
	pd->pt.base->shares_resv_from = &ppgtt->base.vm;

	ppgtt->vma = i915_vma_instance(pd->pt.base, &ggtt->vm, NULL);
	if (IS_ERR(ppgtt->vma)) {
		err = PTR_ERR(ppgtt->vma);
		ppgtt->vma = NULL;
		goto err_pd;
	}

	/* The dummy object we create is special, override ops.. */
	ppgtt->vma->ops = &pd_vma_ops;
	ppgtt->vma->private = ppgtt;
	return pd;

err_pd:
	free_pd(&ppgtt->base.vm, pd);
	return ERR_PTR(err);
}
void gen6_ppgtt_unpin(struct i915_ppgtt *base)
{
	struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);

	GEM_BUG_ON(!atomic_read(&ppgtt->pin_count));
	if (atomic_dec_and_test(&ppgtt->pin_count))
		i915_vma_unpin(ppgtt->vma);
}
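
/*
 * Create a gen6 ppgtt: initialise the base vm, wire up the gen6 address
 * space operations, then set up the scratch entries and the top-level
 * page directory.
 */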
struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
{
	struct i915_ggtt * const ggtt = gt->ggtt;
	struct gen6_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	mutex_init(&ppgtt->flush);

	ppgtt_init(&ppgtt->base, gt, 0);
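	/*
	 * Each PDE maps one 4K page table whose SZ_4K / sizeof(gen6_pte_t)
	 * entries cover 4K apiece, so a PDE spans 4M and pd_shift is 22.
	 */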
	ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t));
	ppgtt->base.vm.top = 1;

	ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND;
	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;

	ppgtt->base.vm.alloc_pt_dma = alloc_pt_dma;
	ppgtt->base.vm.alloc_scratch_dma = alloc_pt_dma;
	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;

	err = gen6_ppgtt_init_scratch(ppgtt);
	if (err)
		goto err_free;

	ppgtt->base.pd = gen6_alloc_top_pd(ppgtt);
	if (IS_ERR(ppgtt->base.pd)) {
		err = PTR_ERR(ppgtt->base.pd);
		goto err_scratch;
	}

	return &ppgtt->base;

err_scratch:
	free_scratch(&ppgtt->base.vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}