// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"

#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_vgpu.h"

#include "intel_gtt.h"

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
				   unsigned long color,
				   u64 *start,
				   u64 *end)
{
	if (i915_node_color_differs(node, color))
		*start += I915_GTT_PAGE_SIZE;

	/*
	 * Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetches crossing over the
	 * GTT boundary.
	 */
	node = list_next_entry(node, node_list);
	if (node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}

static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;

	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

	ggtt->vm.is_ggtt = true;

	/* Only VLV supports read-only GGTT mappings */
	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

	if (ggtt->mappable_end) {
		if (!io_mapping_init_wc(&ggtt->iomap,
					ggtt->gmadr.start,
					ggtt->mappable_end)) {
			ggtt->vm.cleanup(&ggtt->vm);
			return -EIO;
		}

		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
					      ggtt->mappable_end);
	}

	intel_ggtt_init_fences(ggtt);

	return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @i915: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *i915)
{
	int ret;

	/*
	 * Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	ret = ggtt_init_hw(&i915->ggtt);
	if (ret)
		return ret;

	return 0;
}

/*
 * Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *i915)
{
	/*
	 * Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (!intel_vtd_active())
		return false;

	if (IS_GEN(i915, 5) && IS_MOBILE(i915))
		return true;

	if (IS_GEN(i915, 12))
		return true; /* XXX DMAR fault reason 7 */

	return false;
}
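
/*
 * On suspend, unpinned VMAs that no longer carry a global binding are evicted
 * and their nodes removed; everything else keeps its drm_mm node, and all
 * PTEs are pointed at scratch until i915_ggtt_resume() rewrites them.
 */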

void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;
	int open;

	mutex_lock(&ggtt->vm.mutex);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&ggtt->vm.open, 0);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		i915_vma_wait_for_bind(vma);

		if (i915_vma_is_pinned(vma))
			continue;

		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
			__i915_vma_evict(vma);
			drm_mm_remove_node(&vma->node);
		}
	}

	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
	ggtt->invalidate(ggtt);
	atomic_set(&ggtt->vm.open, open);

	mutex_unlock(&ggtt->vm.mutex);

	intel_gt_check_and_clear_faults(ggtt->vm.gt);
}

void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	spin_lock_irq(&uncore->lock);
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
	spin_unlock_irq(&uncore->lock);
}

static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct drm_i915_private *i915 = ggtt->vm.i915;

	gen8_ggtt_invalidate(ggtt);

	if (INTEL_GEN(i915) >= 12)
		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
	else
		intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gtt_chipset_flush();
}
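
/*
 * gen8+ GGTT PTEs are simply the DMA address with the present bit set, plus
 * the local-memory bit when the backing store lives in device memory.
 */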

static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
				enum i915_cache_level level,
				u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT;

	if (flags & PTE_LM)
		pte |= GEN12_GGTT_PTE_LM;

	return pte;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));

	ggtt->invalidate(ggtt);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *gte;
	gen8_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	gte = (gen8_pte_t __iomem *)ggtt->gsm;
	gte += vma->node.start / I915_GTT_PAGE_SIZE;
	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma->pages)
		gen8_set_pte(gte++, pte_encode | addr);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		gen8_set_pte(gte++, vm->scratch[0]->encode);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(ggtt);
}

/*
 * Binds an object into the global gtt with the specified cache level.
 * The object will be accessible to the GPU via commands whose operands
 * reference offsets within the global GTT as well as accessible by the GPU
 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *gte;
	gen6_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	gte = (gen6_pte_t __iomem *)ggtt->gsm;
	gte += vma->node.start / I915_GTT_PAGE_SIZE;
	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma->pages)
		iowrite32(vm->pte_encode(addr, level, flags), gte++);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		iowrite32(vm->scratch[0]->encode, gte++);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}
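
/*
 * nop_clear_range is the default on gen6+: the real scrubbing variants below
 * are only installed when the probe code decides that a VT-d workaround (or
 * the lack of full PPGTT) requires PTEs to be cleared on unbind.
 */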

static void nop_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	/*
	 * Make sure the internal GAM fifo has been cleared of all GTT
	 * writes before exiting stop_machine(). This guarantees that
	 * any aperture accesses waiting to start in another process
	 * cannot back up behind the GTT writes causing a hang.
	 * The register can be any arbitrary GAM register.
	 */
	intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
}
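
/* Argument bundles marshalled across stop_machine() for the BXT VT-d workaround. */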

struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

struct insert_entries {
	struct i915_address_space *vm;
	struct i915_vma *vma;
	enum i915_cache_level level;
	u32 flags;
};

static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct i915_vma *vma,
					     enum i915_cache_level level,
					     u32 flags)
{
	struct insert_entries arg = { vm, vma, level, flags };

	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->scratch[0]->encode;
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
}

static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level cache_level,
				     u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
				    flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
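
/*
 * Bind a VMA into the GGTT, deriving the PTE flags (read-only, local memory)
 * from the backing object. GGTT bindings always use 4K GTT pages.
 */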

static void ggtt_bind_vma(struct i915_address_space *vm,
			  struct i915_vm_pt_stash *stash,
			  struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags;

	if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
		return;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;
	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma, cache_level, pte_flags);
	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}

static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	vm->clear_range(vm, vma->node.start, vma->size);
}
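
/*
 * GuC cannot access GGTT addresses above GUC_GGTT_TOP, so keep that tail of
 * the address space out of the allocator's reach while GuC is in use.
 */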

static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
	u64 size;
	int ret;

	if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
		return 0;

	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
	size = ggtt->vm.total - GUC_GGTT_TOP;

	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
				   PIN_NOEVICT);
	if (ret)
		drm_dbg(&ggtt->vm.i915->drm,
			"Failed to reserve top of GGTT for GuC\n");

	return ret;
}

static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
{
	if (drm_mm_node_allocated(&ggtt->uc_fw))
		drm_mm_remove_node(&ggtt->uc_fw);
}

static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
{
	ggtt_release_guc_top(ggtt);
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);
}

static int init_ggtt(struct i915_ggtt *ggtt)
{
	/*
	 * Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	unsigned long hole_start, hole_end;
	struct drm_mm_node *entry;
	int ret;

	/*
	 * GuC requires all resources that we're sharing with it to be placed in
	 * non-WOPCM memory. If GuC is not present or not in use we still need a
	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
	 * why.
	 */
	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));

	ret = intel_vgt_balloon(ggtt);
	if (ret)
		return ret;

	mutex_init(&ggtt->error_mutex);
	if (ggtt->mappable_end) {
		/*
		 * Reserve a mappable slot for our lockless error capture.
		 *
		 * We strongly prefer taking address 0x0 in order to protect
		 * other critical buffers against accidental overwrites,
		 * as writing to address 0 is a very common mistake.
		 *
		 * Since 0 may already be in use by the system (e.g. the BIOS
		 * framebuffer), we let the reservation fail quietly and hope
		 * 0 remains reserved always.
		 *
		 * If we fail to reserve 0, and then fail to find any space
		 * for an error-capture, remain silent. We can afford not
		 * to reserve an error_capture node as we have fallback
		 * paths, and we trust that 0 will remain reserved. However,
		 * the only likely reason for failure to insert is a driver
		 * bug, which we expect to cause other failures...
		 */
		ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
		ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
		if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
			drm_mm_insert_node_in_range(&ggtt->vm.mm,
						    &ggtt->error_capture,
						    ggtt->error_capture.size, 0,
						    ggtt->error_capture.color,
						    0, ggtt->mappable_end,
						    DRM_MM_INSERT_LOW);
	}
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_dbg(&ggtt->vm.i915->drm,
			"Reserved GGTT:[%llx, %llx] for use by error capture\n",
			ggtt->error_capture.start,
			ggtt->error_capture.start + ggtt->error_capture.size);

	/*
	 * The upper portion of the GuC address space has a sizeable hole
	 * (several MB) that is inaccessible by GuC. Reserve this range within
	 * GGTT as it can comfortably hold GuC/HuC firmware images.
	 */
	ret = ggtt_reserve_guc_top(ggtt);
	if (ret)
		goto err;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
		drm_dbg(&ggtt->vm.i915->drm,
			"clearing unused GTT space: [%lx, %lx]\n",
			hole_start, hole_end);
		ggtt->vm.clear_range(&ggtt->vm, hole_start,
				     hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);

	return 0;

err:
	cleanup_init_ggtt(ggtt);
	return ret;
}

static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  struct i915_vma *vma,
				  enum i915_cache_level cache_level,
				  u32 flags)
{
	u32 pte_flags;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND)
		ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
			       stash, vma, cache_level, flags);

	if (flags & I915_VMA_GLOBAL_BIND)
		vm->insert_entries(vm, vma, cache_level, pte_flags);
}

static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
				    struct i915_vma *vma)
{
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		vm->clear_range(vm, vma->node.start, vma->size);

	if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
		ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
}
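
/*
 * The aliasing PPGTT shadows the entire GGTT: local binds are mirrored into a
 * single PPGTT at the same offsets, for platforms without full PPGTT support.
 */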

static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_vm_pt_stash stash = {};
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = i915_ppgtt_create(ggtt->vm.gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
		err = -ENODEV;
		goto err_ppgtt;
	}

	err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
	if (err)
		goto err_ppgtt;

	i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
	err = i915_vm_pin_pt_stash(&ppgtt->vm, &stash);
	i915_gem_object_unlock(ppgtt->vm.scratch[0]);
	if (err)
		goto err_stash;

	/*
	 * Note we only pre-allocate as far as the end of the global
	 * GTT. On 48b / 4-level page-tables, the difference is very,
	 * very significant! We have to preallocate as GVT/vgpu does
	 * not like the page directory disappearing.
	 */
	ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);

	ggtt->alias = ppgtt;
	ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;

	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;

	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;

	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
	return 0;

err_stash:
	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
err_ppgtt:
	i915_vm_put(&ppgtt->vm);
	return err;
}

static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = fetch_and_zero(&ggtt->alias);
	if (!ppgtt)
		return;

	i915_vm_put(&ppgtt->vm);

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
}

int i915_init_ggtt(struct drm_i915_private *i915)
{
	int ret;

	ret = init_ggtt(&i915->ggtt);
	if (ret)
		return ret;

	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
		ret = init_aliasing_ppgtt(&i915->ggtt);
		if (ret)
			cleanup_init_ggtt(&i915->ggtt);
	}

	return 0;
}

static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;

	atomic_set(&ggtt->vm.open, 0);

	rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
	flush_workqueue(ggtt->vm.i915->wq);

	mutex_lock(&ggtt->vm.mutex);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
		WARN_ON(__i915_vma_unbind(vma));

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);

	ggtt_release_guc_top(ggtt);
	intel_vgt_deballoon(ggtt);

	ggtt->vm.cleanup(&ggtt->vm);

	mutex_unlock(&ggtt->vm.mutex);
	i915_address_space_fini(&ggtt->vm);
	dma_resv_fini(&ggtt->vm.resv);

	arch_phys_wc_del(ggtt->mtrr);

	if (ggtt->iomap.size)
		io_mapping_fini(&ggtt->iomap);
}

/**
 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
 * @i915: i915 device
 */
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;

	fini_aliasing_ppgtt(ggtt);

	intel_ggtt_fini_fences(ggtt);
	ggtt_cleanup_hw(ggtt);
}
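
/*
 * The GGMS field of the GMCH control register tells us how much space the
 * BIOS set aside for the GTT itself; the helpers below decode it into the
 * size of the GTT in bytes for each generation's encoding.
 */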

static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}
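
/*
 * Common tail of the gen6+ probes: map the GTT page table (the upper half of
 * BAR 0) and set up the scratch page that unused PTEs are pointed at.
 */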

static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	phys_addr_t phys_addr;
	u32 pte_flags;
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;

	/*
	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
	 * will be dropped. For WC mappings in general we have 64 byte burst
	 * writes when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
		ggtt->gsm = ioremap(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		drm_err(&i915->drm, "Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(&ggtt->vm);
	if (ret) {
		drm_err(&i915->drm, "Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	pte_flags = 0;
	if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
		pte_flags |= PTE_LM;

	ggtt->vm.scratch[0]->encode =
		ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
				    I915_CACHE_NONE, pte_flags);

	return 0;
}

int ggtt_set_pages(struct i915_vma *vma)
{
	int ret;

	GEM_BUG_ON(vma->pages);

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	free_scratch(vm);
}

static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
	return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
					       pci_resource_len(pdev, bar));
}

static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	if (!HAS_LMEM(i915)) {
		ggtt->gmadr = pci_resource(pdev, 2);
		ggtt->mappable_end = resource_size(&ggtt->gmadr);
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	if (IS_CHERRYVIEW(i915))
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	else
		size = gen8_get_total_gtt_size(snb_gmch_ctl);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	ggtt->vm.cleanup = gen6_gmch_remove;
	ggtt->vm.insert_page = gen8_ggtt_insert_page;
	ggtt->vm.clear_range = nop_clear_range;
	if (intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen8_ggtt_clear_range;

	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;

	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
	if (intel_ggtt_update_needs_vtd_wa(i915) ||
	    IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
		ggtt->vm.bind_async_flags =
			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	}

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	ggtt->vm.pte_encode = gen8_ggtt_pte_encode;

	setup_private_pat(ggtt->vm.gt->uncore);

	return ggtt_probe_common(ggtt, size);
}
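
/*
 * gen6/gen7 GGTT PTE encoders: each platform variant below maps
 * i915_cache_level onto its own cacheability bits in the PTE.
 */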

static u64 snb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 ivb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 byt_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static u64 hsw_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static u64 iris_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	ggtt->gmadr = pci_resource(pdev, 2);
	ggtt->mappable_end = resource_size(&ggtt->gmadr);

	/*
	 * 64/512MB is the current min/max we actually know of, but this is
	 * just a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
			&ggtt->mappable_end);
		return -ENXIO;
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	ggtt->vm.clear_range = nop_clear_range;
	if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen6_ggtt_clear_range;
	ggtt->vm.insert_page = gen6_ggtt_insert_page;
	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
	ggtt->vm.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	if (HAS_EDRAM(i915))
		ggtt->vm.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(i915))
		ggtt->vm.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(i915))
		ggtt->vm.pte_encode = byt_pte_encode;
	else if (INTEL_GEN(i915) >= 7)
		ggtt->vm.pte_encode = ivb_pte_encode;
	else
		ggtt->vm.pte_encode = snb_pte_encode;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	return ggtt_probe_common(ggtt, size);
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	phys_addr_t gmadr_base;
	int ret;

	ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
	if (!ret) {
		drm_err(&i915->drm, "failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);

	ggtt->gmadr =
		(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	if (needs_idle_maps(i915)) {
		drm_notice(&i915->drm,
			   "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
		ggtt->do_idle_maps = true;
	}

	ggtt->vm.insert_page = i915_ggtt_insert_page;
	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
	ggtt->vm.clear_range = i915_ggtt_clear_range;
	ggtt->vm.cleanup = i915_gmch_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	if (unlikely(ggtt->do_idle_maps))
		drm_notice(&i915->drm,
			   "Applying Ironlake quirks for intel_iommu\n");

	return 0;
}

static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	ggtt->vm.gt = gt;
	ggtt->vm.i915 = i915;
	ggtt->vm.dma = i915->drm.dev;
	dma_resv_init(&ggtt->vm.resv);

	if (INTEL_GEN(i915) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (INTEL_GEN(i915) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret) {
		dma_resv_fini(&ggtt->vm.resv);
		return ret;
	}

	if ((ggtt->vm.total - 1) >> 32) {
		drm_err(&i915->drm,
			"We never expected a Global GTT with more than 32bits"
			" of address space! Found %lldM!\n",
			ggtt->vm.total >> 20);
		ggtt->vm.total = 1ULL << 32;
		ggtt->mappable_end =
			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
	}

	if (ggtt->mappable_end > ggtt->vm.total) {
		drm_err(&i915->drm,
			"mappable aperture extends past end of GGTT,"
			" aperture=%pa, total=%llx\n",
			&ggtt->mappable_end, ggtt->vm.total);
		ggtt->mappable_end = ggtt->vm.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
	drm_dbg(&i915->drm, "GMADR size = %lluM\n",
		(u64)ggtt->mappable_end >> 20);
	drm_dbg(&i915->drm, "DSM size = %lluM\n",
		(u64)resource_size(&intel_graphics_stolen_res) >> 20);

	return 0;
}

/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @i915: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
	int ret;

	ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
	if (ret)
		return ret;

	if (intel_vtd_active())
		drm_info(&i915->drm, "VT-d active for gfx access\n");

	return 0;
}

int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}

void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
	GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);

	ggtt->invalidate = guc_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
	/* XXX Temporary pardon for error unload */
	if (ggtt->invalidate == gen8_ggtt_invalidate)
		return;

	/* We should only be called after i915_ggtt_enable_guc() */
	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma;
	bool flush = false;
	int open;

	intel_gt_check_and_clear_faults(ggtt->vm.gt);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&ggtt->vm.open, 0);

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		unsigned int was_bound =
			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

		GEM_BUG_ON(!was_bound);
		vma->ops->bind_vma(&ggtt->vm, NULL, vma,
				   obj ? obj->cache_level : 0,
				   was_bound);
		if (obj) { /* only used during resume => exclusive access */
			flush |= fetch_and_zero(&obj->write_domain);
			obj->read_domains |= I915_GEM_DOMAIN_GTT;
		}
	}

	atomic_set(&ggtt->vm.open, open);
	ggtt->invalidate(ggtt);

	if (flush)
		wbinvd_on_all_cpus();

	if (INTEL_GEN(ggtt->vm.i915) >= 8)
		setup_private_pat(ggtt->vm.gt->uncore);

	intel_ggtt_restore_fences(ggtt);
}
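
/*
 * Build the scatterlist for a 90°-rotated view: walk the object column by
 * column, starting from the bottom row, and pad each column out to
 * dst_stride with PTE-only entries that the display engine will ignore.
 */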

static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int src_stride, unsigned int dst_stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		unsigned int left;

		src_idx = src_stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= src_stride;
		}

		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}

static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].src_stride,
				  rot_info->plane[i].dst_stride,
				  st, sg);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}
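
/*
 * Build the scatterlist for a remapped (linearised) view: copy each row of
 * width pages, coalescing contiguous DMA ranges, and pad every row out to
 * dst_stride with PTE-only entries that the display engine will ignore.
 */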

static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	    unsigned int width, unsigned int height,
	    unsigned int src_stride, unsigned int dst_stride,
	    struct sg_table *st, struct scatterlist *sg)
{
	unsigned int row;

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;

		while (left) {
			dma_addr_t addr;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */

			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);

			length = min(left, length);

			st->nents++;

			sg_set_page(sg, NULL, length, 0);
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = length;
			sg = sg_next(sg);

			offset += length / I915_GTT_PAGE_SIZE;
			left -= length;
		}

		offset += src_stride - width;

		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}

static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
		  struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_remapped_info_size(rem_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
		sg = remap_pages(obj, rem_info->plane[i].offset,
				 rem_info->plane[i].width, rem_info->plane[i].height,
				 rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
				 st, sg);
	}

	i915_sg_trim(st);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rem_info->plane[0].width,
		rem_info->plane[0].height, size);

	return ERR_PTR(ret);
}
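
/*
 * Build the scatterlist for a partial view: just the requested run of pages
 * starting at view->partial.offset, with any unused tail entries trimmed.
 */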

static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg, *iter;
	unsigned int count = view->partial.size;
	unsigned int offset;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset, true);
	GEM_BUG_ON(!iter);

	sg = st->sgl;
	st->nents = 0;
	do {
		unsigned int len;

		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0) {
			sg_mark_end(sg);
			i915_sg_trim(st); /* Drop any unused tail entries. */

			return st;
		}

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret;

	/*
	 * The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->ggtt_view.type) {
	default:
		GEM_BUG_ON(vma->ggtt_view.type);
		fallthrough;
	case I915_GGTT_VIEW_NORMAL:
		vma->pages = vma->obj->mm.pages;
		return 0;

	case I915_GGTT_VIEW_ROTATED:
		vma->pages =
			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
		break;

	case I915_GGTT_VIEW_REMAPPED:
		vma->pages =
			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
		break;

	case I915_GGTT_VIEW_PARTIAL:
		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
		break;
	}

	ret = 0;
	if (IS_ERR(vma->pages)) {
		ret = PTR_ERR(vma->pages);
		vma->pages = NULL;
		drm_err(&vma->vm->i915->drm,
			"Failed to get pages for VMA view type %u (%d)!\n",
			vma->ggtt_view.type, ret);
	}
	return ret;
}