1 // SPDX-License-Identifier: MIT
3 * Copyright © 2020 Intel Corporation
6 #include <linux/stop_machine.h>
8 #include <asm/set_memory.h>
13 #include "i915_scatterlist.h"
14 #include "i915_vgpu.h"
16 #include "intel_gtt.h"
19 i915_get_ggtt_vma_pages(struct i915_vma *vma);
21 static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
26 if (i915_node_color_differs(node, color))
27 *start += I915_GTT_PAGE_SIZE;
30 * Also leave a space between the unallocated reserved node after the
31 * GTT and any objects within the GTT, i.e. we use the color adjustment
32 * to insert a guard page to prevent prefetches crossing over the
35 node = list_next_entry(node, node_list);
36 if (node->color != color)
37 *end -= I915_GTT_PAGE_SIZE;
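/*
 * Net effect (editor's note): any hole bordering a node of a different
 * colour is shrunk by one GTT page on that side, leaving a scratch-backed
 * guard page between the differently-coloured neighbours.
 */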
40 static int ggtt_init_hw(struct i915_ggtt *ggtt)
42 struct drm_i915_private *i915 = ggtt->vm.i915;
44 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
46 ggtt->vm.is_ggtt = true;
48 /* Only VLV supports read-only GGTT mappings */
49 ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
51 if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
52 ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
54 if (ggtt->mappable_end) {
55 if (!io_mapping_init_wc(&ggtt->iomap,
57 ggtt->mappable_end)) {
58 ggtt->vm.cleanup(&ggtt->vm);
62 ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
66 i915_ggtt_init_fences(ggtt);
72 * i915_ggtt_init_hw - Initialize GGTT hardware
75 int i915_ggtt_init_hw(struct drm_i915_private *i915)
79 stash_init(&i915->mm.wc_stash);
82 * Note that we use page colouring to enforce a guard page at the
83 * end of the address space. This is required as the CS may prefetch
84 * beyond the end of the batch buffer, across the page boundary,
85 * and beyond the end of the GTT if we do not provide a guard.
87 ret = ggtt_init_hw(&i915->ggtt);
95 * Certain Gen5 chipsets require idling the GPU before
96 * unmapping anything from the GTT when VT-d is enabled.
98 static bool needs_idle_maps(struct drm_i915_private *i915)
101 * Query intel_iommu to see if we need the workaround. Presumably that
104 return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
107 void i915_ggtt_suspend(struct i915_ggtt *ggtt)
109 struct i915_vma *vma;
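/*
 * Wait for any asynchronous binds to finish writing their PTEs before the
 * whole GGTT is scrubbed with scratch entries below.
 */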
111 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
112 i915_vma_wait_for_bind(vma);
114 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
115 ggtt->invalidate(ggtt);
117 intel_gt_check_and_clear_faults(ggtt->vm.gt);
120 void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
122 struct intel_uncore *uncore = ggtt->vm.gt->uncore;
124 spin_lock_irq(&uncore->lock);
125 intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
126 intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
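/* The read back posts the write, ensuring the flush reached the hardware. */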
127 spin_unlock_irq(&uncore->lock);
130 static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
132 struct intel_uncore *uncore = ggtt->vm.gt->uncore;
135 * Note that as an uncached mmio write, this will flush the
136 * WCB of the writes into the GGTT before it triggers the invalidate.
138 intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
141 static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
143 struct intel_uncore *uncore = ggtt->vm.gt->uncore;
144 struct drm_i915_private *i915 = ggtt->vm.i915;
146 gen8_ggtt_invalidate(ggtt);
148 if (INTEL_GEN(i915) >= 12)
149 intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
150 GEN12_GUC_TLB_INV_CR_INVALIDATE);
152 intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
155 static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
157 intel_gtt_chipset_flush();
160 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
165 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
168 enum i915_cache_level level,
171 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
172 gen8_pte_t __iomem *pte =
173 (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
175 gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
177 ggtt->invalidate(ggtt);
180 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
181 struct i915_vma *vma,
182 enum i915_cache_level level,
185 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
186 struct sgt_iter sgt_iter;
187 gen8_pte_t __iomem *gtt_entries;
188 const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
192 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
193 * not to allow the user to override access to a read only page.
196 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
197 gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
198 for_each_sgt_daddr(addr, sgt_iter, vma->pages)
199 gen8_set_pte(gtt_entries++, pte_encode | addr);
202 * We want to flush the TLBs only after we're certain all the PTE
203 * updates have finished.
205 ggtt->invalidate(ggtt);
208 static void gen6_ggtt_insert_page(struct i915_address_space *vm,
211 enum i915_cache_level level,
214 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
215 gen6_pte_t __iomem *pte =
216 (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
218 iowrite32(vm->pte_encode(addr, level, flags), pte);
220 ggtt->invalidate(ggtt);
224 * Binds an object into the global gtt with the specified cache level.
225 * The object will be accessible to the GPU via commands whose operands
226 * reference offsets within the global GTT as well as accessible by the GPU
227 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
229 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
230 struct i915_vma *vma,
231 enum i915_cache_level level,
234 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
235 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
236 unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
237 struct sgt_iter iter;
240 for_each_sgt_daddr(addr, iter, vma->pages)
241 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
244 * We want to flush the TLBs only after we're certain all the PTE
245 * updates have finished.
247 ggtt->invalidate(ggtt);
250 static void nop_clear_range(struct i915_address_space *vm,
251 u64 start, u64 length)
255 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
256 u64 start, u64 length)
258 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
259 unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
260 unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
261 const gen8_pte_t scratch_pte = vm->scratch[0].encode;
262 gen8_pte_t __iomem *gtt_base =
263 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
264 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
267 if (WARN(num_entries > max_entries,
268 "First entry = %d; Num entries = %d (max=%d)\n",
269 first_entry, num_entries, max_entries))
270 num_entries = max_entries;
272 for (i = 0; i < num_entries; i++)
273 gen8_set_pte(&gtt_base[i], scratch_pte);
276 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
279 * Make sure the internal GAM fifo has been cleared of all GTT
280 * writes before exiting stop_machine(). This guarantees that
281 * any aperture accesses waiting to start in another process
282 * cannot back up behind the GTT writes causing a hang.
283 * The register can be any arbitrary GAM register.
285 intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
289 struct i915_address_space *vm;
292 enum i915_cache_level level;
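/*
 * The __cb/__BKL pairs below funnel GGTT updates through stop_machine() so
 * that no other CPU can touch the aperture while the PTEs are written and
 * the GAM fifo is drained; the __BKL suffix is presumably a nod to the old
 * Big Kernel Lock semantics this emulates.
 */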
295 static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
297 struct insert_page *arg = _arg;
299 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
300 bxt_vtd_ggtt_wa(arg->vm);
305 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
308 enum i915_cache_level level,
311 struct insert_page arg = { vm, addr, offset, level };
313 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
316 struct insert_entries {
317 struct i915_address_space *vm;
318 struct i915_vma *vma;
319 enum i915_cache_level level;
323 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
325 struct insert_entries *arg = _arg;
327 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
328 bxt_vtd_ggtt_wa(arg->vm);
333 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
334 struct i915_vma *vma,
335 enum i915_cache_level level,
338 struct insert_entries arg = { vm, vma, level, flags };
340 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
343 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
344 u64 start, u64 length)
346 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
347 unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
348 unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
349 gen6_pte_t scratch_pte, __iomem *gtt_base =
350 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
351 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
354 if (WARN(num_entries > max_entries,
355 "First entry = %d; Num entries = %d (max=%d)\n",
356 first_entry, num_entries, max_entries))
357 num_entries = max_entries;
359 scratch_pte = vm->scratch[0].encode;
360 for (i = 0; i < num_entries; i++)
361 iowrite32(scratch_pte, &gtt_base[i]);
364 static void i915_ggtt_insert_page(struct i915_address_space *vm,
367 enum i915_cache_level cache_level,
370 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
371 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
373 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
376 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
377 struct i915_vma *vma,
378 enum i915_cache_level cache_level,
381 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
382 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
384 intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
388 static void i915_ggtt_clear_range(struct i915_address_space *vm,
389 u64 start, u64 length)
391 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
394 static int ggtt_bind_vma(struct i915_vma *vma,
395 enum i915_cache_level cache_level,
398 struct drm_i915_gem_object *obj = vma->obj;
401 /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
403 if (i915_gem_object_is_readonly(obj))
404 pte_flags |= PTE_READ_ONLY;
406 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
408 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
411 * Without aliasing PPGTT there's no difference between
412 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
413 * upgrade to both bound if we bind either to avoid double-binding.
415 atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
420 static void ggtt_unbind_vma(struct i915_vma *vma)
422 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
425 static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
430 if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
433 GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
434 size = ggtt->vm.total - GUC_GGTT_TOP;
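/*
 * i.e. reserve [GUC_GGTT_TOP, ggtt->vm.total) so nothing that GuC must
 * address is ever placed in the range it cannot see; per the comment in
 * init_ggtt(), the range is instead used to hold the GuC/HuC firmware
 * images.
 */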
436 ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
437 GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
440 drm_dbg(&ggtt->vm.i915->drm,
441 "Failed to reserve top of GGTT for GuC\n");
446 static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
448 if (drm_mm_node_allocated(&ggtt->uc_fw))
449 drm_mm_remove_node(&ggtt->uc_fw);
452 static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
454 ggtt_release_guc_top(ggtt);
455 if (drm_mm_node_allocated(&ggtt->error_capture))
456 drm_mm_remove_node(&ggtt->error_capture);
457 mutex_destroy(&ggtt->error_mutex);
460 static int init_ggtt(struct i915_ggtt *ggtt)
463 * Let GEM manage all of the aperture.
465 * However, leave one page at the end still bound to the scratch page.
466 * There are a number of places where the hardware apparently prefetches
467 * past the end of the object, and we've seen multiple hangs with the
468 * GPU head pointer stuck in a batchbuffer bound at the last page of the
469 * aperture. One page should be enough to keep any prefetching inside
472 unsigned long hole_start, hole_end;
473 struct drm_mm_node *entry;
477 * GuC requires all resources that we're sharing with it to be placed in
478 * non-WOPCM memory. If GuC is not present or not in use we still need a
479 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
482 ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
483 intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
485 ret = intel_vgt_balloon(ggtt);
489 mutex_init(&ggtt->error_mutex);
490 if (ggtt->mappable_end) {
491 /* Reserve a mappable slot for our lockless error capture */
492 ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
493 &ggtt->error_capture,
495 I915_COLOR_UNEVICTABLE,
496 0, ggtt->mappable_end,
503 * The upper portion of the GuC address space has a sizeable hole
504 * (several MB) that is inaccessible by GuC. Reserve this range within
505 * GGTT as it can comfortably hold GuC/HuC firmware images.
507 ret = ggtt_reserve_guc_top(ggtt);
511 /* Clear any non-preallocated blocks */
512 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
513 drm_dbg_kms(&ggtt->vm.i915->drm,
514 "clearing unused GTT space: [%lx, %lx]\n",
515 hole_start, hole_end);
516 ggtt->vm.clear_range(&ggtt->vm, hole_start,
517 hole_end - hole_start);
520 /* And finally clear the reserved guard page */
521 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
526 cleanup_init_ggtt(ggtt);
530 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
531 enum i915_cache_level cache_level,
537 /* Currently applicable only to VLV */
539 if (i915_gem_object_is_readonly(vma->obj))
540 pte_flags |= PTE_READ_ONLY;
542 if (flags & I915_VMA_LOCAL_BIND) {
543 struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
545 if (flags & I915_VMA_ALLOC) {
546 ret = alias->vm.allocate_va_range(&alias->vm,
552 set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
555 GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
556 __i915_vma_flags(vma)));
557 alias->vm.insert_entries(&alias->vm, vma,
558 cache_level, pte_flags);
561 if (flags & I915_VMA_GLOBAL_BIND)
562 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
567 static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
569 if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
570 struct i915_address_space *vm = vma->vm;
572 vm->clear_range(vm, vma->node.start, vma->size);
575 if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
576 struct i915_address_space *vm =
577 &i915_vm_to_ggtt(vma->vm)->alias->vm;
579 vm->clear_range(vm, vma->node.start, vma->size);
583 static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
585 struct i915_ppgtt *ppgtt;
588 ppgtt = i915_ppgtt_create(ggtt->vm.gt);
590 return PTR_ERR(ppgtt);
592 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
598 * Note we only pre-allocate as far as the end of the global
599 * GTT. On 48b / 4-level page-tables, the difference is very,
600 * very significant! We have to preallocate as GVT/vgpu does
601 * not like the page directory disappearing.
603 err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
608 ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
610 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
611 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
613 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
614 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
619 i915_vm_put(&ppgtt->vm);
623 static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
625 struct i915_ppgtt *ppgtt;
627 ppgtt = fetch_and_zero(&ggtt->alias);
631 i915_vm_put(&ppgtt->vm);
633 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
634 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
637 int i915_init_ggtt(struct drm_i915_private *i915)
641 ret = init_ggtt(&i915->ggtt);
645 if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
646 ret = init_aliasing_ppgtt(&i915->ggtt);
648 cleanup_init_ggtt(&i915->ggtt);
654 static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
656 struct i915_vma *vma, *vn;
658 atomic_set(&ggtt->vm.open, 0);
660 rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
661 flush_workqueue(ggtt->vm.i915->wq);
663 mutex_lock(&ggtt->vm.mutex);
665 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
666 WARN_ON(__i915_vma_unbind(vma));
668 if (drm_mm_node_allocated(&ggtt->error_capture))
669 drm_mm_remove_node(&ggtt->error_capture);
670 mutex_destroy(&ggtt->error_mutex);
672 ggtt_release_guc_top(ggtt);
673 intel_vgt_deballoon(ggtt);
675 ggtt->vm.cleanup(&ggtt->vm);
677 mutex_unlock(&ggtt->vm.mutex);
678 i915_address_space_fini(&ggtt->vm);
680 arch_phys_wc_del(ggtt->mtrr);
682 if (ggtt->iomap.size)
683 io_mapping_fini(&ggtt->iomap);
687 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
690 void i915_ggtt_driver_release(struct drm_i915_private *i915)
692 struct pagevec *pvec;
694 fini_aliasing_ppgtt(&i915->ggtt);
696 ggtt_cleanup_hw(&i915->ggtt);
698 pvec = &i915->mm.wc_stash.pvec;
700 set_pages_array_wb(pvec->pages, pvec->nr);
701 __pagevec_release(pvec);
705 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
707 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
708 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
709 return snb_gmch_ctl << 20;
712 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
714 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
715 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
717 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
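/*
 * i.e. on gen8+ the GGMS field is treated as log2 of the GSM size in MB
 * (1 -> 2MB, 2 -> 4MB, 3 -> 8MB), unlike gen6 where the field is the size
 * in MB directly.
 */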
720 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
721 if (bdw_gmch_ctl > 4)
725 return bdw_gmch_ctl << 20;
728 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
730 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
731 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
734 return 1 << (20 + gmch_ctrl);
739 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
741 struct drm_i915_private *i915 = ggtt->vm.i915;
742 struct pci_dev *pdev = i915->drm.pdev;
743 phys_addr_t phys_addr;
746 /* For Modern GENs the PTEs and register space are split in the BAR */
747 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
750 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
751 * will be dropped. For WC mappings in general we have 64 byte burst
752 * writes when the WC buffer is flushed, so we can't use it, but have to
753 * resort to an uncached mapping. The WC issue is easily caught by the
754 * readback check when writing GTT PTE entries.
756 if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
757 ggtt->gsm = ioremap(phys_addr, size);
759 ggtt->gsm = ioremap_wc(phys_addr, size);
761 DRM_ERROR("Failed to map the ggtt page table\n");
765 ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
767 DRM_ERROR("Scratch setup failed\n");
768 /* iounmap will also get called at remove, but meh */
773 ggtt->vm.scratch[0].encode =
774 ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
780 int ggtt_set_pages(struct i915_vma *vma)
784 GEM_BUG_ON(vma->pages);
786 ret = i915_get_ggtt_vma_pages(vma);
790 vma->page_sizes = vma->obj->mm.page_sizes;
795 static void gen6_gmch_remove(struct i915_address_space *vm)
797 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
800 cleanup_scratch_page(vm);
803 static struct resource pci_resource(struct pci_dev *pdev, int bar)
805 return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
806 pci_resource_len(pdev, bar));
809 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
811 struct drm_i915_private *i915 = ggtt->vm.i915;
812 struct pci_dev *pdev = i915->drm.pdev;
817 /* TODO: We're not aware of mappable constraints on gen8 yet */
818 if (!IS_DGFX(i915)) {
819 ggtt->gmadr = pci_resource(pdev, 2);
820 ggtt->mappable_end = resource_size(&ggtt->gmadr);
823 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
825 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
827 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
829 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
830 if (IS_CHERRYVIEW(i915))
831 size = chv_get_total_gtt_size(snb_gmch_ctl);
833 size = gen8_get_total_gtt_size(snb_gmch_ctl);
835 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
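/* e.g. an 8MB GSM of 8-byte PTEs gives 1M entries, i.e. a 4GB GGTT. */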
836 ggtt->vm.cleanup = gen6_gmch_remove;
837 ggtt->vm.insert_page = gen8_ggtt_insert_page;
838 ggtt->vm.clear_range = nop_clear_range;
839 if (intel_scanout_needs_vtd_wa(i915))
840 ggtt->vm.clear_range = gen8_ggtt_clear_range;
842 ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
844 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
845 if (intel_ggtt_update_needs_vtd_wa(i915) ||
846 IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
847 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
848 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
849 ggtt->vm.bind_async_flags =
850 I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
853 ggtt->invalidate = gen8_ggtt_invalidate;
855 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
856 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
857 ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
858 ggtt->vm.vma_ops.clear_pages = clear_pages;
860 ggtt->vm.pte_encode = gen8_pte_encode;
862 setup_private_pat(ggtt->vm.gt->uncore);
864 return ggtt_probe_common(ggtt, size);
867 static u64 snb_pte_encode(dma_addr_t addr,
868 enum i915_cache_level level,
871 gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
874 case I915_CACHE_L3_LLC:
876 pte |= GEN6_PTE_CACHE_LLC;
878 case I915_CACHE_NONE:
879 pte |= GEN6_PTE_UNCACHED;
888 static u64 ivb_pte_encode(dma_addr_t addr,
889 enum i915_cache_level level,
892 gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
895 case I915_CACHE_L3_LLC:
896 pte |= GEN7_PTE_CACHE_L3_LLC;
899 pte |= GEN6_PTE_CACHE_LLC;
901 case I915_CACHE_NONE:
902 pte |= GEN6_PTE_UNCACHED;
911 static u64 byt_pte_encode(dma_addr_t addr,
912 enum i915_cache_level level,
915 gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
917 if (!(flags & PTE_READ_ONLY))
918 pte |= BYT_PTE_WRITEABLE;
920 if (level != I915_CACHE_NONE)
921 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
926 static u64 hsw_pte_encode(dma_addr_t addr,
927 enum i915_cache_level level,
930 gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
932 if (level != I915_CACHE_NONE)
933 pte |= HSW_WB_LLC_AGE3;
938 static u64 iris_pte_encode(dma_addr_t addr,
939 enum i915_cache_level level,
942 gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
945 case I915_CACHE_NONE:
948 pte |= HSW_WT_ELLC_LLC_AGE3;
951 pte |= HSW_WB_ELLC_LLC_AGE3;
958 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
960 struct drm_i915_private *i915 = ggtt->vm.i915;
961 struct pci_dev *pdev = i915->drm.pdev;
966 ggtt->gmadr = pci_resource(pdev, 2);
967 ggtt->mappable_end = resource_size(&ggtt->gmadr);
970 * 64/512MB is the current min/max we actually know of, but this is
971 * just a coarse sanity check.
973 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
974 DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
978 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
980 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
982 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
983 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
985 size = gen6_get_total_gtt_size(snb_gmch_ctl);
986 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
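/* e.g. a 2MB GSM of 4-byte PTEs gives 512K entries, i.e. a 2GB GGTT. */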
988 ggtt->vm.clear_range = nop_clear_range;
989 if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
990 ggtt->vm.clear_range = gen6_ggtt_clear_range;
991 ggtt->vm.insert_page = gen6_ggtt_insert_page;
992 ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
993 ggtt->vm.cleanup = gen6_gmch_remove;
995 ggtt->invalidate = gen6_ggtt_invalidate;
998 ggtt->vm.pte_encode = iris_pte_encode;
999 else if (IS_HASWELL(i915))
1000 ggtt->vm.pte_encode = hsw_pte_encode;
1001 else if (IS_VALLEYVIEW(i915))
1002 ggtt->vm.pte_encode = byt_pte_encode;
1003 else if (INTEL_GEN(i915) >= 7)
1004 ggtt->vm.pte_encode = ivb_pte_encode;
1006 ggtt->vm.pte_encode = snb_pte_encode;
1008 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
1009 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
1010 ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
1011 ggtt->vm.vma_ops.clear_pages = clear_pages;
1013 return ggtt_probe_common(ggtt, size);
1016 static void i915_gmch_remove(struct i915_address_space *vm)
1018 intel_gmch_remove();
1021 static int i915_gmch_probe(struct i915_ggtt *ggtt)
1023 struct drm_i915_private *i915 = ggtt->vm.i915;
1024 phys_addr_t gmadr_base;
1027 ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
1029 DRM_ERROR("failed to set up gmch\n");
1033 intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
1036 (struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
1038 ggtt->do_idle_maps = needs_idle_maps(i915);
1039 ggtt->vm.insert_page = i915_ggtt_insert_page;
1040 ggtt->vm.insert_entries = i915_ggtt_insert_entries;
1041 ggtt->vm.clear_range = i915_ggtt_clear_range;
1042 ggtt->vm.cleanup = i915_gmch_remove;
1044 ggtt->invalidate = gmch_ggtt_invalidate;
1046 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
1047 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
1048 ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
1049 ggtt->vm.vma_ops.clear_pages = clear_pages;
1051 if (unlikely(ggtt->do_idle_maps))
1052 dev_notice(i915->drm.dev,
1053 "Applying Ironlake quirks for intel_iommu\n");
1058 static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
1060 struct drm_i915_private *i915 = gt->i915;
1064 ggtt->vm.i915 = i915;
1065 ggtt->vm.dma = &i915->drm.pdev->dev;
1067 if (INTEL_GEN(i915) <= 5)
1068 ret = i915_gmch_probe(ggtt);
1069 else if (INTEL_GEN(i915) < 8)
1070 ret = gen6_gmch_probe(ggtt);
1072 ret = gen8_gmch_probe(ggtt);
1076 if ((ggtt->vm.total - 1) >> 32) {
1077 DRM_ERROR("We never expected a Global GTT with more than 32bits"
1078 " of address space! Found %lldM!\n",
1079 ggtt->vm.total >> 20);
1080 ggtt->vm.total = 1ULL << 32;
1081 ggtt->mappable_end =
1082 min_t(u64, ggtt->mappable_end, ggtt->vm.total);
1085 if (ggtt->mappable_end > ggtt->vm.total) {
1086 DRM_ERROR("mappable aperture extends past end of GGTT,"
1087 " aperture=%pa, total=%llx\n",
1088 &ggtt->mappable_end, ggtt->vm.total);
1089 ggtt->mappable_end = ggtt->vm.total;
1092 /* GMADR is the PCI mmio aperture into the global GTT. */
1093 DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
1094 DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
1095 DRM_DEBUG_DRIVER("DSM size = %lluM\n",
1096 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
1102 * i915_ggtt_probe_hw - Probe GGTT hardware location
1103 * @i915: i915 device
1105 int i915_ggtt_probe_hw(struct drm_i915_private *i915)
1109 ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
1113 if (intel_vtd_active())
1114 dev_info(i915->drm.dev, "VT-d active for gfx access\n");
1119 int i915_ggtt_enable_hw(struct drm_i915_private *i915)
1121 if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
1127 void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
1129 GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
1131 ggtt->invalidate = guc_ggtt_invalidate;
1133 ggtt->invalidate(ggtt);
1136 void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
1138 /* XXX Temporary pardon for error unload */
1139 if (ggtt->invalidate == gen8_ggtt_invalidate)
1142 /* We should only be called after i915_ggtt_enable_guc() */
1143 GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
1145 ggtt->invalidate = gen8_ggtt_invalidate;
1147 ggtt->invalidate(ggtt);
1150 void i915_ggtt_resume(struct i915_ggtt *ggtt)
1152 struct i915_vma *vma;
1156 intel_gt_check_and_clear_faults(ggtt->vm.gt);
1158 /* First fill our portion of the GTT with scratch pages */
1159 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
1161 /* Skip rewriting PTE on VMA unbind. */
1162 open = atomic_xchg(&ggtt->vm.open, 0);
1164 /* clflush objects bound into the GGTT and rebind them. */
1165 list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
1166 struct drm_i915_gem_object *obj = vma->obj;
1168 if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
1171 clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
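/*
 * Clearing the bound bit first means the bind below rewrites the PTEs we
 * just scrubbed, rather than treating the vma as already bound.
 */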
1172 WARN_ON(i915_vma_bind(vma,
1173 obj ? obj->cache_level : 0,
1175 if (obj) { /* only used during resume => exclusive access */
1176 flush |= fetch_and_zero(&obj->write_domain);
1177 obj->read_domains |= I915_GEM_DOMAIN_GTT;
1181 atomic_set(&ggtt->vm.open, open);
1182 ggtt->invalidate(ggtt);
1185 wbinvd_on_all_cpus();
1187 if (INTEL_GEN(ggtt->vm.i915) >= 8)
1188 setup_private_pat(ggtt->vm.gt->uncore);
1191 static struct scatterlist *
1192 rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
1193 unsigned int width, unsigned int height,
1194 unsigned int stride,
1195 struct sg_table *st, struct scatterlist *sg)
1197 unsigned int column, row;
1198 unsigned int src_idx;
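/*
 * Emit the object's pages column by column, starting from the bottom row
 * of each column and walking upwards one stride per row, which is the
 * order the rotated GGTT view expects to scan them out.
 */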
1200 for (column = 0; column < width; column++) {
1201 src_idx = stride * (height - 1) + column + offset;
1202 for (row = 0; row < height; row++) {
1205 * We don't need the pages, but need to initialize
1206 * the entries so the sg list can be happily traversed.
1207 * The only thing we need are DMA addresses.
1209 sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
1210 sg_dma_address(sg) =
1211 i915_gem_object_get_dma_address(obj, src_idx);
1212 sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
1221 static noinline struct sg_table *
1222 intel_rotate_pages(struct intel_rotation_info *rot_info,
1223 struct drm_i915_gem_object *obj)
1225 unsigned int size = intel_rotation_info_size(rot_info);
1226 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1227 struct sg_table *st;
1228 struct scatterlist *sg;
1232 /* Allocate target SG list. */
1233 st = kmalloc(sizeof(*st), GFP_KERNEL);
1237 ret = sg_alloc_table(st, size, GFP_KERNEL);
1244 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
1245 sg = rotate_pages(obj, rot_info->plane[i].offset,
1246 rot_info->plane[i].width, rot_info->plane[i].height,
1247 rot_info->plane[i].stride, st, sg);
1256 drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
1257 obj->base.size, rot_info->plane[0].width,
1258 rot_info->plane[0].height, size);
1260 return ERR_PTR(ret);
1263 static struct scatterlist *
1264 remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
1265 unsigned int width, unsigned int height,
1266 unsigned int stride,
1267 struct sg_table *st, struct scatterlist *sg)
1271 for (row = 0; row < height; row++) {
1272 unsigned int left = width * I915_GTT_PAGE_SIZE;
1276 unsigned int length;
1279 * We don't need the pages, but need to initialize
1280 * the entries so the sg list can be happily traversed.
1281 * The only thing we need are DMA addresses.
1284 addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
1286 length = min(left, length);
1290 sg_set_page(sg, NULL, length, 0);
1291 sg_dma_address(sg) = addr;
1292 sg_dma_len(sg) = length;
1295 offset += length / I915_GTT_PAGE_SIZE;
1299 offset += stride - width;
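/*
 * width pages were consumed for this row above; skipping the remaining
 * (stride - width) pages lands offset on the first page of the next row
 * in the source object.
 */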
1305 static noinline struct sg_table *
1306 intel_remap_pages(struct intel_remapped_info *rem_info,
1307 struct drm_i915_gem_object *obj)
1309 unsigned int size = intel_remapped_info_size(rem_info);
1310 struct drm_i915_private *i915 = to_i915(obj->base.dev);
1311 struct sg_table *st;
1312 struct scatterlist *sg;
1316 /* Allocate target SG list. */
1317 st = kmalloc(sizeof(*st), GFP_KERNEL);
1321 ret = sg_alloc_table(st, size, GFP_KERNEL);
1328 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
1329 sg = remap_pages(obj, rem_info->plane[i].offset,
1330 rem_info->plane[i].width, rem_info->plane[i].height,
1331 rem_info->plane[i].stride, st, sg);
1342 drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
1343 obj->base.size, rem_info->plane[0].width,
1344 rem_info->plane[0].height, size);
1346 return ERR_PTR(ret);
1349 static noinline struct sg_table *
1350 intel_partial_pages(const struct i915_ggtt_view *view,
1351 struct drm_i915_gem_object *obj)
1353 struct sg_table *st;
1354 struct scatterlist *sg, *iter;
1355 unsigned int count = view->partial.size;
1356 unsigned int offset;
1359 st = kmalloc(sizeof(*st), GFP_KERNEL);
1363 ret = sg_alloc_table(st, count, GFP_KERNEL);
1367 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
1375 len = min(iter->length - (offset << PAGE_SHIFT),
1376 count << PAGE_SHIFT);
1377 sg_set_page(sg, NULL, len, 0);
1378 sg_dma_address(sg) =
1379 sg_dma_address(iter) + (offset << PAGE_SHIFT);
1380 sg_dma_len(sg) = len;
1383 count -= len >> PAGE_SHIFT;
1386 i915_sg_trim(st); /* Drop any unused tail entries. */
1392 iter = __sg_next(iter);
1399 return ERR_PTR(ret);
1403 i915_get_ggtt_vma_pages(struct i915_vma *vma)
1408 * The vma->pages are only valid within the lifespan of the borrowed
1409 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
1410 * must be the vma->pages. A simple rule is that vma->pages must only
1411 * be accessed when the obj->mm.pages are pinned.
1413 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
1415 switch (vma->ggtt_view.type) {
1417 GEM_BUG_ON(vma->ggtt_view.type);
1419 case I915_GGTT_VIEW_NORMAL:
1420 vma->pages = vma->obj->mm.pages;
1423 case I915_GGTT_VIEW_ROTATED:
1425 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
1428 case I915_GGTT_VIEW_REMAPPED:
1430 intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
1433 case I915_GGTT_VIEW_PARTIAL:
1434 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
1439 if (IS_ERR(vma->pages)) {
1440 ret = PTR_ERR(vma->pages);
1442 drm_err(&vma->vm->i915->drm,
1443 "Failed to get pages for VMA view type %u (%d)!\n",
1444 vma->ggtt_view.type, ret);