// SPDX-License-Identifier: MIT
* Copyright © 2020 Intel Corporation
#include <linux/stop_machine.h>
#include <asm/set_memory.h>
#include <drm/i915_drm.h>
#include "i915_scatterlist.h"
#include "i915_vgpu.h"
#include "intel_gtt.h"
i915_get_ggtt_vma_pages(struct i915_vma *vma);
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
if (i915_node_color_differs(node, color))
*start += I915_GTT_PAGE_SIZE;
* Also leave a space between the unallocated reserved node after the
* GTT and any objects within the GTT, i.e. we use the color adjustment
* to insert a guard page to prevent prefetches crossing over the
node = list_next_entry(node, node_list);
if (node->color != color)
*end -= I915_GTT_PAGE_SIZE;
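/*
 * One-time GGTT address space setup: mark the vm as the global GTT,
 * allow read-only mappings only on Valleyview, install the guard-page
 * colour adjust when there is neither an LLC nor a PPGTT, map the
 * aperture write-combined and set up the fence registers.
 */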
static int ggtt_init_hw(struct i915_ggtt *ggtt)
struct drm_i915_private *i915 = ggtt->vm.i915;
i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
ggtt->vm.is_ggtt = true;
/* Only VLV supports read-only GGTT mappings */
ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
if (ggtt->mappable_end) {
if (!io_mapping_init_wc(&ggtt->iomap,
ggtt->mappable_end)) {
ggtt->vm.cleanup(&ggtt->vm);
ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
intel_ggtt_init_fences(ggtt);
* i915_ggtt_init_hw - Initialize GGTT hardware
int i915_ggtt_init_hw(struct drm_i915_private *i915)
stash_init(&i915->mm.wc_stash);
* Note that we use page colouring to enforce a guard page at the
* end of the address space. This is required as the CS may prefetch
* beyond the end of the batch buffer, across the page boundary,
* and beyond the end of the GTT if we do not provide a guard.
ret = ggtt_init_hw(&i915->ggtt);
* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
static bool needs_idle_maps(struct drm_i915_private *i915)
* Query intel_iommu to see if we need the workaround. Presumably that
return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
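/*
 * Suspend: wait for any in-flight bindings to complete, scrub the whole
 * GGTT back to scratch, flush the TLBs and clear any pending GPU faults
 * before the hardware loses its contents.
 */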
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
struct i915_vma *vma;
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
i915_vma_wait_for_bind(vma);
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
ggtt->invalidate(ggtt);
intel_gt_check_and_clear_faults(ggtt->vm.gt);
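/*
 * gen6+ TLB invalidation: write GFX_FLSH_CNTL under the uncore lock and
 * post a read so the write is known to have reached the hardware.
 */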
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
spin_lock_irq(&uncore->lock);
intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
spin_unlock_irq(&uncore->lock);
static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
* Note that as an uncached mmio write, this will flush the
* WCB of the writes into the GGTT before it triggers the invalidate.
intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
struct intel_uncore *uncore = ggtt->vm.gt->uncore;
struct drm_i915_private *i915 = ggtt->vm.i915;
gen8_ggtt_invalidate(ggtt);
if (INTEL_GEN(i915) >= 12)
intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
GEN12_GUC_TLB_INV_CR_INVALIDATE);
intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
intel_gtt_chipset_flush();
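/*
 * On gen8+ the GGTT PTE written here carries only the page address and
 * the present bit; the cache level argument is not encoded.
 */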
static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
return addr | _PAGE_PRESENT;
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
enum i915_cache_level level,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0));
ggtt->invalidate(ggtt);
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level level,
const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen8_pte_t __iomem *gte;
gen8_pte_t __iomem *end;
struct sgt_iter iter;
* Note that we ignore PTE_READ_ONLY here. The caller must be careful
* not to allow the user to override access to a read only page.
gte = (gen8_pte_t __iomem *)ggtt->gsm;
gte += vma->node.start / I915_GTT_PAGE_SIZE;
end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
for_each_sgt_daddr(addr, iter, vma->pages)
gen8_set_pte(gte++, pte_encode | addr);
GEM_BUG_ON(gte > end);
/* Fill the allocated but "unused" space beyond the end of the buffer */
gen8_set_pte(gte++, vm->scratch[0].encode);
* We want to flush the TLBs only after we're certain all the PTE
* updates have finished.
ggtt->invalidate(ggtt);
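/* Write a single gen6 GGTT PTE through the GSM and flush the TLBs. */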
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
enum i915_cache_level level,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *pte =
(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
iowrite32(vm->pte_encode(addr, level, flags), pte);
ggtt->invalidate(ggtt);
* Binds an object into the global gtt with the specified cache level.
* The object will be accessible to the GPU via commands whose operands
* reference offsets within the global GTT as well as accessible by the GPU
* through the GMADR mapped BAR (i915->mm.gtt->gtt).
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level level,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
gen6_pte_t __iomem *gte;
gen6_pte_t __iomem *end;
struct sgt_iter iter;
gte = (gen6_pte_t __iomem *)ggtt->gsm;
gte += vma->node.start / I915_GTT_PAGE_SIZE;
end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
for_each_sgt_daddr(addr, iter, vma->pages)
iowrite32(vm->pte_encode(addr, level, flags), gte++);
GEM_BUG_ON(gte > end);
/* Fill the allocated but "unused" space beyond the end of the buffer */
iowrite32(vm->scratch[0].encode, gte++);
* We want to flush the TLBs only after we're certain all the PTE
* updates have finished.
ggtt->invalidate(ggtt);
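/*
 * Where PTEs do not need to be scrubbed on unbind (e.g. no VT-d scanout
 * workaround required), clear_range can be a no-op.
 */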
static void nop_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
const gen8_pte_t scratch_pte = vm->scratch[0].encode;
gen8_pte_t __iomem *gtt_base =
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
* Make sure the internal GAM fifo has been cleared of all GTT
* writes before exiting stop_machine(). This guarantees that
* any aperture accesses waiting to start in another process
* cannot back up behind the GTT writes causing a hang.
* The register can be any arbitrary GAM register.
intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
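/*
 * The insert_page/insert_entries arguments below are bundled up so the
 * update can run under stop_machine(): on BXT with VT-d enabled (and on
 * CHV) GGTT writes must be serialized against concurrent aperture
 * access, see the gen8 probe code further down.
 */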
struct i915_address_space *vm;
enum i915_cache_level level;
static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
struct insert_page *arg = _arg;
gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
bxt_vtd_ggtt_wa(arg->vm);
static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
enum i915_cache_level level,
struct insert_page arg = { vm, addr, offset, level };
stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
struct insert_entries {
struct i915_address_space *vm;
struct i915_vma *vma;
enum i915_cache_level level;
static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
struct insert_entries *arg = _arg;
gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
bxt_vtd_ggtt_wa(arg->vm);
static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level level,
struct insert_entries arg = { vm, vma, level, flags };
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
gen6_pte_t scratch_pte, __iomem *gtt_base =
(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
scratch_pte = vm->scratch[0].encode;
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
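/*
 * The i915_ggtt_* callbacks below simply forward to the intel-gtt
 * (GMCH) library used on gen5 and earlier platforms.
 */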
static void i915_ggtt_insert_page(struct i915_address_space *vm,
enum i915_cache_level cache_level,
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_vma *vma,
enum i915_cache_level cache_level,
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
static void i915_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
static int ggtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
struct drm_i915_gem_object *obj = vma->obj;
/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
if (i915_gem_object_is_readonly(obj))
pte_flags |= PTE_READ_ONLY;
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
* Without aliasing PPGTT there's no difference between
* GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
* upgrade to both bound if we bind either to avoid double-binding.
atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
static void ggtt_unbind_vma(struct i915_vma *vma)
vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
size = ggtt->vm.total - GUC_GGTT_TOP;
ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
drm_dbg(&ggtt->vm.i915->drm,
"Failed to reserve top of GGTT for GuC\n");
static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
if (drm_mm_node_allocated(&ggtt->uc_fw))
drm_mm_remove_node(&ggtt->uc_fw);
static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
ggtt_release_guc_top(ggtt);
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
mutex_destroy(&ggtt->error_mutex);
static int init_ggtt(struct i915_ggtt *ggtt)
* Let GEM Manage all of the aperture.
* However, leave one page at the end still bound to the scratch page.
* There are a number of places where the hardware apparently prefetches
* past the end of the object, and we've seen multiple hangs with the
* GPU head pointer stuck in a batchbuffer bound at the last page of the
* aperture. One page should be enough to keep any prefetching inside
unsigned long hole_start, hole_end;
struct drm_mm_node *entry;
* GuC requires all resources that we're sharing with it to be placed in
* non-WOPCM memory. If GuC is not present or not in use we still need a
* small bias as ring wraparound at offset 0 sometimes hangs. No idea
ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));
ret = intel_vgt_balloon(ggtt);
mutex_init(&ggtt->error_mutex);
if (ggtt->mappable_end) {
/* Reserve a mappable slot for our lockless error capture */
ret = drm_mm_insert_node_in_range(&ggtt->vm.mm,
&ggtt->error_capture,
I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end,
* The upper portion of the GuC address space has a sizeable hole
* (several MB) that is inaccessible by GuC. Reserve this range within
* GGTT as it can comfortably hold GuC/HuC firmware images.
ret = ggtt_reserve_guc_top(ggtt);
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
drm_dbg_kms(&ggtt->vm.i915->drm,
"clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
ggtt->vm.clear_range(&ggtt->vm, hole_start,
hole_end - hole_start);
/* And finally clear the reserved guard page */
ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
cleanup_init_ggtt(ggtt);
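/*
 * With an aliasing PPGTT, a LOCAL_BIND goes into the alias->vm page
 * tables while a GLOBAL_BIND still lands in the GGTT itself; both may
 * be requested for the same vma.
 */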
static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
/* Currently applicable only to VLV */
if (i915_gem_object_is_readonly(vma->obj))
pte_flags |= PTE_READ_ONLY;
if (flags & I915_VMA_LOCAL_BIND) {
struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias;
if (flags & I915_VMA_ALLOC) {
ret = alias->vm.allocate_va_range(&alias->vm,
set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT,
__i915_vma_flags(vma)));
alias->vm.insert_entries(&alias->vm, vma,
cache_level, pte_flags);
if (flags & I915_VMA_GLOBAL_BIND)
vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
struct i915_address_space *vm = vma->vm;
vm->clear_range(vm, vma->node.start, vma->size);
if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
struct i915_address_space *vm =
&i915_vm_to_ggtt(vma->vm)->alias->vm;
vm->clear_range(vm, vma->node.start, vma->size);
static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
struct i915_ppgtt *ppgtt;
ppgtt = i915_ppgtt_create(ggtt->vm.gt);
return PTR_ERR(ppgtt);
if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
* Note we only pre-allocate as far as the end of the global
* GTT. On 48b / 4-level page-tables, the difference is very,
* very significant! We have to preallocate as GVT/vgpu does
* not like the page directory disappearing.
err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;
GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
i915_vm_put(&ppgtt->vm);
static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
struct i915_ppgtt *ppgtt;
ppgtt = fetch_and_zero(&ggtt->alias);
i915_vm_put(&ppgtt->vm);
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
int i915_init_ggtt(struct drm_i915_private *i915)
ret = init_ggtt(&i915->ggtt);
if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
ret = init_aliasing_ppgtt(&i915->ggtt);
cleanup_init_ggtt(&i915->ggtt);
static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
struct i915_vma *vma, *vn;
atomic_set(&ggtt->vm.open, 0);
rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
flush_workqueue(ggtt->vm.i915->wq);
mutex_lock(&ggtt->vm.mutex);
list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
WARN_ON(__i915_vma_unbind(vma));
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
mutex_destroy(&ggtt->error_mutex);
ggtt_release_guc_top(ggtt);
intel_vgt_deballoon(ggtt);
ggtt->vm.cleanup(&ggtt->vm);
mutex_unlock(&ggtt->vm.mutex);
i915_address_space_fini(&ggtt->vm);
arch_phys_wc_del(ggtt->mtrr);
if (ggtt->iomap.size)
io_mapping_fini(&ggtt->iomap);
* i915_ggtt_driver_release - Clean up GGTT hardware initialization
void i915_ggtt_driver_release(struct drm_i915_private *i915)
struct i915_ggtt *ggtt = &i915->ggtt;
struct pagevec *pvec;
fini_aliasing_ppgtt(ggtt);
intel_ggtt_fini_fences(ggtt);
ggtt_cleanup_hw(ggtt);
pvec = &i915->mm.wc_stash.pvec;
set_pages_array_wb(pvec->pages, pvec->nr);
__pagevec_release(pvec);
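/*
 * GGMS in SNB_GMCH_CTRL gives the GTT size in MiB on gen6/7, e.g. a
 * field value of 2 means 2 MiB of PTEs, which at 4 bytes per PTE and
 * 4 KiB per page works out to a 2 GiB GGTT.
 */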
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
return snb_gmch_ctl << 20;
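/*
 * On gen8 the field is a power of two (1 << GGMS MiB of PTEs): a value
 * of 3 means 8 MiB of PTEs, i.e. 1M entries of 8 bytes, mapping a 4 GiB
 * GGTT; 32b platforms are capped at the 2 GiB case below.
 */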
static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
if (bdw_gmch_ctl > 4)
return bdw_gmch_ctl << 20;
static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
gmch_ctrl &= SNB_GMCH_GGMS_MASK;
return 1 << (20 + gmch_ctrl);
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
struct drm_i915_private *i915 = ggtt->vm.i915;
struct pci_dev *pdev = i915->drm.pdev;
phys_addr_t phys_addr;
/* For Modern GENs the PTEs and register space are split in the BAR */
phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
* On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
* will be dropped. For WC mappings in general we have 64 byte burst
* writes when the WC buffer is flushed, so we can't use it, but have to
* resort to an uncached mapping. The WC issue is easily caught by the
* readback check when writing GTT PTE entries.
if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
ggtt->gsm = ioremap(phys_addr, size);
ggtt->gsm = ioremap_wc(phys_addr, size);
drm_err(&i915->drm, "Failed to map the ggtt page table\n");
ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
drm_err(&i915->drm, "Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
ggtt->vm.scratch[0].encode =
ggtt->vm.pte_encode(px_dma(&ggtt->vm.scratch[0]),
int ggtt_set_pages(struct i915_vma *vma)
GEM_BUG_ON(vma->pages);
ret = i915_get_ggtt_vma_pages(vma);
vma->page_sizes = vma->obj->mm.page_sizes;
static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
cleanup_scratch_page(vm);
static struct resource pci_resource(struct pci_dev *pdev, int bar)
return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
pci_resource_len(pdev, bar));
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
struct drm_i915_private *i915 = ggtt->vm.i915;
struct pci_dev *pdev = i915->drm.pdev;
/* TODO: We're not aware of mappable constraints on gen8 yet */
if (!IS_DGFX(i915)) {
ggtt->gmadr = pci_resource(pdev, 2);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
"Can't set DMA mask/consistent mask (%d)\n", err);
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
if (IS_CHERRYVIEW(i915))
size = chv_get_total_gtt_size(snb_gmch_ctl);
size = gen8_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->vm.insert_page = gen8_ggtt_insert_page;
ggtt->vm.clear_range = nop_clear_range;
if (intel_scanout_needs_vtd_wa(i915))
ggtt->vm.clear_range = gen8_ggtt_clear_range;
ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
if (intel_ggtt_update_needs_vtd_wa(i915) ||
IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
ggtt->vm.bind_async_flags =
I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
ggtt->invalidate = gen8_ggtt_invalidate;
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
ggtt->vm.vma_ops.clear_pages = clear_pages;
ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
setup_private_pat(ggtt->vm.gt->uncore);
return ggtt_probe_common(ggtt, size);
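/*
 * The *_pte_encode() variants below pick the cacheability bits for the
 * gen6-era GGTT PTE layouts; which one is used is decided per platform
 * in gen6_gmch_probe().
 */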
static u64 snb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
case I915_CACHE_L3_LLC:
pte |= GEN6_PTE_CACHE_LLC;
case I915_CACHE_NONE:
pte |= GEN6_PTE_UNCACHED;
static u64 ivb_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
case I915_CACHE_L3_LLC:
pte |= GEN7_PTE_CACHE_L3_LLC;
pte |= GEN6_PTE_CACHE_LLC;
case I915_CACHE_NONE:
pte |= GEN6_PTE_UNCACHED;
static u64 byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
if (!(flags & PTE_READ_ONLY))
pte |= BYT_PTE_WRITEABLE;
if (level != I915_CACHE_NONE)
pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
static u64 hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
if (level != I915_CACHE_NONE)
pte |= HSW_WB_LLC_AGE3;
static u64 iris_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;
case I915_CACHE_NONE:
pte |= HSW_WT_ELLC_LLC_AGE3;
pte |= HSW_WB_ELLC_LLC_AGE3;
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
struct drm_i915_private *i915 = ggtt->vm.i915;
struct pci_dev *pdev = i915->drm.pdev;
ggtt->gmadr = pci_resource(pdev, 2);
ggtt->mappable_end = resource_size(&ggtt->gmadr);
* 64/512MB is the current min/max we actually know of, but this is
* just a coarse sanity check.
if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
&ggtt->mappable_end);
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
"Can't set DMA mask/consistent mask (%d)\n", err);
pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
size = gen6_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.clear_range = nop_clear_range;
if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
ggtt->vm.clear_range = gen6_ggtt_clear_range;
ggtt->vm.insert_page = gen6_ggtt_insert_page;
ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
ggtt->vm.cleanup = gen6_gmch_remove;
ggtt->invalidate = gen6_ggtt_invalidate;
if (HAS_EDRAM(i915))
ggtt->vm.pte_encode = iris_pte_encode;
else if (IS_HASWELL(i915))
ggtt->vm.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(i915))
ggtt->vm.pte_encode = byt_pte_encode;
else if (INTEL_GEN(i915) >= 7)
ggtt->vm.pte_encode = ivb_pte_encode;
ggtt->vm.pte_encode = snb_pte_encode;
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
ggtt->vm.vma_ops.clear_pages = clear_pages;
return ggtt_probe_common(ggtt, size);
static void i915_gmch_remove(struct i915_address_space *vm)
intel_gmch_remove();
static int i915_gmch_probe(struct i915_ggtt *ggtt)
struct drm_i915_private *i915 = ggtt->vm.i915;
phys_addr_t gmadr_base;
ret = intel_gmch_probe(i915->bridge_dev, i915->drm.pdev, NULL);
drm_err(&i915->drm, "failed to set up gmch\n");
intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);
ggtt->do_idle_maps = needs_idle_maps(i915);
ggtt->vm.insert_page = i915_ggtt_insert_page;
ggtt->vm.insert_entries = i915_ggtt_insert_entries;
ggtt->vm.clear_range = i915_ggtt_clear_range;
ggtt->vm.cleanup = i915_gmch_remove;
ggtt->invalidate = gmch_ggtt_invalidate;
ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
ggtt->vm.vma_ops.clear_pages = clear_pages;
if (unlikely(ggtt->do_idle_maps))
drm_notice(&i915->drm,
"Applying Ironlake quirks for intel_iommu\n");
static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
ggtt->vm.i915 = i915;
ggtt->vm.dma = &i915->drm.pdev->dev;
if (INTEL_GEN(i915) <= 5)
ret = i915_gmch_probe(ggtt);
else if (INTEL_GEN(i915) < 8)
ret = gen6_gmch_probe(ggtt);
ret = gen8_gmch_probe(ggtt);
if ((ggtt->vm.total - 1) >> 32) {
"We never expected a Global GTT with more than 32bits"
" of address space! Found %lldM!\n",
ggtt->vm.total >> 20);
ggtt->vm.total = 1ULL << 32;
ggtt->mappable_end =
min_t(u64, ggtt->mappable_end, ggtt->vm.total);
if (ggtt->mappable_end > ggtt->vm.total) {
"mappable aperture extends past end of GGTT,"
" aperture=%pa, total=%llx\n",
&ggtt->mappable_end, ggtt->vm.total);
ggtt->mappable_end = ggtt->vm.total;
/* GMADR is the PCI mmio aperture into the global GTT. */
drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
drm_dbg(&i915->drm, "GMADR size = %lluM\n",
(u64)ggtt->mappable_end >> 20);
drm_dbg(&i915->drm, "DSM size = %lluM\n",
(u64)resource_size(&intel_graphics_stolen_res) >> 20);
* i915_ggtt_probe_hw - Probe GGTT hardware location
* @i915: i915 device
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
if (intel_vtd_active())
drm_info(&i915->drm, "VT-d active for gfx access\n");
int i915_ggtt_enable_hw(struct drm_i915_private *i915)
if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);
ggtt->invalidate = guc_ggtt_invalidate;
ggtt->invalidate(ggtt);
void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
/* XXX Temporary pardon for error unload */
if (ggtt->invalidate == gen8_ggtt_invalidate)
/* We should only be called after i915_ggtt_enable_guc() */
GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);
ggtt->invalidate = gen8_ggtt_invalidate;
ggtt->invalidate(ggtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt)
struct i915_vma *vma;
intel_gt_check_and_clear_faults(ggtt->vm.gt);
/* First fill our portion of the GTT with scratch pages */
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
/* Skip rewriting PTE on VMA unbind. */
open = atomic_xchg(&ggtt->vm.open, 0);
/* clflush objects bound into the GGTT and rebind them. */
list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
WARN_ON(i915_vma_bind(vma,
obj ? obj->cache_level : 0,
if (obj) { /* only used during resume => exclusive access */
flush |= fetch_and_zero(&obj->write_domain);
obj->read_domains |= I915_GEM_DOMAIN_GTT;
atomic_set(&ggtt->vm.open, open);
ggtt->invalidate(ggtt);
wbinvd_on_all_cpus();
if (INTEL_GEN(ggtt->vm.i915) >= 8)
setup_private_pat(ggtt->vm.gt->uncore);
intel_ggtt_restore_fences(ggtt);
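/*
 * Build a page-by-page scatterlist whose DMA addresses walk the object
 * in rotated order for the display engine; only the DMA addresses
 * matter, no struct pages are attached.
 */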
static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
unsigned int width, unsigned int height,
unsigned int stride,
struct sg_table *st, struct scatterlist *sg)
unsigned int column, row;
unsigned int src_idx;
for (column = 0; column < width; column++) {
src_idx = stride * (height - 1) + column + offset;
for (row = 0; row < height; row++) {
* We don't need the pages, but need to initialize
* the entries so the sg list can be happily traversed.
* The only thing we need are DMA addresses.
sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
sg_dma_address(sg) =
i915_gem_object_get_dma_address(obj, src_idx);
sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
unsigned int size = intel_rotation_info_size(rot_info);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
/* Allocate target SG list. */
st = kmalloc(sizeof(*st), GFP_KERNEL);
ret = sg_alloc_table(st, size, GFP_KERNEL);
for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
sg = rotate_pages(obj, rot_info->plane[i].offset,
rot_info->plane[i].width, rot_info->plane[i].height,
rot_info->plane[i].stride, st, sg);
drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
obj->base.size, rot_info->plane[0].width,
rot_info->plane[0].height, size);
return ERR_PTR(ret);
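/*
 * remap_pages() builds the remapped view row by row, coalescing runs of
 * DMA-contiguous pages into single scatterlist entries where possible.
 */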
static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
unsigned int width, unsigned int height,
unsigned int stride,
struct sg_table *st, struct scatterlist *sg)
for (row = 0; row < height; row++) {
unsigned int left = width * I915_GTT_PAGE_SIZE;
unsigned int length;
* We don't need the pages, but need to initialize
* the entries so the sg list can be happily traversed.
* The only thing we need are DMA addresses.
addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
length = min(left, length);
sg_set_page(sg, NULL, length, 0);
sg_dma_address(sg) = addr;
sg_dma_len(sg) = length;
offset += length / I915_GTT_PAGE_SIZE;
offset += stride - width;
static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
struct drm_i915_gem_object *obj)
unsigned int size = intel_remapped_info_size(rem_info);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
/* Allocate target SG list. */
st = kmalloc(sizeof(*st), GFP_KERNEL);
ret = sg_alloc_table(st, size, GFP_KERNEL);
for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
sg = remap_pages(obj, rem_info->plane[i].offset,
rem_info->plane[i].width, rem_info->plane[i].height,
rem_info->plane[i].stride, st, sg);
drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
obj->base.size, rem_info->plane[0].width,
rem_info->plane[0].height, size);
return ERR_PTR(ret);
static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj)
struct sg_table *st;
struct scatterlist *sg, *iter;
unsigned int count = view->partial.size;
unsigned int offset;
st = kmalloc(sizeof(*st), GFP_KERNEL);
ret = sg_alloc_table(st, count, GFP_KERNEL);
iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
len = min(iter->length - (offset << PAGE_SHIFT),
count << PAGE_SHIFT);
sg_set_page(sg, NULL, len, 0);
sg_dma_address(sg) =
sg_dma_address(iter) + (offset << PAGE_SHIFT);
sg_dma_len(sg) = len;
count -= len >> PAGE_SHIFT;
i915_sg_trim(st); /* Drop any unused tail entries. */
iter = __sg_next(iter);
return ERR_PTR(ret);
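/*
 * Populate vma->pages for the requested GGTT view: normal views borrow
 * obj->mm.pages directly, while rotated, remapped and partial views get
 * a freshly built scatterlist from the helpers above.
 */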
i915_get_ggtt_vma_pages(struct i915_vma *vma)
* The vma->pages are only valid within the lifespan of the borrowed
* obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
* must be the vma->pages. A simple rule is that vma->pages must only
* be accessed when the obj->mm.pages are pinned.
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
switch (vma->ggtt_view.type) {
GEM_BUG_ON(vma->ggtt_view.type);
case I915_GGTT_VIEW_NORMAL:
vma->pages = vma->obj->mm.pages;
case I915_GGTT_VIEW_ROTATED:
intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
case I915_GGTT_VIEW_REMAPPED:
intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
case I915_GGTT_VIEW_PARTIAL:
vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
if (IS_ERR(vma->pages)) {
ret = PTR_ERR(vma->pages);
drm_err(&vma->vm->i915->drm,
"Failed to get pages for VMA view type %u (%d)!\n",
vma->ggtt_view.type, ret);