// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/agp_backend.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include <drm/i915_drm.h>
#include <drm/intel-gtt.h>

#include "gem/i915_gem_lmem.h"

#include "intel_gt.h"
#include "intel_gt_regs.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_vgpu.h"

#include "intel_gtt.h"
#include "gen8_ppgtt.h"

static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
				   unsigned long color,
				   u64 *start,
				   u64 *end)
{
	if (i915_node_color_differs(node, color))
		*start += I915_GTT_PAGE_SIZE;

	/*
	 * Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetches crossing over the
	 * GTT boundary.
	 */
	node = list_next_entry(node, node_list);
	if (node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}

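/*
 * One-time GGTT setup: register the address space, wire up the colouring
 * hook used for guard pages, and (where an aperture exists) map it WC.
 */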
static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;

	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

	ggtt->vm.is_ggtt = true;

	/* Only VLV supports read-only GGTT mappings */
	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

	if (ggtt->mappable_end) {
		if (!io_mapping_init_wc(&ggtt->iomap,
					ggtt->gmadr.start,
					ggtt->mappable_end)) {
			ggtt->vm.cleanup(&ggtt->vm);
			return -EIO;
		}

		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
					      ggtt->mappable_end);
	}

	intel_ggtt_init_fences(ggtt);

	return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @i915: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *i915)
{
	int ret;

	/*
	 * Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	ret = ggtt_init_hw(to_gt(i915)->ggtt);
	if (ret)
		return ret;

	return 0;
}

/*
 * Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *i915)
{
	/*
	 * Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (!intel_vtd_active(i915))
		return false;

	if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
		return true;

	if (GRAPHICS_VER(i915) == 12)
		return true; /* XXX DMAR fault reason 7 */

	return false;
}

/**
 * i915_ggtt_suspend_vm - Suspend the memory mappings for a GGTT or DPT VM
 * @vm: The VM to suspend the mappings for
 *
 * Suspend the memory mappings for all objects mapped to HW via the GGTT or a
 * DPT page table.
 */
void i915_ggtt_suspend_vm(struct i915_address_space *vm)
{
	struct i915_vma *vma, *vn;
	int save_skip_rewrite;

	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);

retry:
	i915_gem_drain_freed_objects(vm->i915);

	mutex_lock(&vm->mutex);

	/*
	 * Skip rewriting PTE on VMA unbind.
	 * FIXME: Use an argument to i915_vma_unbind() instead?
	 */
	save_skip_rewrite = vm->skip_pte_rewrite;
	vm->skip_pte_rewrite = true;

	list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

		if (i915_vma_is_pinned(vma) || !i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
			continue;

		/* unlikely to race when GPU is idle, so no worry about slowpath.. */
		if (WARN_ON(!i915_gem_object_trylock(obj, NULL))) {
			/*
			 * No dead objects should appear here, GPU should be
			 * completely idle, and userspace suspended
			 */
			i915_gem_object_get(obj);

			mutex_unlock(&vm->mutex);

			i915_gem_object_lock(obj, NULL);
			GEM_WARN_ON(i915_vma_unbind(vma));
			i915_gem_object_unlock(obj);
			i915_gem_object_put(obj);

			vm->skip_pte_rewrite = save_skip_rewrite;
			goto retry;
		}

		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
			i915_vma_wait_for_bind(vma);

			__i915_vma_evict(vma, false);
			drm_mm_remove_node(&vma->node);
		}

		i915_gem_object_unlock(obj);
	}

	vm->clear_range(vm, 0, vm->total);

	vm->skip_pte_rewrite = save_skip_rewrite;

	mutex_unlock(&vm->mutex);
}

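/* Suspend the whole GGTT, then drop any faults the HW has left behind. */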
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
	i915_ggtt_suspend_vm(&ggtt->vm);
	ggtt->invalidate(ggtt);

	intel_gt_check_and_clear_faults(ggtt->vm.gt);
}

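/*
 * Invalidate the GGTT by writing GFX_FLSH_CNTL; the read back under the
 * uncore lock posts the write before anyone else touches the GTT.
 */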
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	spin_lock_irq(&uncore->lock);
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
	spin_unlock_irq(&uncore->lock);
}

static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

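/*
 * The GuC keeps its own view of the GGTT, so after the usual gen8
 * invalidate we also poke the GuC TLB invalidation register appropriate
 * for the platform.
 */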
static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct drm_i915_private *i915 = ggtt->vm.i915;

	gen8_ggtt_invalidate(ggtt);

	if (GRAPHICS_VER(i915) >= 12)
		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
	else
		intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gtt_chipset_flush();
}

u64 gen8_ggtt_pte_encode(dma_addr_t addr,
			 enum i915_cache_level level,
			 u32 flags)
{
	gen8_pte_t pte = addr | GEN8_PAGE_PRESENT;

	if (flags & PTE_LM)
		pte |= GEN12_GGTT_PTE_LM;

	return pte;
}

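/* A gen8+ GGTT PTE is 64bit wide, so it can be updated with a single writeq. */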
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));

	ggtt->invalidate(ggtt);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma_resource *vma_res,
				     enum i915_cache_level level,
				     u32 flags)
{
	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *gte;
	gen8_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	gte = (gen8_pte_t __iomem *)ggtt->gsm;
	gte += vma_res->start / I915_GTT_PAGE_SIZE;
	end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
		gen8_set_pte(gte++, pte_encode | addr);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		gen8_set_pte(gte++, vm->scratch[0]->encode);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(ggtt);
}

/*
 * Binds an object into the global gtt with the specified cache level.
 * The object will be accessible to the GPU via commands whose operands
 * reference offsets within the global GTT as well as accessible by the GPU
 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma_resource *vma_res,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *gte;
	gen6_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	gte = (gen6_pte_t __iomem *)ggtt->gsm;
	gte += vma_res->start / I915_GTT_PAGE_SIZE;
	end = gte + vma_res->node_size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma_res->bi.pages)
		iowrite32(vm->pte_encode(addr, level, flags), gte++);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		iowrite32(vm->scratch[0]->encode, gte++);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

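/*
 * Used where leaving stale PTEs behind is assumed harmless: clearing the
 * range is skipped entirely. Platforms that need the VT-d scanout
 * workaround install a real clear_range in the probe paths below instead.
 */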
static void nop_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	/*
	 * Make sure the internal GAM fifo has been cleared of all GTT
	 * writes before exiting stop_machine(). This guarantees that
	 * any aperture accesses waiting to start in another process
	 * cannot back up behind the GTT writes causing a hang.
	 * The register can be any arbitrary GAM register.
	 */
	intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
}

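/*
 * On platforms where GGTT updates must not run concurrently with other
 * accesses (see intel_vm_no_concurrent_access_wa()), the __BKL variants
 * below funnel every PTE update through stop_machine(), with
 * bxt_vtd_ggtt_wa() draining the GAM fifo before the machine is released.
 */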
struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

struct insert_entries {
	struct i915_address_space *vm;
	struct i915_vma_resource *vma_res;
	enum i915_cache_level level;
	u32 flags;
};

static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	gen8_ggtt_insert_entries(arg->vm, arg->vma_res, arg->level, arg->flags);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct i915_vma_resource *vma_res,
					     enum i915_cache_level level,
					     u32 flags)
{
	struct insert_entries arg = { vm, vma_res, level, flags };

	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->scratch[0]->encode;
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
}

static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma_resource *vma_res,
				     enum i915_cache_level cache_level,
				     u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
				    flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}

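/*
 * Default bind for GGTT-only vmas: write the PTEs (respecting the
 * read-only and local-memory attributes) unless the requested binding
 * is already in place.
 */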
static void ggtt_bind_vma(struct i915_address_space *vm,
			  struct i915_vm_pt_stash *stash,
			  struct i915_vma_resource *vma_res,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	u32 pte_flags;

	if (vma_res->bound_flags & (~flags & I915_VMA_BIND_MASK))
		return;

	vma_res->bound_flags |= flags;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (vma_res->bi.readonly)
		pte_flags |= PTE_READ_ONLY;
	if (vma_res->bi.lmem)
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma_res, cache_level, pte_flags);
	vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
}

static void ggtt_unbind_vma(struct i915_address_space *vm,
			    struct i915_vma_resource *vma_res)
{
	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}

static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
	u64 size;
	int ret;

	if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
		return 0;

	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
	size = ggtt->vm.total - GUC_GGTT_TOP;

	ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
				   PIN_NOEVICT);
	if (ret)
		drm_dbg(&ggtt->vm.i915->drm,
			"Failed to reserve top of GGTT for GuC\n");

	return ret;
}

static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
{
	if (drm_mm_node_allocated(&ggtt->uc_fw))
		drm_mm_remove_node(&ggtt->uc_fw);
}

static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
{
	ggtt_release_guc_top(ggtt);
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);
}

static int init_ggtt(struct i915_ggtt *ggtt)
{
	/*
	 * Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	unsigned long hole_start, hole_end;
	struct drm_mm_node *entry;
	int ret;

	/*
	 * GuC requires all resources that we're sharing with it to be placed in
	 * non-WOPCM memory. If GuC is not present or not in use we still need a
	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
	 * why.
	 */
	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));

	ret = intel_vgt_balloon(ggtt);
	if (ret)
		return ret;

	mutex_init(&ggtt->error_mutex);
	if (ggtt->mappable_end) {
		/*
		 * Reserve a mappable slot for our lockless error capture.
		 *
		 * We strongly prefer taking address 0x0 in order to protect
		 * other critical buffers against accidental overwrites,
		 * as writing to address 0 is a very common mistake.
		 *
		 * Since 0 may already be in use by the system (e.g. the BIOS
		 * framebuffer), we let the reservation fail quietly and hope
		 * 0 remains reserved always.
		 *
		 * If we fail to reserve 0, and then fail to find any space
		 * for an error-capture, remain silent. We can afford not
		 * to reserve an error_capture node as we have fallback
		 * paths, and we trust that 0 will remain reserved. However,
		 * the only likely reason for failure to insert is a driver
		 * bug, which we expect to cause other failures...
		 */
		ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
		ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
		if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
			drm_mm_insert_node_in_range(&ggtt->vm.mm,
						    &ggtt->error_capture,
						    ggtt->error_capture.size, 0,
						    ggtt->error_capture.color,
						    0, ggtt->mappable_end,
						    DRM_MM_INSERT_LOW);
	}
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_dbg(&ggtt->vm.i915->drm,
			"Reserved GGTT:[%llx, %llx] for use by error capture\n",
			ggtt->error_capture.start,
			ggtt->error_capture.start + ggtt->error_capture.size);

	/*
	 * The upper portion of the GuC address space has a sizeable hole
	 * (several MB) that is inaccessible by GuC. Reserve this range within
	 * GGTT as it can comfortably hold GuC/HuC firmware images.
	 */
	ret = ggtt_reserve_guc_top(ggtt);
	if (ret)
		goto err;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
		drm_dbg(&ggtt->vm.i915->drm,
			"clearing unused GTT space: [%lx, %lx]\n",
			hole_start, hole_end);
		ggtt->vm.clear_range(&ggtt->vm, hole_start,
				     hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);

	return 0;

err:
	cleanup_init_ggtt(ggtt);
	return ret;
}

static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  struct i915_vma_resource *vma_res,
				  enum i915_cache_level cache_level,
				  u32 flags)
{
	u32 pte_flags;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma_res->bi.readonly)
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND)
		ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
			       stash, vma_res, cache_level, flags);

	if (flags & I915_VMA_GLOBAL_BIND)
		vm->insert_entries(vm, vma_res, cache_level, pte_flags);

	vma_res->bound_flags |= flags;
}

static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
				    struct i915_vma_resource *vma_res)
{
	if (vma_res->bound_flags & I915_VMA_GLOBAL_BIND)
		vm->clear_range(vm, vma_res->start, vma_res->vma_size);

	if (vma_res->bound_flags & I915_VMA_LOCAL_BIND)
		ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma_res);
}

static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_vm_pt_stash stash = {};
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
		err = -ENODEV;
		goto err_ppgtt;
	}

	err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
	if (err)
		goto err_ppgtt;

	i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
	err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
	i915_gem_object_unlock(ppgtt->vm.scratch[0]);
	if (err)
		goto err_stash;

	/*
	 * Note we only pre-allocate as far as the end of the global
	 * GTT. On 48b / 4-level page-tables, the difference is very,
	 * very significant! We have to preallocate as GVT/vgpu does
	 * not like the page directory disappearing.
	 */
	ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);

	ggtt->alias = ppgtt;
	ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;

	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;

	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;

	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
	return 0;

err_stash:
	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
err_ppgtt:
	i915_vm_put(&ppgtt->vm);
	return err;
}

static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = fetch_and_zero(&ggtt->alias);
	if (!ppgtt)
		return;

	i915_vm_put(&ppgtt->vm);

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
}

int i915_init_ggtt(struct drm_i915_private *i915)
{
	int ret;

	ret = init_ggtt(to_gt(i915)->ggtt);
	if (ret)
		return ret;

	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
		ret = init_aliasing_ppgtt(to_gt(i915)->ggtt);
		if (ret)
			cleanup_init_ggtt(to_gt(i915)->ggtt);
	}

	return 0;
}

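/*
 * Undo init_ggtt()/ggtt_init_hw(): unbind everything still on the bound
 * list, give back the reserved nodes and tear down the mapping.
 */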
static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;

	flush_workqueue(ggtt->vm.i915->wq);
	i915_gem_drain_freed_objects(ggtt->vm.i915);

	mutex_lock(&ggtt->vm.mutex);

	ggtt->vm.skip_pte_rewrite = true;

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		bool trylock;

		trylock = i915_gem_object_trylock(obj, NULL);
		WARN_ON(!trylock);

		WARN_ON(__i915_vma_unbind(vma));
		if (trylock)
			i915_gem_object_unlock(obj);
	}

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);

	ggtt_release_guc_top(ggtt);
	intel_vgt_deballoon(ggtt);

	ggtt->vm.cleanup(&ggtt->vm);

	mutex_unlock(&ggtt->vm.mutex);
	i915_address_space_fini(&ggtt->vm);

	arch_phys_wc_del(ggtt->mtrr);

	if (ggtt->iomap.size)
		io_mapping_fini(&ggtt->iomap);
}

/**
 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
 * @i915: i915 device
 */
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	fini_aliasing_ppgtt(ggtt);

	intel_ggtt_fini_fences(ggtt);
	ggtt_cleanup_hw(ggtt);
}

/**
 * i915_ggtt_driver_late_release - Cleanup of GGTT that needs to be done after
 * all free objects have been drained.
 * @i915: i915 device
 */
void i915_ggtt_driver_late_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;

	GEM_WARN_ON(kref_read(&ggtt->vm.resv_ref) != 1);
	dma_resv_fini(&ggtt->vm._resv);
}

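/*
 * The GGMS field in the GMCH control word encodes the GGTT size: on gen6
 * the field value is the size in MiB directly, on gen8+ it is log2 of the
 * size in MiB, and CHV uses its own power-of-two encoding.
 */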
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
{
	/*
	 * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
	 * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
	 */
	GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
	return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
}

static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
{
	return gen6_gttmmadr_size(i915) / 2;
}

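/*
 * Common tail of the gen6+ probe paths: map the GTT page table (the GSM,
 * in the upper half of BAR0) and set up the scratch page that backs any
 * unused PTEs.
 */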
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	phys_addr_t phys_addr;
	u32 pte_flags;
	int ret;

	GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
	phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);

	/*
	 * On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
	 * will be dropped. For WC mappings in general we have 64 byte burst
	 * writes when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(i915) || GRAPHICS_VER(i915) >= 11)
		ggtt->gsm = ioremap(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		drm_err(&i915->drm, "Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	kref_init(&ggtt->vm.resv_ref);
	ret = setup_scratch_page(&ggtt->vm);
	if (ret) {
		drm_err(&i915->drm, "Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	pte_flags = 0;
	if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
		pte_flags |= PTE_LM;

	ggtt->vm.scratch[0]->encode =
		ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
				    I915_CACHE_NONE, pte_flags);

	return 0;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	free_scratch(vm);
}

static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
	return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
					       pci_resource_len(pdev, bar));
}

static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	if (!HAS_LMEM(i915)) {
		ggtt->gmadr = pci_resource(pdev, 2);
		ggtt->mappable_end = resource_size(&ggtt->gmadr);
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	if (IS_CHERRYVIEW(i915))
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	else
		size = gen8_get_total_gtt_size(snb_gmch_ctl);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;
	ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;

	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	ggtt->vm.cleanup = gen6_gmch_remove;
	ggtt->vm.insert_page = gen8_ggtt_insert_page;
	ggtt->vm.clear_range = nop_clear_range;
	if (intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen8_ggtt_clear_range;

	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;

	/*
	 * Serialize GTT updates with aperture access on BXT if VT-d is on,
	 * and always on CHV.
	 */
	if (intel_vm_no_concurrent_access_wa(i915)) {
		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
		ggtt->vm.bind_async_flags =
			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	}

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;

	ggtt->vm.pte_encode = gen8_ggtt_pte_encode;

	setup_private_pat(ggtt->vm.gt->uncore);

	return ggtt_probe_common(ggtt, size);
}

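/*
 * gen6/gen7 PTE encoders: each platform variant maps i915_cache_level
 * onto the cacheability bits that particular generation understands.
 */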
static u64 snb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 ivb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 byt_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static u64 hsw_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static u64 iris_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	ggtt->gmadr = pci_resource(pdev, 2);
	ggtt->mappable_end = resource_size(&ggtt->gmadr);

	/*
	 * 64/512MB is the current min/max we actually know of, but this is
	 * just a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
			&ggtt->mappable_end);
		return -ENXIO;
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;

	ggtt->vm.clear_range = nop_clear_range;
	if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen6_ggtt_clear_range;
	ggtt->vm.insert_page = gen6_ggtt_insert_page;
	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
	ggtt->vm.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	if (HAS_EDRAM(i915))
		ggtt->vm.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(i915))
		ggtt->vm.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(i915))
		ggtt->vm.pte_encode = byt_pte_encode;
	else if (GRAPHICS_VER(i915) >= 7)
		ggtt->vm.pte_encode = ivb_pte_encode;
	else
		ggtt->vm.pte_encode = snb_pte_encode;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;

	return ggtt_probe_common(ggtt, size);
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

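/*
 * gen5 and earlier are driven through the GMCH via the intel-gtt helper;
 * here we only install thin wrappers around its insert/clear hooks.
 */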
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	phys_addr_t gmadr_base;
	int ret;

	ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
	if (!ret) {
		drm_err(&i915->drm, "failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);

	ggtt->gmadr =
		(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;

	if (needs_idle_maps(i915)) {
		drm_notice(&i915->drm,
			   "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
		ggtt->do_idle_maps = true;
	}

	ggtt->vm.insert_page = i915_ggtt_insert_page;
	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
	ggtt->vm.clear_range = i915_ggtt_clear_range;
	ggtt->vm.cleanup = i915_gmch_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;

	if (unlikely(ggtt->do_idle_maps))
		drm_notice(&i915->drm,
			   "Applying Ironlake quirks for intel_iommu\n");

	return 0;
}

static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	ggtt->vm.gt = gt;
	ggtt->vm.i915 = i915;
	ggtt->vm.dma = i915->drm.dev;
	dma_resv_init(&ggtt->vm._resv);

	if (GRAPHICS_VER(i915) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (GRAPHICS_VER(i915) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret) {
		dma_resv_fini(&ggtt->vm._resv);
		return ret;
	}

	if ((ggtt->vm.total - 1) >> 32) {
		drm_err(&i915->drm,
			"We never expected a Global GTT with more than 32bits"
			" of address space! Found %lldM!\n",
			ggtt->vm.total >> 20);
		ggtt->vm.total = 1ULL << 32;
		ggtt->mappable_end =
			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
	}

	if (ggtt->mappable_end > ggtt->vm.total) {
		drm_err(&i915->drm,
			"mappable aperture extends past end of GGTT,"
			" aperture=%pa, total=%llx\n",
			&ggtt->mappable_end, ggtt->vm.total);
		ggtt->mappable_end = ggtt->vm.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
	drm_dbg(&i915->drm, "GMADR size = %lluM\n",
		(u64)ggtt->mappable_end >> 20);
	drm_dbg(&i915->drm, "DSM size = %lluM\n",
		(u64)resource_size(&intel_graphics_stolen_res) >> 20);

	return 0;
}

/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @i915: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
	int ret;

	ret = ggtt_probe_hw(to_gt(i915)->ggtt, to_gt(i915));
	if (ret)
		return ret;

	if (intel_vtd_active(i915))
		drm_info(&i915->drm, "VT-d active for gfx access\n");

	return 0;
}

int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
	if (GRAPHICS_VER(i915) < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}

void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
	GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);

	ggtt->invalidate = guc_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
	/* XXX Temporary pardon for error unload */
	if (ggtt->invalidate == gen8_ggtt_invalidate)
		return;

	/* We should only be called after i915_ggtt_enable_guc() */
	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

/**
 * i915_ggtt_resume_vm - Restore the memory mappings for a GGTT or DPT VM
 * @vm: The VM to restore the mappings for
 *
 * Restore the memory mappings for all objects mapped to HW via the GGTT or a
 * DPT page table.
 *
 * Returns %true if restoring the mapping for any object that was in a write
 * domain before suspend.
 */
bool i915_ggtt_resume_vm(struct i915_address_space *vm)
{
	struct i915_vma *vma;
	bool write_domain_objs = false;

	drm_WARN_ON(&vm->i915->drm, !vm->is_ggtt && !vm->is_dpt);

	/* First fill our portion of the GTT with scratch pages */
	vm->clear_range(vm, 0, vm->total);

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry(vma, &vm->bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		unsigned int was_bound =
			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

		GEM_BUG_ON(!was_bound);
		vma->ops->bind_vma(vm, NULL, vma->resource,
				   obj ? obj->cache_level : 0,
				   was_bound);
		if (obj) { /* only used during resume => exclusive access */
			write_domain_objs |= fetch_and_zero(&obj->write_domain);
			obj->read_domains |= I915_GEM_DOMAIN_GTT;
		}
	}

	return write_domain_objs;
}

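/*
 * Resume the GGTT: rebind everything, flush CPU caches with wbinvd() if
 * any restored object was in a write domain, and reinstate the PAT and
 * the fence registers.
 */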
void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
	bool flush;

	intel_gt_check_and_clear_faults(ggtt->vm.gt);

	flush = i915_ggtt_resume_vm(&ggtt->vm);

	ggtt->invalidate(ggtt);

	if (flush)
		wbinvd_on_all_cpus();

	if (GRAPHICS_VER(ggtt->vm.i915) >= 8)
		setup_private_pat(ggtt->vm.gt->uncore);

	intel_ggtt_restore_fences(ggtt);
}