// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/stop_machine.h>

#include <asm/set_memory.h>
#include <asm/smp.h>

#include <drm/i915_drm.h>

#include "gem/i915_gem_lmem.h"

#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_vgpu.h"

#include "intel_gtt.h"
#include "gen8_ppgtt.h"

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);
static void i915_ggtt_color_adjust(const struct drm_mm_node *node,
				   unsigned long color,
				   u64 *start,
				   u64 *end)
{
	if (i915_node_color_differs(node, color))
		*start += I915_GTT_PAGE_SIZE;

	/*
	 * Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetches crossing over the
	 * GTT boundary.
	 */
	node = list_next_entry(node, node_list);
	if (node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}
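/*
 * Example: when allocating an unsnooped object into a hole bordered by
 * a snooped node, the usable range above is shrunk by I915_GTT_PAGE_SIZE
 * at each mismatched boundary, leaving an unmapped guard page between
 * the two cache domains (and before the reserved node ending the GTT).
 */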
static int ggtt_init_hw(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;

	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);

	ggtt->vm.is_ggtt = true;

	/* Only VLV supports read-only GGTT mappings */
	ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);

	if (!HAS_LLC(i915) && !HAS_PPGTT(i915))
		ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;

	if (ggtt->mappable_end) {
		if (!io_mapping_init_wc(&ggtt->iomap,
					ggtt->gmadr.start,
					ggtt->mappable_end)) {
			ggtt->vm.cleanup(&ggtt->vm);
			return -EIO;
		}

		ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start,
					      ggtt->mappable_end);
	}

	intel_ggtt_init_fences(ggtt);

	return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @i915: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *i915)
{
	int ret;

	/*
	 * Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	ret = ggtt_init_hw(&i915->ggtt);
	if (ret)
		return ret;

	return 0;
}

/*
 * Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *i915)
{
	/*
	 * Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (!intel_vtd_active())
		return false;

	if (IS_GEN(i915, 5) && IS_MOBILE(i915))
		return true;

	if (IS_GEN(i915, 12))
		return true; /* XXX DMAR fault reason 7 */

	return false;
}
void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;
	int open;

	mutex_lock(&ggtt->vm.mutex);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&ggtt->vm.open, 0);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		i915_vma_wait_for_bind(vma);

		if (i915_vma_is_pinned(vma))
			continue;

		if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) {
			__i915_vma_evict(vma);
			drm_mm_remove_node(&vma->node);
		}
	}

	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
	ggtt->invalidate(ggtt);
	atomic_set(&ggtt->vm.open, open);

	mutex_unlock(&ggtt->vm.mutex);

	intel_gt_check_and_clear_faults(ggtt->vm.gt);
}

void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	spin_lock_irq(&uncore->lock);
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	intel_uncore_read_fw(uncore, GFX_FLSH_CNTL_GEN6);
	spin_unlock_irq(&uncore->lock);
}

static void gen8_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;

	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	struct intel_uncore *uncore = ggtt->vm.gt->uncore;
	struct drm_i915_private *i915 = ggtt->vm.i915;

	gen8_ggtt_invalidate(ggtt);

	if (INTEL_GEN(i915) >= 12)
		intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR,
				      GEN12_GUC_TLB_INV_CR_INVALIDATE);
	else
		intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gtt_chipset_flush();
}
u64 gen8_ggtt_pte_encode(dma_addr_t addr,
			 enum i915_cache_level level,
			 u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT;

	if (flags & PTE_LM)
		pte |= GEN12_GGTT_PTE_LM;

	return pte;
}
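/*
 * Example: a system-memory page at dma address 0x100000000 encodes as
 * the PTE 0x100000001 (addr | _PAGE_PRESENT); a page in local memory
 * additionally carries GEN12_GGTT_PTE_LM via the PTE_LM flag.
 */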
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, flags));

	ggtt->invalidate(ggtt);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, flags);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *gte;
	gen8_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	gte = (gen8_pte_t __iomem *)ggtt->gsm;
	gte += vma->node.start / I915_GTT_PAGE_SIZE;
	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma->pages)
		gen8_set_pte(gte++, pte_encode | addr);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		gen8_set_pte(gte++, vm->scratch[0]->encode);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(ggtt);
}

/*
 * Binds an object into the global gtt with the specified cache level.
 * The object will be accessible to the GPU via commands whose operands
 * reference offsets within the global GTT as well as accessible by the GPU
 * through the GMADR mapped BAR (i915->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *gte;
	gen6_pte_t __iomem *end;
	struct sgt_iter iter;
	dma_addr_t addr;

	gte = (gen6_pte_t __iomem *)ggtt->gsm;
	gte += vma->node.start / I915_GTT_PAGE_SIZE;
	end = gte + vma->node.size / I915_GTT_PAGE_SIZE;

	for_each_sgt_daddr(addr, iter, vma->pages)
		iowrite32(vm->pte_encode(addr, level, flags), gte++);
	GEM_BUG_ON(gte > end);

	/* Fill the allocated but "unused" space beyond the end of the buffer */
	while (gte < end)
		iowrite32(vm->scratch[0]->encode, gte++);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(ggtt);
}

static void nop_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch[0]->encode;
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	/*
	 * Make sure the internal GAM fifo has been cleared of all GTT
	 * writes before exiting stop_machine(). This guarantees that
	 * any aperture accesses waiting to start in another process
	 * cannot back up behind the GTT writes causing a hang.
	 * The register can be any arbitrary GAM register.
	 */
	intel_uncore_posting_read_fw(vm->gt->uncore, GFX_FLSH_CNTL_GEN6);
}
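/*
 * The workaround above runs from the stop_machine() callbacks below:
 * both the PTE write and this posting read complete while every other
 * CPU is held, so no concurrent aperture access can slip in between.
 */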
struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

struct insert_entries {
	struct i915_address_space *vm;
	struct i915_vma *vma;
	enum i915_cache_level level;
	u32 flags;
};

static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct i915_vma *vma,
					     enum i915_cache_level level,
					     u32 flags)
{
	struct insert_entries arg = { vm, vma, level, flags };

	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
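/*
 * These __BKL variants replace the plain insert_page/insert_entries
 * hooks in gen8_gmch_probe() when intel_vm_no_concurrent_access_wa()
 * says GTT updates must be serialized against aperture access.
 */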
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->scratch[0]->encode;
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
}

static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level cache_level,
				     u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
				    flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
static void ggtt_bind_vma(struct i915_address_space *vm,
			  struct i915_vm_pt_stash *stash,
			  struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags;

	if (i915_vma_is_bound(vma, ~flags & I915_VMA_BIND_MASK))
		return;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;
	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma, cache_level, pte_flags);
	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}

static void ggtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	vm->clear_range(vm, vma->node.start, vma->size);
}

static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
{
	u64 size;
	int ret;

	if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
		return 0;

	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
	size = ggtt->vm.total - GUC_GGTT_TOP;

	ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
				   PIN_NOEVICT);
	if (ret)
		drm_dbg(&ggtt->vm.i915->drm,
			"Failed to reserve top of GGTT for GuC\n");

	return ret;
}

static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
{
	if (drm_mm_node_allocated(&ggtt->uc_fw))
		drm_mm_remove_node(&ggtt->uc_fw);
}

static void cleanup_init_ggtt(struct i915_ggtt *ggtt)
{
	ggtt_release_guc_top(ggtt);
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);
}
static int init_ggtt(struct i915_ggtt *ggtt)
{
	/*
	 * Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	unsigned long hole_start, hole_end;
	struct drm_mm_node *entry;
	int ret;

	/*
	 * GuC requires all resources that we're sharing with it to be placed in
	 * non-WOPCM memory. If GuC is not present or not in use we still need a
	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
	 * why.
	 */
	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
			       intel_wopcm_guc_size(&ggtt->vm.i915->wopcm));

	ret = intel_vgt_balloon(ggtt);
	if (ret)
		return ret;

	mutex_init(&ggtt->error_mutex);
	if (ggtt->mappable_end) {
		/*
		 * Reserve a mappable slot for our lockless error capture.
		 *
		 * We strongly prefer taking address 0x0 in order to protect
		 * other critical buffers against accidental overwrites,
		 * as writing to address 0 is a very common mistake.
		 *
		 * Since 0 may already be in use by the system (e.g. the BIOS
		 * framebuffer), we let the reservation fail quietly and hope
		 * 0 remains reserved always.
		 *
		 * If we fail to reserve 0, and then fail to find any space
		 * for an error-capture, remain silent. We can afford not
		 * to reserve an error_capture node as we have fallback
		 * paths, and we trust that 0 will remain reserved. However,
		 * the only likely reason for failure to insert is a driver
		 * bug, which we expect to cause other failures...
		 */
		ggtt->error_capture.size = I915_GTT_PAGE_SIZE;
		ggtt->error_capture.color = I915_COLOR_UNEVICTABLE;
		if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture))
			drm_mm_insert_node_in_range(&ggtt->vm.mm,
						    &ggtt->error_capture,
						    ggtt->error_capture.size, 0,
						    ggtt->error_capture.color,
						    0, ggtt->mappable_end,
						    DRM_MM_INSERT_LOW);
	}
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_dbg(&ggtt->vm.i915->drm,
			"Reserved GGTT:[%llx, %llx] for use by error capture\n",
			ggtt->error_capture.start,
			ggtt->error_capture.start + ggtt->error_capture.size);

	/*
	 * The upper portion of the GuC address space has a sizeable hole
	 * (several MB) that is inaccessible by GuC. Reserve this range within
	 * GGTT as it can comfortably hold GuC/HuC firmware images.
	 */
	ret = ggtt_reserve_guc_top(ggtt);
	if (ret)
		goto err;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
		drm_dbg(&ggtt->vm.i915->drm,
			"clearing unused GTT space: [%lx, %lx]\n",
			hole_start, hole_end);
		ggtt->vm.clear_range(&ggtt->vm, hole_start,
				     hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);

	return 0;

err:
	cleanup_init_ggtt(ggtt);
	return ret;
}
static void aliasing_gtt_bind_vma(struct i915_address_space *vm,
				  struct i915_vm_pt_stash *stash,
				  struct i915_vma *vma,
				  enum i915_cache_level cache_level,
				  u32 flags)
{
	u32 pte_flags;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND)
		ppgtt_bind_vma(&i915_vm_to_ggtt(vm)->alias->vm,
			       stash, vma, cache_level, flags);

	if (flags & I915_VMA_GLOBAL_BIND)
		vm->insert_entries(vm, vma, cache_level, pte_flags);
}

static void aliasing_gtt_unbind_vma(struct i915_address_space *vm,
				    struct i915_vma *vma)
{
	if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
		vm->clear_range(vm, vma->node.start, vma->size);

	if (i915_vma_is_bound(vma, I915_VMA_LOCAL_BIND))
		ppgtt_unbind_vma(&i915_vm_to_ggtt(vm)->alias->vm, vma);
}
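/*
 * An aliasing PPGTT mirrors the whole GGTT through one shared set of
 * page tables: a vma bound with I915_VMA_LOCAL_BIND appears in the
 * PPGTT at the same offset as its GGTT node, so contexts without full
 * per-process GTT support can still use "PPGTT" addressing.
 */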
static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_vm_pt_stash stash = {};
	struct i915_ppgtt *ppgtt;
	int err;

	ppgtt = i915_ppgtt_create(ggtt->vm.gt);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
		err = -ENODEV;
		goto err_ppgtt;
	}

	err = i915_vm_alloc_pt_stash(&ppgtt->vm, &stash, ggtt->vm.total);
	if (err)
		goto err_ppgtt;

	i915_gem_object_lock(ppgtt->vm.scratch[0], NULL);
	err = i915_vm_map_pt_stash(&ppgtt->vm, &stash);
	i915_gem_object_unlock(ppgtt->vm.scratch[0]);
	if (err)
		goto err_stash;

	/*
	 * Note we only pre-allocate as far as the end of the global
	 * GTT. On 48b / 4-level page-tables, the difference is very,
	 * very significant! We have to preallocate as GVT/vgpu does
	 * not like the page directory disappearing.
	 */
	ppgtt->vm.allocate_va_range(&ppgtt->vm, &stash, 0, ggtt->vm.total);

	ggtt->alias = ppgtt;
	ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags;

	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;

	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;

	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
	return 0;

err_stash:
	i915_vm_free_pt_stash(&ppgtt->vm, &stash);
err_ppgtt:
	i915_vm_put(&ppgtt->vm);
	return err;
}

static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = fetch_and_zero(&ggtt->alias);
	if (!ppgtt)
		return;

	i915_vm_put(&ppgtt->vm);

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
}

int i915_init_ggtt(struct drm_i915_private *i915)
{
	int ret;

	ret = init_ggtt(&i915->ggtt);
	if (ret)
		return ret;

	if (INTEL_PPGTT(i915) == INTEL_PPGTT_ALIASING) {
		ret = init_aliasing_ppgtt(&i915->ggtt);
		if (ret)
			cleanup_init_ggtt(&i915->ggtt);
	}

	return 0;
}
static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma, *vn;

	atomic_set(&ggtt->vm.open, 0);

	rcu_barrier(); /* flush the RCU'ed __i915_vm_release */
	flush_workqueue(ggtt->vm.i915->wq);

	mutex_lock(&ggtt->vm.mutex);

	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
		WARN_ON(__i915_vma_unbind(vma));

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);
	mutex_destroy(&ggtt->error_mutex);

	ggtt_release_guc_top(ggtt);
	intel_vgt_deballoon(ggtt);

	ggtt->vm.cleanup(&ggtt->vm);

	mutex_unlock(&ggtt->vm.mutex);
	i915_address_space_fini(&ggtt->vm);
	dma_resv_fini(&ggtt->vm.resv);

	arch_phys_wc_del(ggtt->mtrr);

	if (ggtt->iomap.size)
		io_mapping_fini(&ggtt->iomap);
}

/**
 * i915_ggtt_driver_release - Clean up GGTT hardware initialization
 * @i915: i915 device
 */
void i915_ggtt_driver_release(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;

	fini_aliasing_ppgtt(ggtt);

	intel_ggtt_fini_fences(ggtt);
	ggtt_cleanup_hw(ggtt);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}
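/*
 * The GGMS field is the GSM (GTT stolen) size in MiB, e.g. a raw value
 * of 2 means 2MiB of 4-byte PTEs, i.e. 512Ki entries or a 2GiB GGTT.
 */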
static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;

	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}
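/*
 * On gen8 GGMS is a shift: a raw value of 3 selects an 8MiB GSM, i.e.
 * 1Mi 8-byte PTEs or a 4GiB GGTT. The clamp to 4 above caps 32b
 * kernels at 4MiB of PTEs, which is the 2GiB GGTT noted in the comment.
 */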
static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}
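/* CHV treats the same field as a power of two: n gives a (1 << n) MiB GSM. */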
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	phys_addr_t phys_addr;
	u32 pte_flags;
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;

	/*
	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
	 * will be dropped. For WC mappings in general we have 64 byte burst
	 * writes when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
		ggtt->gsm = ioremap(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		drm_err(&i915->drm, "Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(&ggtt->vm);
	if (ret) {
		drm_err(&i915->drm, "Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	pte_flags = 0;
	if (i915_gem_object_is_lmem(ggtt->vm.scratch[0]))
		pte_flags |= PTE_LM;

	ggtt->vm.scratch[0]->encode =
		ggtt->vm.pte_encode(px_dma(ggtt->vm.scratch[0]),
				    I915_CACHE_NONE, pte_flags);

	return 0;
}
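/*
 * Layout assumed above: BAR 0 (GTTMMADR) is split evenly, with the
 * register space in the first half and the GSM holding the PTEs in the
 * second half, hence mapping from start + len / 2.
 */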
int ggtt_set_pages(struct i915_vma *vma)
{
	int ret;

	GEM_BUG_ON(vma->pages);

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	free_scratch(vm);
}

static struct resource pci_resource(struct pci_dev *pdev, int bar)
{
	return (struct resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar),
					       pci_resource_len(pdev, bar));
}

static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	if (!HAS_LMEM(i915)) {
		ggtt->gmadr = pci_resource(pdev, 2);
		ggtt->mappable_end = resource_size(&ggtt->gmadr);
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	if (IS_CHERRYVIEW(i915))
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	else
		size = gen8_get_total_gtt_size(snb_gmch_ctl);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	ggtt->vm.cleanup = gen6_gmch_remove;
	ggtt->vm.insert_page = gen8_ggtt_insert_page;
	ggtt->vm.clear_range = nop_clear_range;
	if (intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen8_ggtt_clear_range;

	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;

	/*
	 * Serialize GTT updates with aperture access on BXT if VT-d is on,
	 * and always on CHV.
	 */
	if (intel_vm_no_concurrent_access_wa(i915)) {
		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
		ggtt->vm.bind_async_flags =
			I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
	}

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	ggtt->vm.pte_encode = gen8_ggtt_pte_encode;

	setup_private_pat(ggtt->vm.gt->uncore);

	return ggtt_probe_common(ggtt, size);
}
static u64 snb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 ivb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 byt_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static u64 hsw_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static u64 iris_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen6_pte_t pte = HSW_PTE_ADDR_ENCODE(addr) | GEN6_PTE_VALID;

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
	unsigned int size;
	u16 snb_gmch_ctl;

	ggtt->gmadr = pci_resource(pdev, 2);
	ggtt->mappable_end = resource_size(&ggtt->gmadr);

	/*
	 * 64/512MB is the current min/max we actually know of, but this is
	 * just a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		drm_err(&i915->drm, "Unknown GMADR size (%pa)\n",
			&ggtt->mappable_end);
		return -ENXIO;
	}

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	ggtt->vm.clear_range = nop_clear_range;
	if (!HAS_FULL_PPGTT(i915) || intel_scanout_needs_vtd_wa(i915))
		ggtt->vm.clear_range = gen6_ggtt_clear_range;
	ggtt->vm.insert_page = gen6_ggtt_insert_page;
	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
	ggtt->vm.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	if (HAS_EDRAM(i915))
		ggtt->vm.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(i915))
		ggtt->vm.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(i915))
		ggtt->vm.pte_encode = byt_pte_encode;
	else if (INTEL_GEN(i915) >= 7)
		ggtt->vm.pte_encode = ivb_pte_encode;
	else
		ggtt->vm.pte_encode = snb_pte_encode;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	return ggtt_probe_common(ggtt, size);
}
static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	phys_addr_t gmadr_base;
	int ret;

	ret = intel_gmch_probe(i915->bridge_dev, to_pci_dev(i915->drm.dev), NULL);
	if (!ret) {
		drm_err(&i915->drm, "failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);

	ggtt->gmadr =
		(struct resource)DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;

	if (needs_idle_maps(i915)) {
		drm_notice(&i915->drm,
			   "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
		ggtt->do_idle_maps = true;
	}

	ggtt->vm.insert_page = i915_ggtt_insert_page;
	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
	ggtt->vm.clear_range = i915_ggtt_clear_range;
	ggtt->vm.cleanup = i915_gmch_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

	if (unlikely(ggtt->do_idle_maps))
		drm_notice(&i915->drm,
			   "Applying Ironlake quirks for intel_iommu\n");

	return 0;
}
static int ggtt_probe_hw(struct i915_ggtt *ggtt, struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;
	int ret;

	ggtt->vm.gt = gt;
	ggtt->vm.i915 = i915;
	ggtt->vm.dma = i915->drm.dev;
	dma_resv_init(&ggtt->vm.resv);

	if (INTEL_GEN(i915) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (INTEL_GEN(i915) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret) {
		dma_resv_fini(&ggtt->vm.resv);
		return ret;
	}

	if ((ggtt->vm.total - 1) >> 32) {
		drm_err(&i915->drm,
			"We never expected a Global GTT with more than 32bits"
			" of address space! Found %lldM!\n",
			ggtt->vm.total >> 20);
		ggtt->vm.total = 1ULL << 32;
		ggtt->mappable_end =
			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
	}

	if (ggtt->mappable_end > ggtt->vm.total) {
		drm_err(&i915->drm,
			"mappable aperture extends past end of GGTT,"
			" aperture=%pa, total=%llx\n",
			&ggtt->mappable_end, ggtt->vm.total);
		ggtt->mappable_end = ggtt->vm.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	drm_dbg(&i915->drm, "GGTT size = %lluM\n", ggtt->vm.total >> 20);
	drm_dbg(&i915->drm, "GMADR size = %lluM\n",
		(u64)ggtt->mappable_end >> 20);
	drm_dbg(&i915->drm, "DSM size = %lluM\n",
		(u64)resource_size(&intel_graphics_stolen_res) >> 20);

	return 0;
}
/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @i915: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *i915)
{
	int ret;

	ret = ggtt_probe_hw(&i915->ggtt, &i915->gt);
	if (ret)
		return ret;

	if (intel_vtd_active())
		drm_info(&i915->drm, "VT-d active for gfx access\n");

	return 0;
}

int i915_ggtt_enable_hw(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}

void i915_ggtt_enable_guc(struct i915_ggtt *ggtt)
{
	GEM_BUG_ON(ggtt->invalidate != gen8_ggtt_invalidate);

	ggtt->invalidate = guc_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}

void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
{
	/* XXX Temporary pardon for error unload */
	if (ggtt->invalidate == gen8_ggtt_invalidate)
		return;

	/* We should only be called after i915_ggtt_enable_guc() */
	GEM_BUG_ON(ggtt->invalidate != guc_ggtt_invalidate);

	ggtt->invalidate = gen8_ggtt_invalidate;

	ggtt->invalidate(ggtt);
}
void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
	struct i915_vma *vma;
	bool flush = false;
	int open;

	intel_gt_check_and_clear_faults(ggtt->vm.gt);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);

	/* Skip rewriting PTE on VMA unbind. */
	open = atomic_xchg(&ggtt->vm.open, 0);

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
		struct drm_i915_gem_object *obj = vma->obj;
		unsigned int was_bound =
			atomic_read(&vma->flags) & I915_VMA_BIND_MASK;

		GEM_BUG_ON(!was_bound);
		vma->ops->bind_vma(&ggtt->vm, NULL, vma,
				   obj ? obj->cache_level : 0,
				   was_bound);
		if (obj) { /* only used during resume => exclusive access */
			flush |= fetch_and_zero(&obj->write_domain);
			obj->read_domains |= I915_GEM_DOMAIN_GTT;
		}
	}

	atomic_set(&ggtt->vm.open, open);
	ggtt->invalidate(ggtt);

	if (flush)
		wbinvd_on_all_cpus();

	if (INTEL_GEN(ggtt->vm.i915) >= 8)
		setup_private_pat(ggtt->vm.gt->uncore);

	intel_ggtt_restore_fences(ggtt);
}
static struct scatterlist *
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int src_stride, unsigned int dst_stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		unsigned int left;

		src_idx = src_stride * (height - 1) + column + offset;
		for (row = 0; row < height; row++) {
			st->nents++;
			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= src_stride;
		}

		left = (dst_stride - height) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}
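/*
 * Example: a 2x2 tile plane with offset 0 and src_stride 2 is walked
 * column by column from the bottom row up, emitting the source pages
 * in the order 2, 0, 3, 1; any dst_stride padding beyond the height
 * becomes a single zero-address entry that merely reserves PTE space.
 */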
static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_rotation_info_size(rot_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
		sg = rotate_pages(obj, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].src_stride,
				  rot_info->plane[i].dst_stride,
				  st, sg);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rot_info->plane[0].width,
		rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}
static struct scatterlist *
remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
	    unsigned int width, unsigned int height,
	    unsigned int src_stride, unsigned int dst_stride,
	    struct sg_table *st, struct scatterlist *sg)
{
	unsigned int row;

	for (row = 0; row < height; row++) {
		unsigned int left = width * I915_GTT_PAGE_SIZE;

		while (left) {
			dma_addr_t addr;
			unsigned int length;

			/*
			 * We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */

			addr = i915_gem_object_get_dma_address_len(obj, offset, &length);

			length = min(left, length);

			st->nents++;

			sg_set_page(sg, NULL, length, 0);
			sg_dma_address(sg) = addr;
			sg_dma_len(sg) = length;
			sg = sg_next(sg);

			offset += length / I915_GTT_PAGE_SIZE;
			left -= length;
		}

		offset += src_stride - width;

		left = (dst_stride - width) * I915_GTT_PAGE_SIZE;

		if (!left)
			continue;

		st->nents++;

		/*
		 * The DE ignores the PTEs for the padding tiles, the sg entry
		 * here is just a convenience to indicate how many padding PTEs
		 * to insert at this spot.
		 */
		sg_set_page(sg, NULL, left, 0);
		sg_dma_address(sg) = 0;
		sg_dma_len(sg) = left;
		sg = sg_next(sg);
	}

	return sg;
}
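/*
 * Unlike rotate_pages() above, this walks the source row by row and
 * lets i915_gem_object_get_dma_address_len() coalesce runs of
 * contiguous pages into a single sg entry; only the (dst_stride -
 * width) tail of each row needs a zero-address padding entry.
 */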
static noinline struct sg_table *
intel_remap_pages(struct intel_remapped_info *rem_info,
		  struct drm_i915_gem_object *obj)
{
	unsigned int size = intel_remapped_info_size(rem_info);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;
	int i;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
		sg = remap_pages(obj, rem_info->plane[i].offset,
				 rem_info->plane[i].width, rem_info->plane[i].height,
				 rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
				 st, sg);
	}

	i915_sg_trim(st);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		obj->base.size, rem_info->plane[0].width,
		rem_info->plane[0].height, size);

	return ERR_PTR(ret);
}
static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg, *iter;
	unsigned int count = view->partial.size;
	unsigned int offset;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	iter = i915_gem_object_get_sg_dma(obj, view->partial.offset, &offset, true);
	GEM_BUG_ON(!iter);

	sg = st->sgl;
	st->nents = 0;
	do {
		unsigned int len;

		len = min(sg_dma_len(iter) - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0) {
			sg_mark_end(sg);
			i915_sg_trim(st); /* Drop any unused tail entries. */

			return st;
		}

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret;

	/*
	 * The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->ggtt_view.type) {
	default:
		GEM_BUG_ON(vma->ggtt_view.type);
		fallthrough;
	case I915_GGTT_VIEW_NORMAL:
		vma->pages = vma->obj->mm.pages;
		return 0;

	case I915_GGTT_VIEW_ROTATED:
		vma->pages =
			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
		break;

	case I915_GGTT_VIEW_REMAPPED:
		vma->pages =
			intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
		break;

	case I915_GGTT_VIEW_PARTIAL:
		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
		break;
	}

	ret = 0;
	if (IS_ERR(vma->pages)) {
		ret = PTR_ERR(vma->pages);
		vma->pages = NULL;
		drm_err(&vma->vm->i915->drm,
			"Failed to get pages for VMA view type %u (%d)!\n",
			vma->ggtt_view.type, ret);
	}
	return ret;
}