2 * Copyright © 2010 Daniel Vetter
3 * Copyright © 2011-2014 Intel Corporation
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
26 #include <linux/slab.h> /* fault-inject.h is not standalone! */
28 #include <linux/fault-inject.h>
29 #include <linux/log2.h>
30 #include <linux/random.h>
31 #include <linux/seq_file.h>
32 #include <linux/stop_machine.h>
34 #include <asm/set_memory.h>
36 #include <drm/i915_drm.h>
38 #include "display/intel_frontbuffer.h"
41 #include "i915_scatterlist.h"
42 #include "i915_trace.h"
43 #include "i915_vgpu.h"
44 #include "intel_drv.h"
46 #define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
49 * DOC: Global GTT views
51 * Background and previous state
53 * Historically objects could exist (be bound) in global GTT space only as
54 * singular instances with a view representing all of the object's backing pages
55 * in a linear fashion. This view is called the normal view.
57 * To support multiple views of the same object, where the number of mapped
58 * pages is not equal to the backing store, or where the layout of the pages
59 * is not linear, the concept of a GGTT view was added.
61 * One example of an alternative view is a stereo display driven by a single
62 * image. In this case we would have a framebuffer looking like this
68 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
69 * rendering. In contrast, fed to the display engine would be an alternative
70 * view which could look something like this:
75 * In this example both the size and layout of pages in the alternative view are
76 * different from the normal view.
78 * Implementation and usage
80 * GGTT views are implemented using VMAs and are distinguished via enum
81 * i915_ggtt_view_type and struct i915_ggtt_view.
83 * A new flavour of core GEM functions which work with GGTT bound objects was
84 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
85 * renaming in large amounts of code. They take the struct i915_ggtt_view
86 * parameter encapsulating all metadata required to implement a view.
88 * As a helper for callers which are only interested in the normal view,
89 * a globally const i915_ggtt_view_normal singleton instance exists. All old core
90 * GEM API functions, the ones not taking the view parameter, operate on the
91 * normal GGTT view.
93 * Code wanting to add or use a new GGTT view needs to:
95 * 1. Add a new enum with a suitable name.
96 * 2. Extend the metadata in the i915_ggtt_view structure if required.
97 * 3. Add support to i915_get_vma_pages().
99 * New views are required to build a scatter-gather table from within the
100 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
101 * exists for the lifetime of a VMA.
103 * The core API is designed to have copy semantics, which means that the passed-in
104 * struct i915_ggtt_view does not need to be persistent (left around after
105 * calling the core API functions).
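 *
 * As a purely illustrative sketch (the pin helper and flag named here live
 * elsewhere in the driver and are shown only as an assumed example), a caller
 * that only needs the normal view can rely on these copy semantics and pass a
 * stack-local view:
 *
 *	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_NORMAL };
 *	struct i915_vma *vma;
 *
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * The view is copied by the core API, so it may safely go out of scope once
 * the call returns.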
110 i915_get_ggtt_vma_pages(struct i915_vma *vma);
112 static void gen6_ggtt_invalidate(struct drm_i915_private *i915)
114 struct intel_uncore *uncore = &i915->uncore;
117 * Note that as an uncached mmio write, this will flush the
118 * WCB of the writes into the GGTT before it triggers the invalidate.
120 intel_uncore_write_fw(uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
123 static void guc_ggtt_invalidate(struct drm_i915_private *i915)
125 struct intel_uncore *uncore = &i915->uncore;
127 gen6_ggtt_invalidate(i915);
128 intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE);
131 static void gmch_ggtt_invalidate(struct drm_i915_private *i915)
133 intel_gtt_chipset_flush();
136 static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
138 i915->ggtt.invalidate(i915);
141 static int ppgtt_bind_vma(struct i915_vma *vma,
142 enum i915_cache_level cache_level,
148 if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
149 err = vma->vm->allocate_va_range(vma->vm,
150 vma->node.start, vma->size);
155 /* Applicable to VLV, and gen8+ */
157 if (i915_gem_object_is_readonly(vma->obj))
158 pte_flags |= PTE_READ_ONLY;
160 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
165 static void ppgtt_unbind_vma(struct i915_vma *vma)
167 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
170 static int ppgtt_set_pages(struct i915_vma *vma)
172 GEM_BUG_ON(vma->pages);
174 vma->pages = vma->obj->mm.pages;
176 vma->page_sizes = vma->obj->mm.page_sizes;
181 static void clear_pages(struct i915_vma *vma)
183 GEM_BUG_ON(!vma->pages);
185 if (vma->pages != vma->obj->mm.pages) {
186 sg_free_table(vma->pages);
191 memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
194 static u64 gen8_pte_encode(dma_addr_t addr,
195 enum i915_cache_level level,
198 gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
200 if (unlikely(flags & PTE_READ_ONLY))
204 case I915_CACHE_NONE:
205 pte |= PPAT_UNCACHED;
208 pte |= PPAT_DISPLAY_ELLC;
218 static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
219 const enum i915_cache_level level)
221 gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
223 if (level != I915_CACHE_NONE)
224 pde |= PPAT_CACHED_PDE;
226 pde |= PPAT_UNCACHED;
230 #define gen8_pdpe_encode gen8_pde_encode
231 #define gen8_pml4e_encode gen8_pde_encode
233 static u64 snb_pte_encode(dma_addr_t addr,
234 enum i915_cache_level level,
237 gen6_pte_t pte = GEN6_PTE_VALID;
238 pte |= GEN6_PTE_ADDR_ENCODE(addr);
241 case I915_CACHE_L3_LLC:
243 pte |= GEN6_PTE_CACHE_LLC;
245 case I915_CACHE_NONE:
246 pte |= GEN6_PTE_UNCACHED;
255 static u64 ivb_pte_encode(dma_addr_t addr,
256 enum i915_cache_level level,
259 gen6_pte_t pte = GEN6_PTE_VALID;
260 pte |= GEN6_PTE_ADDR_ENCODE(addr);
263 case I915_CACHE_L3_LLC:
264 pte |= GEN7_PTE_CACHE_L3_LLC;
267 pte |= GEN6_PTE_CACHE_LLC;
269 case I915_CACHE_NONE:
270 pte |= GEN6_PTE_UNCACHED;
279 static u64 byt_pte_encode(dma_addr_t addr,
280 enum i915_cache_level level,
283 gen6_pte_t pte = GEN6_PTE_VALID;
284 pte |= GEN6_PTE_ADDR_ENCODE(addr);
286 if (!(flags & PTE_READ_ONLY))
287 pte |= BYT_PTE_WRITEABLE;
289 if (level != I915_CACHE_NONE)
290 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
295 static u64 hsw_pte_encode(dma_addr_t addr,
296 enum i915_cache_level level,
299 gen6_pte_t pte = GEN6_PTE_VALID;
300 pte |= HSW_PTE_ADDR_ENCODE(addr);
302 if (level != I915_CACHE_NONE)
303 pte |= HSW_WB_LLC_AGE3;
308 static u64 iris_pte_encode(dma_addr_t addr,
309 enum i915_cache_level level,
312 gen6_pte_t pte = GEN6_PTE_VALID;
313 pte |= HSW_PTE_ADDR_ENCODE(addr);
316 case I915_CACHE_NONE:
319 pte |= HSW_WT_ELLC_LLC_AGE3;
322 pte |= HSW_WB_ELLC_LLC_AGE3;
329 static void stash_init(struct pagestash *stash)
331 pagevec_init(&stash->pvec);
332 spin_lock_init(&stash->lock);
335 static struct page *stash_pop_page(struct pagestash *stash)
337 struct page *page = NULL;
339 spin_lock(&stash->lock);
340 if (likely(stash->pvec.nr))
341 page = stash->pvec.pages[--stash->pvec.nr];
342 spin_unlock(&stash->lock);
347 static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
351 spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);
353 nr = min_t(typeof(nr), pvec->nr, pagevec_space(&stash->pvec));
354 memcpy(stash->pvec.pages + stash->pvec.nr,
355 pvec->pages + pvec->nr - nr,
356 sizeof(pvec->pages[0]) * nr);
357 stash->pvec.nr += nr;
359 spin_unlock(&stash->lock);
364 static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
366 struct pagevec stack;
369 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
370 i915_gem_shrink_all(vm->i915);
372 page = stash_pop_page(&vm->free_pages);
377 return alloc_page(gfp);
379 /* Look in our global stash of WC pages... */
380 page = stash_pop_page(&vm->i915->mm.wc_stash);
385 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
387 * We have to be careful as page allocation may trigger the shrinker
388 * (via direct reclaim) which will fill up the WC stash underneath us.
389 * So we add our WB pages into a temporary pvec on the stack and merge
390 * them into the WC stash after all the allocations are complete.
392 pagevec_init(&stack);
396 page = alloc_page(gfp);
400 stack.pages[stack.nr++] = page;
401 } while (pagevec_space(&stack));
403 if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
404 page = stack.pages[--stack.nr];
406 /* Merge spare WC pages to the global stash */
408 stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
410 /* Push any surplus WC pages onto the local VM stash */
412 stash_push_pagevec(&vm->free_pages, &stack);
415 /* Return unwanted leftovers */
416 if (unlikely(stack.nr)) {
417 WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
418 __pagevec_release(&stack);
424 static void vm_free_pages_release(struct i915_address_space *vm,
427 struct pagevec *pvec = &vm->free_pages.pvec;
428 struct pagevec stack;
430 lockdep_assert_held(&vm->free_pages.lock);
431 GEM_BUG_ON(!pagevec_count(pvec));
433 if (vm->pt_kmap_wc) {
435 * When we use WC, first fill up the global stash and then,
436 * only if it is full, immediately free the overflow.
438 stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
441 * As we have made some room in the VM's free_pages,
442 * we can wait for it to fill again. Unless we are
443 * inside i915_address_space_fini() and must
444 * immediately release the pages!
446 if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
450 * We have to drop the lock to allow ourselves to sleep,
451 * so take a copy of the pvec and clear the stash so that
452 * others can use it while we sleep.
455 pagevec_reinit(pvec);
456 spin_unlock(&vm->free_pages.lock);
459 set_pages_array_wb(pvec->pages, pvec->nr);
461 spin_lock(&vm->free_pages.lock);
464 __pagevec_release(pvec);
467 static void vm_free_page(struct i915_address_space *vm, struct page *page)
470 * On !llc, we need to change the pages back to WB. We only do so
471 * in bulk, so we rarely need to change the page attributes here,
472 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
473 * To make detection of the possible sleep more likely, use an
474 * unconditional might_sleep() for everybody.
477 spin_lock(&vm->free_pages.lock);
478 while (!pagevec_space(&vm->free_pages.pvec))
479 vm_free_pages_release(vm, false);
480 GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec) >= PAGEVEC_SIZE);
481 pagevec_add(&vm->free_pages.pvec, page);
482 spin_unlock(&vm->free_pages.lock);
485 static void i915_address_space_init(struct i915_address_space *vm, int subclass)
490 * The vm->mutex must be reclaim safe (for use in the shrinker).
491 * Do a dummy acquire now under fs_reclaim so that any allocation
492 * attempt holding the lock is immediately reported by lockdep.
494 mutex_init(&vm->mutex);
495 lockdep_set_subclass(&vm->mutex, subclass);
496 i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
498 GEM_BUG_ON(!vm->total);
499 drm_mm_init(&vm->mm, 0, vm->total);
500 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
502 stash_init(&vm->free_pages);
504 INIT_LIST_HEAD(&vm->unbound_list);
505 INIT_LIST_HEAD(&vm->bound_list);
508 static void i915_address_space_fini(struct i915_address_space *vm)
510 spin_lock(&vm->free_pages.lock);
511 if (pagevec_count(&vm->free_pages.pvec))
512 vm_free_pages_release(vm, true);
513 GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
514 spin_unlock(&vm->free_pages.lock);
516 drm_mm_takedown(&vm->mm);
518 mutex_destroy(&vm->mutex);
521 static int __setup_page_dma(struct i915_address_space *vm,
522 struct i915_page_dma *p,
525 p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
526 if (unlikely(!p->page))
529 p->daddr = dma_map_page_attrs(vm->dma,
530 p->page, 0, PAGE_SIZE,
531 PCI_DMA_BIDIRECTIONAL,
532 DMA_ATTR_SKIP_CPU_SYNC |
534 if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
535 vm_free_page(vm, p->page);
542 static int setup_page_dma(struct i915_address_space *vm,
543 struct i915_page_dma *p)
545 return __setup_page_dma(vm, p, __GFP_HIGHMEM);
548 static void cleanup_page_dma(struct i915_address_space *vm,
549 struct i915_page_dma *p)
551 dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
552 vm_free_page(vm, p->page);
555 #define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
557 #define setup_px(vm, px) setup_page_dma((vm), px_base(px))
558 #define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
559 #define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
560 #define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
562 static void fill_page_dma(struct i915_address_space *vm,
563 struct i915_page_dma *p,
566 u64 * const vaddr = kmap_atomic(p->page);
568 memset64(vaddr, val, PAGE_SIZE / sizeof(val));
570 kunmap_atomic(vaddr);
573 static void fill_page_dma_32(struct i915_address_space *vm,
574 struct i915_page_dma *p,
577 fill_page_dma(vm, p, (u64)v << 32 | v);
581 setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
586 * In order to utilize 64K pages for an object with a size < 2M, we will
587 * need to support a 64K scratch page, given that every 16th entry for a
588 * page-table operating in 64K mode must point to a properly aligned 64K
589 * region, including any PTEs which happen to point to scratch.
591 * This is only relevant for the 48b PPGTT where we support
592 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
593 * scratch (read-only) between all vm, we create one 64k scratch page
596 size = I915_GTT_PAGE_SIZE_4K;
597 if (i915_vm_is_4lvl(vm) &&
598 HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
599 size = I915_GTT_PAGE_SIZE_64K;
602 gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;
605 int order = get_order(size);
609 page = alloc_pages(gfp, order);
613 addr = dma_map_page_attrs(vm->dma,
615 PCI_DMA_BIDIRECTIONAL,
616 DMA_ATTR_SKIP_CPU_SYNC |
618 if (unlikely(dma_mapping_error(vm->dma, addr)))
621 if (unlikely(!IS_ALIGNED(addr, size)))
624 vm->scratch_page.page = page;
625 vm->scratch_page.daddr = addr;
626 vm->scratch_order = order;
630 dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
632 __free_pages(page, order);
634 if (size == I915_GTT_PAGE_SIZE_4K)
637 size = I915_GTT_PAGE_SIZE_4K;
638 gfp &= ~__GFP_NOWARN;
642 static void cleanup_scratch_page(struct i915_address_space *vm)
644 struct i915_page_dma *p = &vm->scratch_page;
645 int order = vm->scratch_order;
647 dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
648 PCI_DMA_BIDIRECTIONAL);
649 __free_pages(p->page, order);
652 static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
654 struct i915_page_table *pt;
656 pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
658 return ERR_PTR(-ENOMEM);
660 if (unlikely(setup_px(vm, pt))) {
662 return ERR_PTR(-ENOMEM);
665 atomic_set(&pt->used, 0);
670 static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
676 static void gen8_initialize_pt(struct i915_address_space *vm,
677 struct i915_page_table *pt)
679 fill_px(vm, pt, vm->scratch_pte);
682 static void gen6_initialize_pt(struct i915_address_space *vm,
683 struct i915_page_table *pt)
685 fill32_px(vm, pt, vm->scratch_pte);
688 static struct i915_page_directory *__alloc_pd(void)
690 struct i915_page_directory *pd;
692 pd = kmalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
697 memset(&pd->base, 0, sizeof(pd->base));
698 atomic_set(&pd->used, 0);
699 spin_lock_init(&pd->lock);
707 static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
709 struct i915_page_directory *pd;
713 return ERR_PTR(-ENOMEM);
715 if (unlikely(setup_px(vm, pd))) {
717 return ERR_PTR(-ENOMEM);
723 static inline bool pd_has_phys_page(const struct i915_page_directory * const pd)
725 return pd->base.page;
728 static void free_pd(struct i915_address_space *vm,
729 struct i915_page_directory *pd)
731 if (likely(pd_has_phys_page(pd)))
737 static void init_pd_with_page(struct i915_address_space *vm,
738 struct i915_page_directory * const pd,
739 struct i915_page_table *pt)
741 fill_px(vm, pd, gen8_pde_encode(px_dma(pt), I915_CACHE_LLC));
742 memset_p(pd->entry, pt, 512);
745 static void init_pd(struct i915_address_space *vm,
746 struct i915_page_directory * const pd,
747 struct i915_page_directory * const to)
749 GEM_DEBUG_BUG_ON(!pd_has_phys_page(pd));
751 fill_px(vm, pd, gen8_pdpe_encode(px_dma(to), I915_CACHE_LLC));
752 memset_p(pd->entry, to, 512);
756 * PDE TLBs are a pain to invalidate on GEN8+. When we modify
757 * the page table structures, we mark them dirty so that
758 * context switching/execlist queuing code takes extra steps
759 * to ensure that tlbs are flushed.
761 static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt)
763 ppgtt->pd_dirty_engines = ALL_ENGINES;
766 /* Removes entries from a single page table, releasing it if it's empty.
767 * Caller can use the return value to update higher-level entries.
769 static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
770 struct i915_page_table *pt,
771 u64 start, u64 length)
773 unsigned int num_entries = gen8_pte_count(start, length);
776 vaddr = kmap_atomic_px(pt);
777 memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
778 kunmap_atomic(vaddr);
780 GEM_BUG_ON(num_entries > atomic_read(&pt->used));
781 return !atomic_sub_return(num_entries, &pt->used);
784 static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
785 struct i915_page_directory *pd,
786 struct i915_page_table *pt,
791 vaddr = kmap_atomic_px(pd);
792 vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
793 kunmap_atomic(vaddr);
796 static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
797 struct i915_page_directory *pd,
798 u64 start, u64 length)
800 struct i915_page_table *pt;
803 gen8_for_each_pde(pt, pd, start, length, pde) {
806 GEM_BUG_ON(pt == vm->scratch_pt);
808 if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
811 spin_lock(&pd->lock);
812 if (!atomic_read(&pt->used)) {
813 gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
814 pd->entry[pde] = vm->scratch_pt;
816 GEM_BUG_ON(!atomic_read(&pd->used));
817 atomic_dec(&pd->used);
820 spin_unlock(&pd->lock);
825 return !atomic_read(&pd->used);
828 static void gen8_ppgtt_set_pdpe(struct i915_page_directory *pdp,
829 struct i915_page_directory *pd,
832 gen8_ppgtt_pdpe_t *vaddr;
834 if (!pd_has_phys_page(pdp))
837 vaddr = kmap_atomic_px(pdp);
838 vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
839 kunmap_atomic(vaddr);
842 /* Removes entries from a single page dir pointer, releasing it if it's empty.
843 * Caller can use the return value to update higher-level entries
845 static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
846 struct i915_page_directory * const pdp,
847 u64 start, u64 length)
849 struct i915_page_directory *pd;
852 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
855 GEM_BUG_ON(pd == vm->scratch_pd);
857 if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
860 spin_lock(&pdp->lock);
861 if (!atomic_read(&pd->used)) {
862 gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
863 pdp->entry[pdpe] = vm->scratch_pd;
865 GEM_BUG_ON(!atomic_read(&pdp->used));
866 atomic_dec(&pdp->used);
869 spin_unlock(&pdp->lock);
874 return !atomic_read(&pdp->used);
877 static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
878 u64 start, u64 length)
880 gen8_ppgtt_clear_pdp(vm, i915_vm_to_ppgtt(vm)->pd, start, length);
883 static void gen8_ppgtt_set_pml4e(struct i915_page_directory *pml4,
884 struct i915_page_directory *pdp,
887 gen8_ppgtt_pml4e_t *vaddr;
889 vaddr = kmap_atomic_px(pml4);
890 vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
891 kunmap_atomic(vaddr);
894 /* Removes entries from a single pml4.
895 * This is the top-level structure in 4-level page tables used on gen8+.
896 * Empty entries are always scratch pml4e.
898 static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
899 u64 start, u64 length)
901 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
902 struct i915_page_directory * const pml4 = ppgtt->pd;
903 struct i915_page_directory *pdp;
906 GEM_BUG_ON(!i915_vm_is_4lvl(vm));
908 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
910 GEM_BUG_ON(pdp == vm->scratch_pdp);
912 if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
915 spin_lock(&pml4->lock);
916 if (!atomic_read(&pdp->used)) {
917 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
918 pml4->entry[pml4e] = vm->scratch_pdp;
921 spin_unlock(&pml4->lock);
927 static inline struct sgt_dma {
928 struct scatterlist *sg;
930 } sgt_dma(struct i915_vma *vma) {
931 struct scatterlist *sg = vma->pages->sgl;
932 dma_addr_t addr = sg_dma_address(sg);
933 return (struct sgt_dma) { sg, addr, addr + sg->length };
936 struct gen8_insert_pte {
943 static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
945 return (struct gen8_insert_pte) {
946 gen8_pml4e_index(start),
947 gen8_pdpe_index(start),
948 gen8_pde_index(start),
949 gen8_pte_index(start),
953 static __always_inline bool
954 gen8_ppgtt_insert_pte_entries(struct i915_ppgtt *ppgtt,
955 struct i915_page_directory *pdp,
956 struct sgt_dma *iter,
957 struct gen8_insert_pte *idx,
958 enum i915_cache_level cache_level,
961 struct i915_page_directory *pd;
962 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
966 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
967 pd = i915_pd_entry(pdp, idx->pdpe);
968 vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde));
970 vaddr[idx->pte] = pte_encode | iter->dma;
972 iter->dma += I915_GTT_PAGE_SIZE;
973 if (iter->dma >= iter->max) {
974 iter->sg = __sg_next(iter->sg);
980 iter->dma = sg_dma_address(iter->sg);
981 iter->max = iter->dma + iter->sg->length;
984 if (++idx->pte == GEN8_PTES) {
987 if (++idx->pde == I915_PDES) {
990 /* Limited by sg length for 3lvl */
991 if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
997 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
998 pd = pdp->entry[idx->pdpe];
1001 kunmap_atomic(vaddr);
1002 vaddr = kmap_atomic_px(i915_pt_entry(pd, idx->pde));
1005 kunmap_atomic(vaddr);
1010 static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
1011 struct i915_vma *vma,
1012 enum i915_cache_level cache_level,
1015 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1016 struct sgt_dma iter = sgt_dma(vma);
1017 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1019 gen8_ppgtt_insert_pte_entries(ppgtt, ppgtt->pd, &iter, &idx,
1020 cache_level, flags);
1022 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1025 static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
1026 struct i915_page_directory *pml4,
1027 struct sgt_dma *iter,
1028 enum i915_cache_level cache_level,
1031 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
1032 u64 start = vma->node.start;
1033 dma_addr_t rem = iter->sg->length;
1036 struct gen8_insert_pte idx = gen8_insert_pte(start);
1037 struct i915_page_directory *pdp =
1038 i915_pdp_entry(pml4, idx.pml4e);
1039 struct i915_page_directory *pd = i915_pd_entry(pdp, idx.pdpe);
1040 unsigned int page_size;
1041 bool maybe_64K = false;
1042 gen8_pte_t encode = pte_encode;
1046 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
1047 IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
1048 rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
1051 page_size = I915_GTT_PAGE_SIZE_2M;
1053 encode |= GEN8_PDE_PS_2M;
1055 vaddr = kmap_atomic_px(pd);
1057 struct i915_page_table *pt = i915_pt_entry(pd, idx.pde);
1061 page_size = I915_GTT_PAGE_SIZE;
1064 vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
1065 IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1066 (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1067 rem >= (max - index) * I915_GTT_PAGE_SIZE))
1070 vaddr = kmap_atomic_px(pt);
1074 GEM_BUG_ON(iter->sg->length < page_size);
1075 vaddr[index++] = encode | iter->dma;
1078 iter->dma += page_size;
1080 if (iter->dma >= iter->max) {
1081 iter->sg = __sg_next(iter->sg);
1085 rem = iter->sg->length;
1086 iter->dma = sg_dma_address(iter->sg);
1087 iter->max = iter->dma + rem;
1089 if (maybe_64K && index < max &&
1090 !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1091 (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1092 rem >= (max - index) * I915_GTT_PAGE_SIZE)))
1095 if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
1098 } while (rem >= page_size && index < max);
1100 kunmap_atomic(vaddr);
1103 * Is it safe to mark the 2M block as 64K? -- Either we have
1104 * filled the whole page-table with 64K entries, or filled part of
1105 * it and have reached the end of the sg table and we have
1110 (i915_vm_has_scratch_64K(vma->vm) &&
1111 !iter->sg && IS_ALIGNED(vma->node.start +
1113 I915_GTT_PAGE_SIZE_2M)))) {
1114 vaddr = kmap_atomic_px(pd);
1115 vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
1116 kunmap_atomic(vaddr);
1117 page_size = I915_GTT_PAGE_SIZE_64K;
1120 * We write all 4K page entries, even when using 64K
1121 * pages. In order to verify that the HW isn't cheating
1122 * by using the 4K PTE instead of the 64K PTE, we want
1123 * to remove all the surplus entries. If the HW skipped
1124 * the 64K PTE, it will read/write into the scratch page
1125 * instead - which we detect as missing results during
1128 if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
1131 encode = vma->vm->scratch_pte;
1132 vaddr = kmap_atomic_px(i915_pt_entry(pd,
1135 for (i = 1; i < index; i += 16)
1136 memset64(vaddr + i, encode, 15);
1138 kunmap_atomic(vaddr);
1142 vma->page_sizes.gtt |= page_size;
1146 static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
1147 struct i915_vma *vma,
1148 enum i915_cache_level cache_level,
1151 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1152 struct sgt_dma iter = sgt_dma(vma);
1153 struct i915_page_directory * const pml4 = ppgtt->pd;
1155 if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
1156 gen8_ppgtt_insert_huge_entries(vma, pml4, &iter, cache_level,
1159 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1161 while (gen8_ppgtt_insert_pte_entries(ppgtt,
1162 i915_pdp_entry(pml4, idx.pml4e++),
1163 &iter, &idx, cache_level,
1165 GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
1167 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1171 static void gen8_free_page_tables(struct i915_address_space *vm,
1172 struct i915_page_directory *pd)
1176 for (i = 0; i < I915_PDES; i++) {
1177 if (pd->entry[i] != vm->scratch_pt)
1178 free_pt(vm, pd->entry[i]);
1182 static int gen8_init_scratch(struct i915_address_space *vm)
1187 * If everybody agrees not to write into the scratch page,
1188 * we can reuse it for all vm, keeping contexts and processes separate.
1190 if (vm->has_read_only &&
1191 vm->i915->kernel_context &&
1192 vm->i915->kernel_context->vm) {
1193 struct i915_address_space *clone = vm->i915->kernel_context->vm;
1195 GEM_BUG_ON(!clone->has_read_only);
1197 vm->scratch_order = clone->scratch_order;
1198 vm->scratch_pte = clone->scratch_pte;
1199 vm->scratch_pt = clone->scratch_pt;
1200 vm->scratch_pd = clone->scratch_pd;
1201 vm->scratch_pdp = clone->scratch_pdp;
1205 ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1210 gen8_pte_encode(vm->scratch_page.daddr,
1214 vm->scratch_pt = alloc_pt(vm);
1215 if (IS_ERR(vm->scratch_pt)) {
1216 ret = PTR_ERR(vm->scratch_pt);
1217 goto free_scratch_page;
1220 vm->scratch_pd = alloc_pd(vm);
1221 if (IS_ERR(vm->scratch_pd)) {
1222 ret = PTR_ERR(vm->scratch_pd);
1226 if (i915_vm_is_4lvl(vm)) {
1227 vm->scratch_pdp = alloc_pd(vm);
1228 if (IS_ERR(vm->scratch_pdp)) {
1229 ret = PTR_ERR(vm->scratch_pdp);
1234 gen8_initialize_pt(vm, vm->scratch_pt);
1235 init_pd_with_page(vm, vm->scratch_pd, vm->scratch_pt);
1236 if (i915_vm_is_4lvl(vm))
1237 init_pd(vm, vm->scratch_pdp, vm->scratch_pd);
1242 free_pd(vm, vm->scratch_pd);
1244 free_pt(vm, vm->scratch_pt);
1246 cleanup_scratch_page(vm);
1251 static int gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
1253 struct i915_address_space *vm = &ppgtt->vm;
1254 struct drm_i915_private *dev_priv = vm->i915;
1255 enum vgt_g2v_type msg;
1258 if (i915_vm_is_4lvl(vm)) {
1259 const u64 daddr = px_dma(ppgtt->pd);
1261 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
1262 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
1264 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
1265 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
1267 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1268 const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
1270 I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
1271 I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
1274 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
1275 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
1278 I915_WRITE(vgtif_reg(g2v_notify), msg);
1283 static void gen8_free_scratch(struct i915_address_space *vm)
1285 if (!vm->scratch_page.daddr)
1288 if (i915_vm_is_4lvl(vm))
1289 free_pd(vm, vm->scratch_pdp);
1290 free_pd(vm, vm->scratch_pd);
1291 free_pt(vm, vm->scratch_pt);
1292 cleanup_scratch_page(vm);
1295 static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
1296 struct i915_page_directory *pdp)
1298 const unsigned int pdpes = i915_pdpes_per_pdp(vm);
1301 for (i = 0; i < pdpes; i++) {
1302 if (pdp->entry[i] == vm->scratch_pd)
1305 gen8_free_page_tables(vm, pdp->entry[i]);
1306 free_pd(vm, pdp->entry[i]);
1312 static void gen8_ppgtt_cleanup_4lvl(struct i915_ppgtt *ppgtt)
1314 struct i915_page_directory * const pml4 = ppgtt->pd;
1317 for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
1318 struct i915_page_directory *pdp = i915_pdp_entry(pml4, i);
1320 if (pdp == ppgtt->vm.scratch_pdp)
1323 gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, pdp);
1326 free_pd(&ppgtt->vm, pml4);
1329 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
1331 struct drm_i915_private *i915 = vm->i915;
1332 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1334 if (intel_vgpu_active(i915))
1335 gen8_ppgtt_notify_vgt(ppgtt, false);
1337 if (i915_vm_is_4lvl(vm))
1338 gen8_ppgtt_cleanup_4lvl(ppgtt);
1340 gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pd);
1342 gen8_free_scratch(vm);
1345 static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
1346 struct i915_page_directory *pd,
1347 u64 start, u64 length)
1349 struct i915_page_table *pt, *alloc = NULL;
1354 spin_lock(&pd->lock);
1355 gen8_for_each_pde(pt, pd, start, length, pde) {
1356 const int count = gen8_pte_count(start, length);
1358 if (pt == vm->scratch_pt) {
1359 spin_unlock(&pd->lock);
1361 pt = fetch_and_zero(&alloc);
1369 if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
1370 gen8_initialize_pt(vm, pt);
1372 spin_lock(&pd->lock);
1373 if (pd->entry[pde] == vm->scratch_pt) {
1374 gen8_ppgtt_set_pde(vm, pd, pt, pde);
1375 pd->entry[pde] = pt;
1376 atomic_inc(&pd->used);
1379 pt = pd->entry[pde];
1383 atomic_add(count, &pt->used);
1385 spin_unlock(&pd->lock);
1389 gen8_ppgtt_clear_pd(vm, pd, from, start - from);
1396 static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
1397 struct i915_page_directory *pdp,
1398 u64 start, u64 length)
1400 struct i915_page_directory *pd, *alloc = NULL;
1405 spin_lock(&pdp->lock);
1406 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1407 if (pd == vm->scratch_pd) {
1408 spin_unlock(&pdp->lock);
1410 pd = fetch_and_zero(&alloc);
1418 init_pd_with_page(vm, pd, vm->scratch_pt);
1420 spin_lock(&pdp->lock);
1421 if (pdp->entry[pdpe] == vm->scratch_pd) {
1422 gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
1423 pdp->entry[pdpe] = pd;
1424 atomic_inc(&pdp->used);
1427 pd = pdp->entry[pdpe];
1430 atomic_inc(&pd->used);
1431 spin_unlock(&pdp->lock);
1433 ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
1437 spin_lock(&pdp->lock);
1438 atomic_dec(&pd->used);
1440 spin_unlock(&pdp->lock);
1444 spin_lock(&pdp->lock);
1445 if (atomic_dec_and_test(&pd->used)) {
1446 gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
1447 pdp->entry[pdpe] = vm->scratch_pd;
1448 GEM_BUG_ON(!atomic_read(&pdp->used));
1449 atomic_dec(&pdp->used);
1451 alloc = pd; /* defer the free to after the lock */
1453 spin_unlock(&pdp->lock);
1455 gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
1462 static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
1463 u64 start, u64 length)
1465 return gen8_ppgtt_alloc_pdp(vm,
1466 i915_vm_to_ppgtt(vm)->pd, start, length);
1469 static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
1470 u64 start, u64 length)
1472 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1473 struct i915_page_directory * const pml4 = ppgtt->pd;
1474 struct i915_page_directory *pdp, *alloc = NULL;
1479 spin_lock(&pml4->lock);
1480 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1481 if (pdp == vm->scratch_pdp) {
1482 spin_unlock(&pml4->lock);
1484 pdp = fetch_and_zero(&alloc);
1492 init_pd(vm, pdp, vm->scratch_pd);
1494 spin_lock(&pml4->lock);
1495 if (pml4->entry[pml4e] == vm->scratch_pdp) {
1496 gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
1497 pml4->entry[pml4e] = pdp;
1500 pdp = pml4->entry[pml4e];
1503 atomic_inc(&pdp->used);
1504 spin_unlock(&pml4->lock);
1506 ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
1510 spin_lock(&pml4->lock);
1511 atomic_dec(&pdp->used);
1513 spin_unlock(&pml4->lock);
1517 spin_lock(&pml4->lock);
1518 if (atomic_dec_and_test(&pdp->used)) {
1519 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
1520 pml4->entry[pml4e] = vm->scratch_pdp;
1522 alloc = pdp; /* defer the free until after the lock */
1524 spin_unlock(&pml4->lock);
1526 gen8_ppgtt_clear_4lvl(vm, from, start - from);
1533 static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt)
1535 struct i915_address_space *vm = &ppgtt->vm;
1536 struct i915_page_directory *pdp = ppgtt->pd;
1537 struct i915_page_directory *pd;
1538 u64 start = 0, length = ppgtt->vm.total;
1542 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1547 init_pd_with_page(vm, pd, vm->scratch_pt);
1548 gen8_ppgtt_set_pdpe(pdp, pd, pdpe);
1550 atomic_inc(&pdp->used);
1553 atomic_inc(&pdp->used); /* never remove */
1559 gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
1560 gen8_ppgtt_set_pdpe(pdp, vm->scratch_pd, pdpe);
1563 atomic_set(&pdp->used, 0);
1567 static void ppgtt_init(struct drm_i915_private *i915,
1568 struct i915_ppgtt *ppgtt)
1570 ppgtt->vm.i915 = i915;
1571 ppgtt->vm.dma = &i915->drm.pdev->dev;
1572 ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
1574 i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
1576 ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
1577 ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
1578 ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
1579 ppgtt->vm.vma_ops.clear_pages = clear_pages;
1583 * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
1584 * registers, with a net effect resembling a 2-level page table in normal x86 terms.
1585 * Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of legacy 32b address space.
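 *
 * As an illustrative sketch only (plain arithmetic, not definitions from this
 * file): a canonical 48b gen8 GPU virtual address decomposes into 512-entry
 * table indices, 9 bits per level above the 4K page offset, which is what
 * gen8_insert_pte() computes via the gen8_*_index() helpers:
 *
 *	pml4e = (addr >> 39) & 0x1ff;
 *	pdpe  = (addr >> 30) & 0x1ff;
 *	pde   = (addr >> 21) & 0x1ff;
 *	pte   = (addr >> 12) & 0x1ff;
 *
 * In the legacy 32b (3-level) mode described above, pml4e is always 0 and
 * only 4 pdpe slots are used.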
1589 static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
1591 struct i915_ppgtt *ppgtt;
1594 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1596 return ERR_PTR(-ENOMEM);
1598 ppgtt_init(i915, ppgtt);
1601 * From bdw, there is hw support for read-only pages in the PPGTT.
1603 * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
1606 ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
1608 /* There are only a few exceptions for gen >= 6: chv and bxt.
1609 * And we are not sure about the latter, so play safe for now.
1611 if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
1612 ppgtt->vm.pt_kmap_wc = true;
1614 err = gen8_init_scratch(&ppgtt->vm);
1618 ppgtt->pd = __alloc_pd();
1621 goto err_free_scratch;
1624 if (i915_vm_is_4lvl(&ppgtt->vm)) {
1625 err = setup_px(&ppgtt->vm, ppgtt->pd);
1629 init_pd(&ppgtt->vm, ppgtt->pd, ppgtt->vm.scratch_pdp);
1631 ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
1632 ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
1633 ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
1636 * We don't need to setup dma for top level pdp, only
1637 * for entries. So point entries to scratch.
1639 memset_p(ppgtt->pd->entry, ppgtt->vm.scratch_pd,
1642 if (intel_vgpu_active(i915)) {
1643 err = gen8_preallocate_top_level_pdp(ppgtt);
1648 ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
1649 ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
1650 ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
1653 if (intel_vgpu_active(i915))
1654 gen8_ppgtt_notify_vgt(ppgtt, true);
1656 ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
1661 free_pd(&ppgtt->vm, ppgtt->pd);
1663 gen8_free_scratch(&ppgtt->vm);
1666 return ERR_PTR(err);
1669 /* Write pde (index) from the page directory @pd to the page table @pt */
1670 static inline void gen6_write_pde(const struct gen6_ppgtt *ppgtt,
1671 const unsigned int pde,
1672 const struct i915_page_table *pt)
1674 /* Caller needs to make sure the write completes if necessary */
1675 iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1676 ppgtt->pd_addr + pde);
1679 static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
1681 struct intel_engine_cs *engine;
1682 u32 ecochk, ecobits;
1683 enum intel_engine_id id;
1685 ecobits = I915_READ(GAC_ECO_BITS);
1686 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1688 ecochk = I915_READ(GAM_ECOCHK);
1689 if (IS_HASWELL(dev_priv)) {
1690 ecochk |= ECOCHK_PPGTT_WB_HSW;
1692 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1693 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1695 I915_WRITE(GAM_ECOCHK, ecochk);
1697 for_each_engine(engine, dev_priv, id) {
1698 /* GFX_MODE is per-ring on gen7+ */
1699 ENGINE_WRITE(engine,
1701 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1705 static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
1707 u32 ecochk, gab_ctl, ecobits;
1709 ecobits = I915_READ(GAC_ECO_BITS);
1710 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1711 ECOBITS_PPGTT_CACHE64B);
1713 gab_ctl = I915_READ(GAB_CTL);
1714 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1716 ecochk = I915_READ(GAM_ECOCHK);
1717 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1719 if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
1720 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
1723 /* PPGTT support for Sandybridge/Gen6 and later */
1724 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1725 u64 start, u64 length)
1727 struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1728 const unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
1729 const gen6_pte_t scratch_pte = vm->scratch_pte;
1730 unsigned int pde = first_entry / GEN6_PTES;
1731 unsigned int pte = first_entry % GEN6_PTES;
1732 unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
1734 while (num_entries) {
1735 struct i915_page_table * const pt =
1736 i915_pt_entry(ppgtt->base.pd, pde++);
1737 const unsigned int count = min(num_entries, GEN6_PTES - pte);
1740 GEM_BUG_ON(pt == vm->scratch_pt);
1742 num_entries -= count;
1744 GEM_BUG_ON(count > atomic_read(&pt->used));
1745 if (!atomic_sub_return(count, &pt->used))
1746 ppgtt->scan_for_unused_pt = true;
1749 * Note that the hw doesn't support removing PDE on the fly
1750 * (they are cached inside the context with no means to
1751 * invalidate the cache), so we can only reset the PTE
1752 * entries back to scratch.
1755 vaddr = kmap_atomic_px(pt);
1756 memset32(vaddr + pte, scratch_pte, count);
1757 kunmap_atomic(vaddr);
1763 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1764 struct i915_vma *vma,
1765 enum i915_cache_level cache_level,
1768 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1769 struct i915_page_directory * const pd = ppgtt->pd;
1770 unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
1771 unsigned act_pt = first_entry / GEN6_PTES;
1772 unsigned act_pte = first_entry % GEN6_PTES;
1773 const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1774 struct sgt_dma iter = sgt_dma(vma);
1777 GEM_BUG_ON(i915_pt_entry(pd, act_pt) == vm->scratch_pt);
1779 vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt));
1781 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
1783 iter.dma += I915_GTT_PAGE_SIZE;
1784 if (iter.dma == iter.max) {
1785 iter.sg = __sg_next(iter.sg);
1789 iter.dma = sg_dma_address(iter.sg);
1790 iter.max = iter.dma + iter.sg->length;
1793 if (++act_pte == GEN6_PTES) {
1794 kunmap_atomic(vaddr);
1795 vaddr = kmap_atomic_px(i915_pt_entry(pd, ++act_pt));
1799 kunmap_atomic(vaddr);
1801 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1804 static int gen6_alloc_va_range(struct i915_address_space *vm,
1805 u64 start, u64 length)
1807 struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1808 struct i915_page_directory * const pd = ppgtt->base.pd;
1809 struct i915_page_table *pt, *alloc = NULL;
1810 intel_wakeref_t wakeref;
1816 wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
1818 spin_lock(&pd->lock);
1819 gen6_for_each_pde(pt, pd, start, length, pde) {
1820 const unsigned int count = gen6_pte_count(start, length);
1822 if (pt == vm->scratch_pt) {
1823 spin_unlock(&pd->lock);
1825 pt = fetch_and_zero(&alloc);
1833 gen6_initialize_pt(vm, pt);
1835 spin_lock(&pd->lock);
1836 if (pd->entry[pde] == vm->scratch_pt) {
1837 pd->entry[pde] = pt;
1838 if (i915_vma_is_bound(ppgtt->vma,
1839 I915_VMA_GLOBAL_BIND)) {
1840 gen6_write_pde(ppgtt, pde, pt);
1845 pt = pd->entry[pde];
1849 atomic_add(count, &pt->used);
1851 spin_unlock(&pd->lock);
1854 mark_tlbs_dirty(&ppgtt->base);
1855 gen6_ggtt_invalidate(vm->i915);
1861 gen6_ppgtt_clear_range(vm, from, start - from);
1865 intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
1869 static int gen6_ppgtt_init_scratch(struct gen6_ppgtt *ppgtt)
1871 struct i915_address_space * const vm = &ppgtt->base.vm;
1872 struct i915_page_directory * const pd = ppgtt->base.pd;
1873 struct i915_page_table *unused;
1877 ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1881 vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
1885 vm->scratch_pt = alloc_pt(vm);
1886 if (IS_ERR(vm->scratch_pt)) {
1887 cleanup_scratch_page(vm);
1888 return PTR_ERR(vm->scratch_pt);
1891 gen6_initialize_pt(vm, vm->scratch_pt);
1893 gen6_for_all_pdes(unused, pd, pde)
1894 pd->entry[pde] = vm->scratch_pt;
1899 static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
1901 free_pt(vm, vm->scratch_pt);
1902 cleanup_scratch_page(vm);
1905 static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt)
1907 struct i915_page_directory * const pd = ppgtt->base.pd;
1908 struct i915_page_table *pt;
1911 gen6_for_all_pdes(pt, pd, pde)
1912 if (pt != ppgtt->base.vm.scratch_pt)
1913 free_pt(&ppgtt->base.vm, pt);
1916 struct gen6_ppgtt_cleanup_work {
1917 struct work_struct base;
1918 struct i915_vma *vma;
1921 static void gen6_ppgtt_cleanup_work(struct work_struct *wrk)
1923 struct gen6_ppgtt_cleanup_work *work =
1924 container_of(wrk, typeof(*work), base);
1925 /* Side note, vma->vm is the GGTT not the ppgtt we just destroyed! */
1926 struct drm_i915_private *i915 = work->vma->vm->i915;
1928 mutex_lock(&i915->drm.struct_mutex);
1929 i915_vma_destroy(work->vma);
1930 mutex_unlock(&i915->drm.struct_mutex);
1935 static int nop_set_pages(struct i915_vma *vma)
1940 static void nop_clear_pages(struct i915_vma *vma)
1944 static int nop_bind(struct i915_vma *vma,
1945 enum i915_cache_level cache_level,
1951 static void nop_unbind(struct i915_vma *vma)
1955 static const struct i915_vma_ops nop_vma_ops = {
1956 .set_pages = nop_set_pages,
1957 .clear_pages = nop_clear_pages,
1958 .bind_vma = nop_bind,
1959 .unbind_vma = nop_unbind,
1962 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1964 struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1965 struct gen6_ppgtt_cleanup_work *work = ppgtt->work;
1967 /* FIXME remove the struct_mutex to bring the locking under control */
1968 INIT_WORK(&work->base, gen6_ppgtt_cleanup_work);
1969 work->vma = ppgtt->vma;
1970 work->vma->ops = &nop_vma_ops;
1971 schedule_work(&work->base);
1973 gen6_ppgtt_free_pd(ppgtt);
1974 gen6_ppgtt_free_scratch(vm);
1975 kfree(ppgtt->base.pd);
1978 static int pd_vma_set_pages(struct i915_vma *vma)
1980 vma->pages = ERR_PTR(-ENODEV);
1984 static void pd_vma_clear_pages(struct i915_vma *vma)
1986 GEM_BUG_ON(!vma->pages);
1991 static int pd_vma_bind(struct i915_vma *vma,
1992 enum i915_cache_level cache_level,
1995 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
1996 struct gen6_ppgtt *ppgtt = vma->private;
1997 u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
1998 struct i915_page_table *pt;
2001 ppgtt->base.pd->base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
2002 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
2004 gen6_for_all_pdes(pt, ppgtt->base.pd, pde)
2005 gen6_write_pde(ppgtt, pde, pt);
2007 mark_tlbs_dirty(&ppgtt->base);
2008 gen6_ggtt_invalidate(ppgtt->base.vm.i915);
2013 static void pd_vma_unbind(struct i915_vma *vma)
2015 struct gen6_ppgtt *ppgtt = vma->private;
2016 struct i915_page_directory * const pd = ppgtt->base.pd;
2017 struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
2018 struct i915_page_table *pt;
2021 if (!ppgtt->scan_for_unused_pt)
2024 /* Free all no longer used page tables */
2025 gen6_for_all_pdes(pt, ppgtt->base.pd, pde) {
2026 if (atomic_read(&pt->used) || pt == scratch_pt)
2029 free_pt(&ppgtt->base.vm, pt);
2030 pd->entry[pde] = scratch_pt;
2033 ppgtt->scan_for_unused_pt = false;
2036 static const struct i915_vma_ops pd_vma_ops = {
2037 .set_pages = pd_vma_set_pages,
2038 .clear_pages = pd_vma_clear_pages,
2039 .bind_vma = pd_vma_bind,
2040 .unbind_vma = pd_vma_unbind,
2043 static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
2045 struct drm_i915_private *i915 = ppgtt->base.vm.i915;
2046 struct i915_ggtt *ggtt = &i915->ggtt;
2047 struct i915_vma *vma;
2049 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
2050 GEM_BUG_ON(size > ggtt->vm.total);
2052 vma = i915_vma_alloc();
2054 return ERR_PTR(-ENOMEM);
2056 i915_active_init(i915, &vma->active, NULL);
2057 INIT_ACTIVE_REQUEST(&vma->last_fence);
2059 vma->vm = &ggtt->vm;
2060 vma->ops = &pd_vma_ops;
2061 vma->private = ppgtt;
2064 vma->fence_size = size;
2065 vma->flags = I915_VMA_GGTT;
2066 vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */
2068 INIT_LIST_HEAD(&vma->obj_link);
2069 INIT_LIST_HEAD(&vma->closed_link);
2071 mutex_lock(&vma->vm->mutex);
2072 list_add(&vma->vm_link, &vma->vm->unbound_list);
2073 mutex_unlock(&vma->vm->mutex);
2078 int gen6_ppgtt_pin(struct i915_ppgtt *base)
2080 struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
2083 GEM_BUG_ON(ppgtt->base.vm.closed);
2086 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
2087 * which will be pinned into every active context.
2088 * (When vma->pin_count becomes atomic, I expect we will naturally
2089 * need a larger, unpacked, type and kill this redundancy.)
2091 if (ppgtt->pin_count++)
2095 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
2096 * allocator works in address space sizes, so it's multiplied by page
2097 * size. We allocate at the top of the GTT to avoid fragmentation.
2099 err = i915_vma_pin(ppgtt->vma,
2101 PIN_GLOBAL | PIN_HIGH);
2108 ppgtt->pin_count = 0;
2112 void gen6_ppgtt_unpin(struct i915_ppgtt *base)
2114 struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
2116 GEM_BUG_ON(!ppgtt->pin_count);
2117 if (--ppgtt->pin_count)
2120 i915_vma_unpin(ppgtt->vma);
2123 void gen6_ppgtt_unpin_all(struct i915_ppgtt *base)
2125 struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base);
2127 if (!ppgtt->pin_count)
2130 ppgtt->pin_count = 0;
2131 i915_vma_unpin(ppgtt->vma);
2134 static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
2136 struct i915_ggtt * const ggtt = &i915->ggtt;
2137 struct gen6_ppgtt *ppgtt;
2140 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2142 return ERR_PTR(-ENOMEM);
2144 ppgtt_init(i915, &ppgtt->base);
2146 ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
2147 ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
2148 ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
2149 ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
2151 ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
2153 ppgtt->work = kmalloc(sizeof(*ppgtt->work), GFP_KERNEL);
2159 ppgtt->base.pd = __alloc_pd();
2160 if (!ppgtt->base.pd) {
2165 err = gen6_ppgtt_init_scratch(ppgtt);
2169 ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
2170 if (IS_ERR(ppgtt->vma)) {
2171 err = PTR_ERR(ppgtt->vma);
2175 return &ppgtt->base;
2178 gen6_ppgtt_free_scratch(&ppgtt->base.vm);
2180 kfree(ppgtt->base.pd);
2185 return ERR_PTR(err);
2188 static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
2190 /* This function is for GTT-related workarounds. It is called on driver
2191 * load and after a GPU reset, so you can place workarounds here even if
2192 * they get overwritten by a GPU reset.
2194 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
2195 if (IS_BROADWELL(dev_priv))
2196 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2197 else if (IS_CHERRYVIEW(dev_priv))
2198 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
2199 else if (IS_GEN9_LP(dev_priv))
2200 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2201 else if (INTEL_GEN(dev_priv) >= 9)
2202 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2205 * To support 64K PTEs we need to first enable the use of the
2206 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
2207 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
2208 * shouldn't be needed after GEN10.
2210 * 64K pages were first introduced from BDW+, although technically they
2211 * only *work* from gen9+. For pre-BDW we instead have the option for
2212 * 32K pages, but we don't currently have any support for it in our
2215 if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
2216 INTEL_GEN(dev_priv) <= 10)
2217 I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
2218 I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
2219 GAMW_ECO_ENABLE_64K_IPS_FIELD);
2222 int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
2224 gtt_write_workarounds(dev_priv);
2226 if (IS_GEN(dev_priv, 6))
2227 gen6_ppgtt_enable(dev_priv);
2228 else if (IS_GEN(dev_priv, 7))
2229 gen7_ppgtt_enable(dev_priv);
2234 static struct i915_ppgtt *
2235 __ppgtt_create(struct drm_i915_private *i915)
2237 if (INTEL_GEN(i915) < 8)
2238 return gen6_ppgtt_create(i915);
2240 return gen8_ppgtt_create(i915);
2244 i915_ppgtt_create(struct drm_i915_private *i915)
2246 struct i915_ppgtt *ppgtt;
2248 ppgtt = __ppgtt_create(i915);
2252 trace_i915_ppgtt_create(&ppgtt->vm);
2257 static void ppgtt_destroy_vma(struct i915_address_space *vm)
2259 struct list_head *phases[] = {
2266 for (phase = phases; *phase; phase++) {
2267 struct i915_vma *vma, *vn;
2269 list_for_each_entry_safe(vma, vn, *phase, vm_link)
2270 i915_vma_destroy(vma);
2274 void i915_vm_release(struct kref *kref)
2276 struct i915_address_space *vm =
2277 container_of(kref, struct i915_address_space, ref);
2279 GEM_BUG_ON(i915_is_ggtt(vm));
2280 trace_i915_ppgtt_release(vm);
2282 ppgtt_destroy_vma(vm);
2284 GEM_BUG_ON(!list_empty(&vm->bound_list));
2285 GEM_BUG_ON(!list_empty(&vm->unbound_list));
2288 i915_address_space_fini(vm);
2293 /* Certain Gen5 chipsets require idling the GPU before
2294 * unmapping anything from the GTT when VT-d is enabled.
2296 static bool needs_idle_maps(struct drm_i915_private *dev_priv)
2298 /* Query intel_iommu to see if we need the workaround. Presumably that
2301 return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
2304 void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
2306 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2308 /* Don't bother messing with faults pre GEN6 as we have little
2309 * documentation supporting that it's a good idea.
2311 if (INTEL_GEN(dev_priv) < 6)
2314 i915_check_and_clear_faults(dev_priv);
2316 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
2318 i915_ggtt_invalidate(dev_priv);
2321 int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2322 struct sg_table *pages)
2325 if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
2326 pages->sgl, pages->nents,
2327 PCI_DMA_BIDIRECTIONAL,
2332 * If the DMA remap fails, one cause can be that we have
2333 * too many objects pinned in a small remapping table,
2334 * such as swiotlb. Incrementally purge all other objects and
2335 * try again - if there are no more pages to remove from
2336 * the DMA remapper, i915_gem_shrink will return 0.
2338 GEM_BUG_ON(obj->mm.pages == pages);
2339 } while (i915_gem_shrink(to_i915(obj->base.dev),
2340 obj->base.size >> PAGE_SHIFT, NULL,
2342 I915_SHRINK_UNBOUND));
2347 static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2352 static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2355 enum i915_cache_level level,
2358 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2359 gen8_pte_t __iomem *pte =
2360 (gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2362 gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
2364 ggtt->invalidate(vm->i915);
2367 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2368 struct i915_vma *vma,
2369 enum i915_cache_level level,
2372 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2373 struct sgt_iter sgt_iter;
2374 gen8_pte_t __iomem *gtt_entries;
2375 const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
2379 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
2380 * not to allow the user to override access to a read only page.
2383 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2384 gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
2385 for_each_sgt_dma(addr, sgt_iter, vma->pages)
2386 gen8_set_pte(gtt_entries++, pte_encode | addr);
2389 * We want to flush the TLBs only after we're certain all the PTE
2390 * updates have finished.
2392 ggtt->invalidate(vm->i915);
2395 static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2398 enum i915_cache_level level,
2401 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2402 gen6_pte_t __iomem *pte =
2403 (gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
2405 iowrite32(vm->pte_encode(addr, level, flags), pte);
2407 ggtt->invalidate(vm->i915);
2411 * Binds an object into the global gtt with the specified cache level. The object
2412 * will be accessible to the GPU via commands whose operands reference offsets
2413 * within the global GTT as well as accessible by the CPU through the GMADR
2414 * mapped BAR (dev_priv->mm.gtt->gtt).
2416 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2417 struct i915_vma *vma,
2418 enum i915_cache_level level,
2421 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2422 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2423 unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
2424 struct sgt_iter iter;
2426 for_each_sgt_dma(addr, iter, vma->pages)
2427 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2430 * We want to flush the TLBs only after we're certain all the PTE
2431 * updates have finished.
2433 ggtt->invalidate(vm->i915);
2436 static void nop_clear_range(struct i915_address_space *vm,
2437 u64 start, u64 length)
2441 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2442 u64 start, u64 length)
2444 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2445 unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2446 unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2447 const gen8_pte_t scratch_pte = vm->scratch_pte;
2448 gen8_pte_t __iomem *gtt_base =
2449 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2450 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2453 if (WARN(num_entries > max_entries,
2454 "First entry = %d; Num entries = %d (max=%d)\n",
2455 first_entry, num_entries, max_entries))
2456 num_entries = max_entries;
2458 for (i = 0; i < num_entries; i++)
2459 gen8_set_pte(&gtt_base[i], scratch_pte);
2462 static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2464 struct drm_i915_private *dev_priv = vm->i915;
2467 * Make sure the internal GAM fifo has been cleared of all GTT
2468 * writes before exiting stop_machine(). This guarantees that
2469 * any aperture accesses waiting to start in another process
2470 * cannot back up behind the GTT writes causing a hang.
2471 * The register can be any arbitrary GAM register.
2473 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2476 struct insert_page {
2477 struct i915_address_space *vm;
2480 enum i915_cache_level level;
2483 static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2485 struct insert_page *arg = _arg;
2487 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2488 bxt_vtd_ggtt_wa(arg->vm);
2493 static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2496 enum i915_cache_level level,
2499 struct insert_page arg = { vm, addr, offset, level };
2501 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2504 struct insert_entries {
2505 struct i915_address_space *vm;
2506 struct i915_vma *vma;
2507 enum i915_cache_level level;
2511 static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2513 struct insert_entries *arg = _arg;
2515 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
2516 bxt_vtd_ggtt_wa(arg->vm);
2521 static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2522 struct i915_vma *vma,
2523 enum i915_cache_level level,
2526 struct insert_entries arg = { vm, vma, level, flags };
2528 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2531 struct clear_range {
2532 struct i915_address_space *vm;
2537 static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2539 struct clear_range *arg = _arg;
2541 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2542 bxt_vtd_ggtt_wa(arg->vm);
2547 static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2551 struct clear_range arg = { vm, start, length };
2553 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
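/*
 * Editor's note, summarising the three __BKL wrappers above rather than
 * adding a new code path: each VT-d-affected GGTT update is marshalled into
 * a small argument struct and replayed under stop_machine(), so no other CPU
 * can touch the aperture while the update and the GAM fifo flush run, e.g.
 *
 *	struct insert_page arg = { vm, addr, offset, level };
 *	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
 *
 * where the callback performs the plain gen8 update and then calls
 * bxt_vtd_ggtt_wa() before returning.
 */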
2556 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2557 u64 start, u64 length)
2559 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2560 unsigned first_entry = start / I915_GTT_PAGE_SIZE;
2561 unsigned num_entries = length / I915_GTT_PAGE_SIZE;
2562 gen6_pte_t scratch_pte, __iomem *gtt_base =
2563 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2564 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
2567 if (WARN(num_entries > max_entries,
2568 "First entry = %d; Num entries = %d (max=%d)\n",
2569 first_entry, num_entries, max_entries))
2570 num_entries = max_entries;
2572 scratch_pte = vm->scratch_pte;
2574 for (i = 0; i < num_entries; i++)
2575 iowrite32(scratch_pte, &gtt_base[i]);
2578 static void i915_ggtt_insert_page(struct i915_address_space *vm,
2581 enum i915_cache_level cache_level,
2584 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2585 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2587 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2590 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2591 struct i915_vma *vma,
2592 enum i915_cache_level cache_level,
2595 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2596 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2598 intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2602 static void i915_ggtt_clear_range(struct i915_address_space *vm,
2603 u64 start, u64 length)
2605 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
2608 static int ggtt_bind_vma(struct i915_vma *vma,
2609 enum i915_cache_level cache_level,
2612 struct drm_i915_private *i915 = vma->vm->i915;
2613 struct drm_i915_gem_object *obj = vma->obj;
2614 intel_wakeref_t wakeref;
2617 /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
2619 if (i915_gem_object_is_readonly(obj))
2620 pte_flags |= PTE_READ_ONLY;
2622 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2623 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
2625 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
2628 * Without aliasing PPGTT there's no difference between
2629 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2630 * upgrade to both bound if we bind either to avoid double-binding.
2632 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
2637 static void ggtt_unbind_vma(struct i915_vma *vma)
2639 struct drm_i915_private *i915 = vma->vm->i915;
2640 intel_wakeref_t wakeref;
2642 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2643 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2646 static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2647 enum i915_cache_level cache_level,
2650 struct drm_i915_private *i915 = vma->vm->i915;
2654 /* Currently applicable only to VLV */
2656 if (i915_gem_object_is_readonly(vma->obj))
2657 pte_flags |= PTE_READ_ONLY;
2659 if (flags & I915_VMA_LOCAL_BIND) {
2660 struct i915_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2662 if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
2663 ret = appgtt->vm.allocate_va_range(&appgtt->vm,
2670 appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
2674 if (flags & I915_VMA_GLOBAL_BIND) {
2675 intel_wakeref_t wakeref;
2677 with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
2678 vma->vm->insert_entries(vma->vm, vma,
2679 cache_level, pte_flags);
2686 static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
2688 struct drm_i915_private *i915 = vma->vm->i915;
2690 if (vma->flags & I915_VMA_GLOBAL_BIND) {
2691 struct i915_address_space *vm = vma->vm;
2692 intel_wakeref_t wakeref;
2694 with_intel_runtime_pm(&i915->runtime_pm, wakeref)
2695 vm->clear_range(vm, vma->node.start, vma->size);
2698 if (vma->flags & I915_VMA_LOCAL_BIND) {
2699 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;
2701 vm->clear_range(vm, vma->node.start, vma->size);
2705 void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2706 struct sg_table *pages)
2708 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2709 struct device *kdev = &dev_priv->drm.pdev->dev;
2710 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2712 if (unlikely(ggtt->do_idle_maps)) {
2713 if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
2714 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2715 /* Wait a bit, in hopes it avoids the hang */
2720 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
2723 static int ggtt_set_pages(struct i915_vma *vma)
2727 GEM_BUG_ON(vma->pages);
2729 ret = i915_get_ggtt_vma_pages(vma);
2733 vma->page_sizes = vma->obj->mm.page_sizes;
2738 static void i915_gtt_color_adjust(const struct drm_mm_node *node,
2739 unsigned long color,
2743 if (node->allocated && node->color != color)
2744 *start += I915_GTT_PAGE_SIZE;
2746 /* Also leave a space between the unallocated reserved node after the
2747 * GTT and any objects within the GTT, i.e. we use the color adjustment
2748 * to insert a guard page to prevent prefetches crossing over the GTT boundary.
2751 node = list_next_entry(node, node_list);
2752 if (node->color != color)
2753 *end -= I915_GTT_PAGE_SIZE;
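/*
 * Illustrative sketch (editor's addition): drm_mm consults the color_adjust
 * callback while searching for holes, so with the adjustment above a hole is
 * trimmed by one GTT page on each side whose neighbouring node has a colour
 * (cache level) different from the object being placed:
 *
 *	[ node A ]          hole          [ node B ]
 *	           start += 4K (if A's colour differs)
 *	                      end -= 4K (if B's colour differs)
 *
 * leaving an unused guard page that CS prefetch can cross harmlessly.
 */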
2756 static int init_aliasing_ppgtt(struct drm_i915_private *i915)
2758 struct i915_ggtt *ggtt = &i915->ggtt;
2759 struct i915_ppgtt *ppgtt;
2762 ppgtt = i915_ppgtt_create(i915);
2764 return PTR_ERR(ppgtt);
2766 if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
2772 * Note we only pre-allocate as far as the end of the global
2773 * GTT. On 48b / 4-level page-tables, the difference is very,
2774 * very significant! We have to preallocate as GVT/vgpu does
2775 * not like the page directory disappearing.
2777 err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
2781 i915->mm.aliasing_ppgtt = ppgtt;
2783 GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
2784 ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
2786 GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
2787 ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
2792 i915_vm_put(&ppgtt->vm);
2796 static void fini_aliasing_ppgtt(struct drm_i915_private *i915)
2798 struct i915_ggtt *ggtt = &i915->ggtt;
2799 struct i915_ppgtt *ppgtt;
2801 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2805 i915_vm_put(&ppgtt->vm);
2807 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
2808 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
2811 static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
2816 if (!USES_GUC(ggtt->vm.i915))
2819 GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
2820 size = ggtt->vm.total - GUC_GGTT_TOP;
2822 ret = i915_gem_gtt_reserve(&ggtt->vm, &ggtt->uc_fw, size,
2823 GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
2826 DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
2831 static void ggtt_release_guc_top(struct i915_ggtt *ggtt)
2833 if (drm_mm_node_allocated(&ggtt->uc_fw))
2834 drm_mm_remove_node(&ggtt->uc_fw);
2837 int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
2839 /* Let GEM Manage all of the aperture.
2841 * However, leave one page at the end still bound to the scratch page.
2842 * There are a number of places where the hardware apparently prefetches
2843 * past the end of the object, and we've seen multiple hangs with the
2844 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2845 * aperture. One page should be enough to keep any prefetching inside of the aperture.
2848 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2849 unsigned long hole_start, hole_end;
2850 struct drm_mm_node *entry;
2854 * GuC requires all resources that we're sharing with it to be placed in
2855 * non-WOPCM memory. If GuC is not present or not in use we still need a
2856 * small bias as ring wraparound at offset 0 sometimes hangs. No idea what this is about.
2859 ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
2860 intel_wopcm_guc_size(&dev_priv->wopcm));
2862 ret = intel_vgt_balloon(dev_priv);
2866 /* Reserve a mappable slot for our lockless error capture */
2867 ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
2868 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2869 0, ggtt->mappable_end,
2875 * The upper portion of the GuC address space has a sizeable hole
2876 * (several MB) that is inaccessible by GuC. Reserve this range within
2877 * GGTT as it can comfortably hold GuC/HuC firmware images.
2879 ret = ggtt_reserve_guc_top(ggtt);
2883 /* Clear any non-preallocated blocks */
2884 drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
2885 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2886 hole_start, hole_end);
2887 ggtt->vm.clear_range(&ggtt->vm, hole_start,
2888 hole_end - hole_start);
2891 /* And finally clear the reserved guard page */
2892 ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
2894 if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
2895 ret = init_aliasing_ppgtt(dev_priv);
2903 ggtt_release_guc_top(ggtt);
2905 drm_mm_remove_node(&ggtt->error_capture);
2910 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
2911 * @dev_priv: i915 device
2913 void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
2915 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2916 struct i915_vma *vma, *vn;
2917 struct pagevec *pvec;
2919 ggtt->vm.closed = true;
2921 mutex_lock(&dev_priv->drm.struct_mutex);
2922 fini_aliasing_ppgtt(dev_priv);
2924 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
2925 WARN_ON(i915_vma_unbind(vma));
2927 if (drm_mm_node_allocated(&ggtt->error_capture))
2928 drm_mm_remove_node(&ggtt->error_capture);
2930 ggtt_release_guc_top(ggtt);
2932 if (drm_mm_initialized(&ggtt->vm.mm)) {
2933 intel_vgt_deballoon(dev_priv);
2934 i915_address_space_fini(&ggtt->vm);
2937 ggtt->vm.cleanup(&ggtt->vm);
2939 pvec = &dev_priv->mm.wc_stash.pvec;
2941 set_pages_array_wb(pvec->pages, pvec->nr);
2942 __pagevec_release(pvec);
2945 mutex_unlock(&dev_priv->drm.struct_mutex);
2947 arch_phys_wc_del(ggtt->mtrr);
2948 io_mapping_fini(&ggtt->iomap);
2950 i915_gem_cleanup_stolen(dev_priv);
2953 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2955 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2956 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2957 return snb_gmch_ctl << 20;
2960 static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2962 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2963 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2965 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2967 #ifdef CONFIG_X86_32
2968 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
2969 if (bdw_gmch_ctl > 4)
2973 return bdw_gmch_ctl << 20;
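/*
 * Worked example (editor's note): a GGMS field of 3 gives 1 << 3 = 8, i.e.
 * an 8 MiB GTT. gen8_gmch_probe() below then computes
 * (8 MiB / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE = 1M entries * 4 KiB
 * = 4 GiB of global GTT address space.
 */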
2976 static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2978 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2979 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2982 return 1 << (20 + gmch_ctrl);
2987 static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
2989 struct drm_i915_private *dev_priv = ggtt->vm.i915;
2990 struct pci_dev *pdev = dev_priv->drm.pdev;
2991 phys_addr_t phys_addr;
2994 /* For Modern GENs the PTEs and register space are split in the BAR */
2995 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
2998 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
2999 * will be dropped. For WC mappings in general we have 64 byte burst
3000 * writes when the WC buffer is flushed, so we can't use it, but have to
3001 * resort to an uncached mapping. The WC issue is easily caught by the
3002 * readback check when writing GTT PTE entries.
3004 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
3005 ggtt->gsm = ioremap_nocache(phys_addr, size);
3007 ggtt->gsm = ioremap_wc(phys_addr, size);
3009 DRM_ERROR("Failed to map the ggtt page table\n");
3013 ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
3015 DRM_ERROR("Scratch setup failed\n");
3016 /* iounmap will also get called at remove, but meh */
3021 ggtt->vm.scratch_pte =
3022 ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
3023 I915_CACHE_NONE, 0);
3028 static struct intel_ppat_entry *
3029 __alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
3031 struct intel_ppat_entry *entry = &ppat->entries[index];
3033 GEM_BUG_ON(index >= ppat->max_entries);
3034 GEM_BUG_ON(test_bit(index, ppat->used));
3037 entry->value = value;
3038 kref_init(&entry->ref);
3039 set_bit(index, ppat->used);
3040 set_bit(index, ppat->dirty);
3045 static void __free_ppat_entry(struct intel_ppat_entry *entry)
3047 struct intel_ppat *ppat = entry->ppat;
3048 unsigned int index = entry - ppat->entries;
3050 GEM_BUG_ON(index >= ppat->max_entries);
3051 GEM_BUG_ON(!test_bit(index, ppat->used));
3053 entry->value = ppat->clear_value;
3054 clear_bit(index, ppat->used);
3055 set_bit(index, ppat->dirty);
3059 * intel_ppat_get - get a usable PPAT entry
3060 * @i915: i915 device instance
3061 * @value: the PPAT value required by the caller
3063 * The function searches for an existing PPAT entry that matches the
3064 * required value. If a perfect match is found, the existing PPAT
3065 * entry is used. If the match is only partial, it checks whether
3066 * a free PPAT index is still available. If so, it allocates a new PPAT
3067 * index for the required entry and updates the HW. Otherwise, the
3068 * partially matched entry is used.
3070 const struct intel_ppat_entry *
3071 intel_ppat_get(struct drm_i915_private *i915, u8 value)
3073 struct intel_ppat *ppat = &i915->ppat;
3074 struct intel_ppat_entry *entry = NULL;
3075 unsigned int scanned, best_score;
3078 GEM_BUG_ON(!ppat->max_entries);
3080 scanned = best_score = 0;
3081 for_each_set_bit(i, ppat->used, ppat->max_entries) {
3084 score = ppat->match(ppat->entries[i].value, value);
3085 if (score > best_score) {
3086 entry = &ppat->entries[i];
3087 if (score == INTEL_PPAT_PERFECT_MATCH) {
3088 kref_get(&entry->ref);
3096 if (scanned == ppat->max_entries) {
3098 return ERR_PTR(-ENOSPC);
3100 kref_get(&entry->ref);
3104 i = find_first_zero_bit(ppat->used, ppat->max_entries);
3105 entry = __alloc_ppat_entry(ppat, i, value);
3106 ppat->update_hw(i915);
3110 static void release_ppat(struct kref *kref)
3112 struct intel_ppat_entry *entry =
3113 container_of(kref, struct intel_ppat_entry, ref);
3114 struct drm_i915_private *i915 = entry->ppat->i915;
3116 __free_ppat_entry(entry);
3117 entry->ppat->update_hw(i915);
3121 * intel_ppat_put - put back the PPAT entry obtained from intel_ppat_get()
3122 * @entry: an intel PPAT entry
3124 * Put back a PPAT entry obtained from intel_ppat_get(). If the PPAT index of
3125 * the entry was dynamically allocated, its reference count is decreased. Once
3126 * the reference count drops to zero, the PPAT index becomes free again.
3128 void intel_ppat_put(const struct intel_ppat_entry *entry)
3130 struct intel_ppat *ppat = entry->ppat;
3131 unsigned int index = entry - ppat->entries;
3133 GEM_BUG_ON(!ppat->max_entries);
3135 kref_put(&ppat->entries[index].ref, release_ppat);
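/*
 * Usage sketch (editor's addition, hypothetical caller): every successful
 * intel_ppat_get() is paired with an intel_ppat_put() once the PAT index is
 * no longer referenced by any PTE:
 *
 *	const struct intel_ppat_entry *entry;
 *
 *	entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLC);
 *	if (IS_ERR(entry))
 *		return PTR_ERR(entry);
 *	... encode (entry - i915->ppat.entries) as the PAT index in PTEs ...
 *	intel_ppat_put(entry);
 *
 * Note that intel_ppat_get() may hand back a partially matching entry, so
 * the returned value is best-effort rather than exact.
 */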
3138 static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
3140 struct intel_ppat *ppat = &dev_priv->ppat;
3143 for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
3144 I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
3145 clear_bit(i, ppat->dirty);
3149 static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
3151 struct intel_ppat *ppat = &dev_priv->ppat;
3155 for (i = 0; i < ppat->max_entries; i++)
3156 pat |= GEN8_PPAT(i, ppat->entries[i].value);
3158 bitmap_clear(ppat->dirty, 0, ppat->max_entries);
3160 I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
3161 I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
3164 static unsigned int bdw_private_pat_match(u8 src, u8 dst)
3166 unsigned int score = 0;
3173 /* Cache attribute has to be matched. */
3174 if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
3179 if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
3182 if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
3185 if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
3186 return INTEL_PPAT_PERFECT_MATCH;
3191 static unsigned int chv_private_pat_match(u8 src, u8 dst)
3193 return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
3194 INTEL_PPAT_PERFECT_MATCH : 0;
3197 static void cnl_setup_private_ppat(struct intel_ppat *ppat)
3199 ppat->max_entries = 8;
3200 ppat->update_hw = cnl_private_pat_update_hw;
3201 ppat->match = bdw_private_pat_match;
3202 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3204 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
3205 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
3206 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
3207 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
3208 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3209 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3210 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3211 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3214 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
3215 * bits. When using advanced contexts each context stores its own PAT, but
3216 * writing this data shouldn't be harmful even in those cases. */
3217 static void bdw_setup_private_ppat(struct intel_ppat *ppat)
3219 ppat->max_entries = 8;
3220 ppat->update_hw = bdw_private_pat_update_hw;
3221 ppat->match = bdw_private_pat_match;
3222 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3224 if (!HAS_PPGTT(ppat->i915)) {
3225 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3226 * so RTL will always use the value corresponding to pat_sel = 000".
3228 * So let's disable cache for GGTT to avoid screen corruptions.
3229 * MOCS still can be used though.
3230 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
3231 * before this patch, i.e. the same uncached + snooping access
3232 * like on gen6/7 seems to be in effect.
3233 * - So this just fixes blitter/render access. Again it looks
3234 * like it's not just uncached access, but uncached + snooping.
3235 * So we can still hold onto all our assumptions wrt cpu
3236 * clflushing on LLC machines.
3238 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
3242 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */
3243 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */
3244 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */
3245 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */
3246 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3247 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3248 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3249 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3252 static void chv_setup_private_ppat(struct intel_ppat *ppat)
3254 ppat->max_entries = 8;
3255 ppat->update_hw = bdw_private_pat_update_hw;
3256 ppat->match = chv_private_pat_match;
3257 ppat->clear_value = CHV_PPAT_SNOOP;
3260 * Map WB on BDW to snooped on CHV.
3262 * Only the snoop bit has meaning for CHV, the rest is ignored.
3265 * The hardware will never snoop for certain types of accesses:
3266 * - CPU GTT (GMADR->GGTT->no snoop->memory)
3267 * - PPGTT page tables
3268 * - some other special cycles
3270 * As with BDW, we also need to consider the following for GT accesses:
3271 * "For GGTT, there is NO pat_sel[2:0] from the entry,
3272 * so RTL will always use the value corresponding to pat_sel = 000".
3274 * Which means we must set the snoop bit in PAT entry 0
3275 * in order to keep the global status page working.
3278 __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
3279 __alloc_ppat_entry(ppat, 1, 0);
3280 __alloc_ppat_entry(ppat, 2, 0);
3281 __alloc_ppat_entry(ppat, 3, 0);
3282 __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
3283 __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
3284 __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
3285 __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
3288 static void gen6_gmch_remove(struct i915_address_space *vm)
3290 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
3293 cleanup_scratch_page(vm);
3296 static void setup_private_pat(struct drm_i915_private *dev_priv)
3298 struct intel_ppat *ppat = &dev_priv->ppat;
3301 ppat->i915 = dev_priv;
3303 if (INTEL_GEN(dev_priv) >= 10)
3304 cnl_setup_private_ppat(ppat);
3305 else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3306 chv_setup_private_ppat(ppat);
3308 bdw_setup_private_ppat(ppat);
3310 GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
3312 for_each_clear_bit(i, ppat->used, ppat->max_entries) {
3313 ppat->entries[i].value = ppat->clear_value;
3314 ppat->entries[i].ppat = ppat;
3315 set_bit(i, ppat->dirty);
3318 ppat->update_hw(dev_priv);
3321 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3323 struct drm_i915_private *dev_priv = ggtt->vm.i915;
3324 struct pci_dev *pdev = dev_priv->drm.pdev;
3329 /* TODO: We're not aware of mappable constraints on gen8 yet */
3331 (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3332 pci_resource_len(pdev, 2));
3333 ggtt->mappable_end = resource_size(&ggtt->gmadr);
3335 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
3337 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
3339 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3341 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3342 if (IS_CHERRYVIEW(dev_priv))
3343 size = chv_get_total_gtt_size(snb_gmch_ctl);
3345 size = gen8_get_total_gtt_size(snb_gmch_ctl);
3347 ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
3348 ggtt->vm.cleanup = gen6_gmch_remove;
3349 ggtt->vm.insert_page = gen8_ggtt_insert_page;
3350 ggtt->vm.clear_range = nop_clear_range;
3351 if (intel_scanout_needs_vtd_wa(dev_priv))
3352 ggtt->vm.clear_range = gen8_ggtt_clear_range;
3354 ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
3356 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3357 if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
3358 IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
3359 ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
3360 ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
3361 if (ggtt->vm.clear_range != nop_clear_range)
3362 ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3364 /* Prevent recursively calling stop_machine() and deadlocks. */
3365 dev_info(dev_priv->drm.dev,
3366 "Disabling error capture for VT-d workaround\n");
3367 i915_disable_error_state(dev_priv, -ENODEV);
3370 ggtt->invalidate = gen6_ggtt_invalidate;
3372 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
3373 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
3374 ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
3375 ggtt->vm.vma_ops.clear_pages = clear_pages;
3377 ggtt->vm.pte_encode = gen8_pte_encode;
3379 setup_private_pat(dev_priv);
3381 return ggtt_probe_common(ggtt, size);
3384 static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3386 struct drm_i915_private *dev_priv = ggtt->vm.i915;
3387 struct pci_dev *pdev = dev_priv->drm.pdev;
3393 (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3394 pci_resource_len(pdev, 2));
3395 ggtt->mappable_end = resource_size(&ggtt->gmadr);
3397 /* 64/512MB is the current min/max we actually know of, but this is just
3398 * a coarse sanity check.
3400 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
3401 DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
3405 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
3407 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3409 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3410 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3412 size = gen6_get_total_gtt_size(snb_gmch_ctl);
3413 ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
3415 ggtt->vm.clear_range = nop_clear_range;
3416 if (!HAS_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
3417 ggtt->vm.clear_range = gen6_ggtt_clear_range;
3418 ggtt->vm.insert_page = gen6_ggtt_insert_page;
3419 ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
3420 ggtt->vm.cleanup = gen6_gmch_remove;
3422 ggtt->invalidate = gen6_ggtt_invalidate;
3424 if (HAS_EDRAM(dev_priv))
3425 ggtt->vm.pte_encode = iris_pte_encode;
3426 else if (IS_HASWELL(dev_priv))
3427 ggtt->vm.pte_encode = hsw_pte_encode;
3428 else if (IS_VALLEYVIEW(dev_priv))
3429 ggtt->vm.pte_encode = byt_pte_encode;
3430 else if (INTEL_GEN(dev_priv) >= 7)
3431 ggtt->vm.pte_encode = ivb_pte_encode;
3433 ggtt->vm.pte_encode = snb_pte_encode;
3435 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
3436 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
3437 ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
3438 ggtt->vm.vma_ops.clear_pages = clear_pages;
3440 return ggtt_probe_common(ggtt, size);
3443 static void i915_gmch_remove(struct i915_address_space *vm)
3445 intel_gmch_remove();
3448 static int i915_gmch_probe(struct i915_ggtt *ggtt)
3450 struct drm_i915_private *dev_priv = ggtt->vm.i915;
3451 phys_addr_t gmadr_base;
3454 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3456 DRM_ERROR("failed to set up gmch\n");
3460 intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
3463 (struct resource) DEFINE_RES_MEM(gmadr_base,
3464 ggtt->mappable_end);
3466 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
3467 ggtt->vm.insert_page = i915_ggtt_insert_page;
3468 ggtt->vm.insert_entries = i915_ggtt_insert_entries;
3469 ggtt->vm.clear_range = i915_ggtt_clear_range;
3470 ggtt->vm.cleanup = i915_gmch_remove;
3472 ggtt->invalidate = gmch_ggtt_invalidate;
3474 ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma;
3475 ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
3476 ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
3477 ggtt->vm.vma_ops.clear_pages = clear_pages;
3479 if (unlikely(ggtt->do_idle_maps))
3480 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3486 * i915_ggtt_probe_hw - Probe GGTT hardware location
3487 * @dev_priv: i915 device
3489 int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
3491 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3494 ggtt->vm.i915 = dev_priv;
3495 ggtt->vm.dma = &dev_priv->drm.pdev->dev;
3497 if (INTEL_GEN(dev_priv) <= 5)
3498 ret = i915_gmch_probe(ggtt);
3499 else if (INTEL_GEN(dev_priv) < 8)
3500 ret = gen6_gmch_probe(ggtt);
3502 ret = gen8_gmch_probe(ggtt);
3506 if ((ggtt->vm.total - 1) >> 32) {
3507 DRM_ERROR("We never expected a Global GTT with more than 32bits"
3508 " of address space! Found %lldM!\n",
3509 ggtt->vm.total >> 20);
3510 ggtt->vm.total = 1ULL << 32;
3511 ggtt->mappable_end =
3512 min_t(u64, ggtt->mappable_end, ggtt->vm.total);
3515 if (ggtt->mappable_end > ggtt->vm.total) {
3516 DRM_ERROR("mappable aperture extends past end of GGTT,"
3517 " aperture=%pa, total=%llx\n",
3518 &ggtt->mappable_end, ggtt->vm.total);
3519 ggtt->mappable_end = ggtt->vm.total;
3522 /* GMADR is the PCI mmio aperture into the global GTT. */
3523 DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
3524 DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
3525 DRM_DEBUG_DRIVER("DSM size = %lluM\n",
3526 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
3527 if (intel_vtd_active())
3528 DRM_INFO("VT-d active for gfx access\n");
3534 * i915_ggtt_init_hw - Initialize GGTT hardware
3535 * @dev_priv: i915 device
3537 int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3539 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3542 stash_init(&dev_priv->mm.wc_stash);
3544 /* Note that we use page colouring to enforce a guard page at the
3545 * end of the address space. This is required as the CS may prefetch
3546 * beyond the end of the batch buffer, across the page boundary,
3547 * and beyond the end of the GTT if we do not provide a guard.
3549 mutex_lock(&dev_priv->drm.struct_mutex);
3550 i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
3552 ggtt->vm.is_ggtt = true;
3554 /* Only VLV supports read-only GGTT mappings */
3555 ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);
3557 if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
3558 ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
3559 mutex_unlock(&dev_priv->drm.struct_mutex);
3561 if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
3562 dev_priv->ggtt.gmadr.start,
3563 dev_priv->ggtt.mappable_end)) {
3565 goto out_gtt_cleanup;
3568 ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
3570 i915_ggtt_init_fences(ggtt);
3573 * Initialise stolen early so that we may reserve preallocated
3574 * objects for the BIOS to KMS transition.
3576 ret = i915_gem_init_stolen(dev_priv);
3578 goto out_gtt_cleanup;
3583 ggtt->vm.cleanup(&ggtt->vm);
3587 int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3589 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
3595 void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3597 GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3599 i915->ggtt.invalidate = guc_ggtt_invalidate;
3601 i915_ggtt_invalidate(i915);
3604 void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3606 /* XXX Temporary pardon for error unload */
3607 if (i915->ggtt.invalidate == gen6_ggtt_invalidate)
3610 /* We should only be called after i915_ggtt_enable_guc() */
3611 GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3613 i915->ggtt.invalidate = gen6_ggtt_invalidate;
3615 i915_ggtt_invalidate(i915);
3618 void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3620 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3621 struct i915_vma *vma, *vn;
3623 i915_check_and_clear_faults(dev_priv);
3625 mutex_lock(&ggtt->vm.mutex);
3627 /* First fill our portion of the GTT with scratch pages */
3628 ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
3629 ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
3631 /* clflush objects bound into the GGTT and rebind them. */
3632 list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
3633 struct drm_i915_gem_object *obj = vma->obj;
3635 if (!(vma->flags & I915_VMA_GLOBAL_BIND))
3638 mutex_unlock(&ggtt->vm.mutex);
3640 if (!i915_vma_unbind(vma))
3643 WARN_ON(i915_vma_bind(vma,
3644 obj ? obj->cache_level : 0,
3647 i915_gem_object_lock(obj);
3648 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3649 i915_gem_object_unlock(obj);
3653 mutex_lock(&ggtt->vm.mutex);
3656 ggtt->vm.closed = false;
3657 i915_ggtt_invalidate(dev_priv);
3659 mutex_unlock(&ggtt->vm.mutex);
3661 if (INTEL_GEN(dev_priv) >= 8) {
3662 struct intel_ppat *ppat = &dev_priv->ppat;
3664 bitmap_set(ppat->dirty, 0, ppat->max_entries);
3665 dev_priv->ppat.update_hw(dev_priv);
3670 static struct scatterlist *
3671 rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3672 unsigned int width, unsigned int height,
3673 unsigned int stride,
3674 struct sg_table *st, struct scatterlist *sg)
3676 unsigned int column, row;
3677 unsigned int src_idx;
3679 for (column = 0; column < width; column++) {
3680 src_idx = stride * (height - 1) + column + offset;
3681 for (row = 0; row < height; row++) {
3683 /* We don't need the pages, but need to initialize
3684 * the entries so the sg list can be happily traversed.
3685 * The only things we need are the DMA addresses.
3687 sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
3688 sg_dma_address(sg) =
3689 i915_gem_object_get_dma_address(obj, src_idx);
3690 sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
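/*
 * Worked example (editor's note): for a 2x2 tile region with stride 2 and
 * offset 0, each column starts at src_idx = stride * (height - 1) + column,
 * i.e. at the bottom row, and steps upwards by one stride per row. The
 * rotated view therefore emits the object's pages in the order 2, 0, 3, 1.
 */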
3699 static noinline struct sg_table *
3700 intel_rotate_pages(struct intel_rotation_info *rot_info,
3701 struct drm_i915_gem_object *obj)
3703 unsigned int size = intel_rotation_info_size(rot_info);
3704 struct sg_table *st;
3705 struct scatterlist *sg;
3709 /* Allocate target SG list. */
3710 st = kmalloc(sizeof(*st), GFP_KERNEL);
3714 ret = sg_alloc_table(st, size, GFP_KERNEL);
3721 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3722 sg = rotate_pages(obj, rot_info->plane[i].offset,
3723 rot_info->plane[i].width, rot_info->plane[i].height,
3724 rot_info->plane[i].stride, st, sg);
3733 DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3734 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3736 return ERR_PTR(ret);
3739 static struct scatterlist *
3740 remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3741 unsigned int width, unsigned int height,
3742 unsigned int stride,
3743 struct sg_table *st, struct scatterlist *sg)
3747 for (row = 0; row < height; row++) {
3748 unsigned int left = width * I915_GTT_PAGE_SIZE;
3752 unsigned int length;
3754 /* We don't need the pages, but need to initialize
3755 * the entries so the sg list can be happily traversed.
3756 * The only things we need are the DMA addresses.
3759 addr = i915_gem_object_get_dma_address_len(obj, offset, &length);
3761 length = min(left, length);
3765 sg_set_page(sg, NULL, length, 0);
3766 sg_dma_address(sg) = addr;
3767 sg_dma_len(sg) = length;
3770 offset += length / I915_GTT_PAGE_SIZE;
3774 offset += stride - width;
3780 static noinline struct sg_table *
3781 intel_remap_pages(struct intel_remapped_info *rem_info,
3782 struct drm_i915_gem_object *obj)
3784 unsigned int size = intel_remapped_info_size(rem_info);
3785 struct sg_table *st;
3786 struct scatterlist *sg;
3790 /* Allocate target SG list. */
3791 st = kmalloc(sizeof(*st), GFP_KERNEL);
3795 ret = sg_alloc_table(st, size, GFP_KERNEL);
3802 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
3803 sg = remap_pages(obj, rem_info->plane[i].offset,
3804 rem_info->plane[i].width, rem_info->plane[i].height,
3805 rem_info->plane[i].stride, st, sg);
3816 DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3817 obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
3819 return ERR_PTR(ret);
3822 static noinline struct sg_table *
3823 intel_partial_pages(const struct i915_ggtt_view *view,
3824 struct drm_i915_gem_object *obj)
3826 struct sg_table *st;
3827 struct scatterlist *sg, *iter;
3828 unsigned int count = view->partial.size;
3829 unsigned int offset;
3832 st = kmalloc(sizeof(*st), GFP_KERNEL);
3836 ret = sg_alloc_table(st, count, GFP_KERNEL);
3840 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
3848 len = min(iter->length - (offset << PAGE_SHIFT),
3849 count << PAGE_SHIFT);
3850 sg_set_page(sg, NULL, len, 0);
3851 sg_dma_address(sg) =
3852 sg_dma_address(iter) + (offset << PAGE_SHIFT);
3853 sg_dma_len(sg) = len;
3856 count -= len >> PAGE_SHIFT;
3859 i915_sg_trim(st); /* Drop any unused tail entries. */
3865 iter = __sg_next(iter);
3872 return ERR_PTR(ret);
3876 i915_get_ggtt_vma_pages(struct i915_vma *vma)
3880 /* The vma->pages are only valid within the lifespan of the borrowed
3881 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3882 * must be the vma->pages. A simple rule is that vma->pages must only
3883 * be accessed when the obj->mm.pages are pinned.
3885 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3887 switch (vma->ggtt_view.type) {
3889 GEM_BUG_ON(vma->ggtt_view.type);
3891 case I915_GGTT_VIEW_NORMAL:
3892 vma->pages = vma->obj->mm.pages;
3895 case I915_GGTT_VIEW_ROTATED:
3897 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3900 case I915_GGTT_VIEW_REMAPPED:
3902 intel_remap_pages(&vma->ggtt_view.remapped, vma->obj);
3905 case I915_GGTT_VIEW_PARTIAL:
3906 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
3911 if (IS_ERR(vma->pages)) {
3912 ret = PTR_ERR(vma->pages);
3914 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3915 vma->ggtt_view.type, ret);
3921 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
3922 * @vm: the &struct i915_address_space
3923 * @node: the &struct drm_mm_node (typically i915_vma.node)
3924 * @size: how much space to allocate inside the GTT,
3925 * must be #I915_GTT_PAGE_SIZE aligned
3926 * @offset: where to insert inside the GTT,
3927 * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3928 * (@offset + @size) must fit within the address space
3929 * @color: color to apply to node, if this node is not from a VMA,
3930 * color must be #I915_COLOR_UNEVICTABLE
3931 * @flags: control search and eviction behaviour
3933 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3934 * the address space (using @size and @color). If the @node does not fit, it
3935 * tries to evict any overlapping nodes from the GTT, including any
3936 * neighbouring nodes if the colors do not match (to ensure guard pages between
3937 * differing domains). See i915_gem_evict_for_node() for the gory details
3938 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3939 * evicting active overlapping objects, and any overlapping node that is pinned
3940 * or marked as unevictable will also result in failure.
3942 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3943 * asked to wait for eviction and interrupted.
3945 int i915_gem_gtt_reserve(struct i915_address_space *vm,
3946 struct drm_mm_node *node,
3947 u64 size, u64 offset, unsigned long color,
3953 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3954 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3955 GEM_BUG_ON(range_overflows(offset, size, vm->total));
3956 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
3957 GEM_BUG_ON(drm_mm_node_allocated(node));
3960 node->start = offset;
3961 node->color = color;
3963 err = drm_mm_reserve_node(&vm->mm, node);
3967 if (flags & PIN_NOEVICT)
3970 err = i915_gem_evict_for_node(vm, node, flags);
3972 err = drm_mm_reserve_node(&vm->mm, node);
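/*
 * Usage sketch (editor's addition, hypothetical sizes): reserving a fixed
 * offset looks much like ggtt_reserve_guc_top() above, e.g.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_reserve(&ggtt->vm, &node, SZ_64K, SZ_1M,
 *				   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
 *	if (err)
 *		return err;
 *
 * -ENOSPC here means the requested offset could not be made free. Both size
 * and offset must satisfy the alignment GEM_BUG_ONs at the top of the
 * function.
 */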
3977 static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3981 GEM_BUG_ON(range_overflows(start, len, end));
3982 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3984 range = round_down(end - len, align) - round_up(start, align);
3986 if (sizeof(unsigned long) == sizeof(u64)) {
3987 addr = get_random_long();
3989 addr = get_random_int();
3990 if (range > U32_MAX) {
3992 addr |= get_random_int();
3995 div64_u64_rem(addr, range, &addr);
3999 return round_up(start, align);
4003 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
4004 * @vm: the &struct i915_address_space
4005 * @node: the &struct drm_mm_node (typically i915_vma.node)
4006 * @size: how much space to allocate inside the GTT,
4007 * must be #I915_GTT_PAGE_SIZE aligned
4008 * @alignment: required alignment of starting offset, may be 0 but
4009 * if specified, this must be a power-of-two and at least
4010 * #I915_GTT_MIN_ALIGNMENT
4011 * @color: color to apply to node
4012 * @start: start of any range restriction inside GTT (0 for all),
4013 * must be #I915_GTT_PAGE_SIZE aligned
4014 * @end: end of any range restriction inside GTT (U64_MAX for all),
4015 * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
4016 * @flags: control search and eviction behaviour
4018 * i915_gem_gtt_insert() first searches for an available hole into which
4019 * it can insert the node. The hole address is aligned to @alignment and
4020 * its @size must then fit entirely within the [@start, @end] bounds. The
4021 * nodes on either side of the hole must match @color, or else a guard page
4022 * will be inserted between the two nodes (or the node evicted). If no
4023 * suitable hole is found, first a victim is randomly selected and tested
4024 * for eviction; failing that, the LRU list of objects within the GTT
4025 * is scanned to find the first set of replacement nodes to create the hole.
4026 * Those old overlapping nodes are evicted from the GTT (and so must be
4027 * rebound before any future use). Any node that is currently pinned cannot
4028 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
4029 * active and #PIN_NONBLOCK is specified, that node is also skipped when
4030 * searching for an eviction candidate. See i915_gem_evict_something() for
4031 * the gory details on the eviction algorithm.
4033 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
4034 * asked to wait for eviction and interrupted.
4036 int i915_gem_gtt_insert(struct i915_address_space *vm,
4037 struct drm_mm_node *node,
4038 u64 size, u64 alignment, unsigned long color,
4039 u64 start, u64 end, unsigned int flags)
4041 enum drm_mm_insert_mode mode;
4045 lockdep_assert_held(&vm->i915->drm.struct_mutex);
4047 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
4048 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
4049 GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
4050 GEM_BUG_ON(start >= end);
4051 GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
4052 GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
4053 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
4054 GEM_BUG_ON(drm_mm_node_allocated(node));
4056 if (unlikely(range_overflows(start, size, end)))
4059 if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
4062 mode = DRM_MM_INSERT_BEST;
4063 if (flags & PIN_HIGH)
4064 mode = DRM_MM_INSERT_HIGHEST;
4065 if (flags & PIN_MAPPABLE)
4066 mode = DRM_MM_INSERT_LOW;
4068 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
4069 * so we know that we always have a minimum alignment of 4096.
4070 * The drm_mm range manager is optimised to return results
4071 * with zero alignment, so where possible we drop the alignment to zero.
4074 BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
4075 if (alignment <= I915_GTT_MIN_ALIGNMENT)
4078 err = drm_mm_insert_node_in_range(&vm->mm, node,
4079 size, alignment, color,
4084 if (mode & DRM_MM_INSERT_ONCE) {
4085 err = drm_mm_insert_node_in_range(&vm->mm, node,
4086 size, alignment, color,
4088 DRM_MM_INSERT_BEST);
4093 if (flags & PIN_NOEVICT)
4096 /* No free space, pick a slot at random.
4098 * There is a pathological case here using a GTT shared between
4099 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
4101 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
4102 * (64k objects) (448k objects)
4104 * Now imagine that the eviction LRU is ordered top-down (just because
4105 * pathology meets real life), and that we need to evict an object to
4106 * make room inside the aperture. The eviction scan then has to walk
4107 * the 448k list before it finds one within range. And now imagine that
4108 * it has to search for a new hole between every byte inside the memcpy,
4109 * for several simultaneous clients.
4111 * On a full-ppgtt system, if we have run out of available space, there
4112 * will be lots and lots of objects in the eviction list! Again,
4113 * searching that LRU list may be slow if we are also applying any
4114 * range restrictions (e.g. restriction to low 4GiB) and so, for
4115 * simplicity and similarity between different GTTs, try the single
4116 * random replacement first.
4118 offset = random_offset(start, end,
4119 size, alignment ?: I915_GTT_MIN_ALIGNMENT);
4120 err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
4124 /* Randomly selected placement is pinned, do a search */
4125 err = i915_gem_evict_something(vm, size, alignment, color,
4130 return drm_mm_insert_node_in_range(&vm->mm, node,
4131 size, alignment, color,
4132 start, end, DRM_MM_INSERT_EVICT);
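/*
 * Usage sketch (editor's addition, hypothetical values): a caller that just
 * needs any 2 MiB hole in the low 4 GiB of the GGTT, preferring the mappable
 * aperture, would do something like
 *
 *	err = i915_gem_gtt_insert(&ggtt->vm, &node, SZ_2M, 0,
 *				  I915_COLOR_UNEVICTABLE,
 *				  0, SZ_4G, PIN_MAPPABLE);
 *
 * On success @node describes the allocated range; -ENOSPC means the direct
 * search, the random placement and the eviction scan all failed.
 */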
4135 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4136 #include "selftests/mock_gtt.c"
4137 #include "selftests/i915_gem_gtt.c"