/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__
#include <linux/io-mapping.h>
#include <linux/pagevec.h>

#include "gt/intel_reset.h"
#include "i915_gem_fence_reg.h"
#include "i915_request.h"
#include "i915_scatterlist.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
#define I915_GTT_PAGE_SIZE_4K	BIT_ULL(12)
#define I915_GTT_PAGE_SIZE_64K	BIT_ULL(16)
#define I915_GTT_PAGE_SIZE_2M	BIT_ULL(21)

#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M

#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE

#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
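/*
 * As I915_GTT_PAGE_SIZE is a power of two, the negation above is the usual
 * alignment trick: in two's complement -0x1000 == ~0xfff, i.e. a mask that
 * keeps only the page-aligned bits of an address.
 */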
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_file_private;
struct drm_i915_gem_object;
typedef u32 gen6_pte_t;
typedef u64 gen8_pte_t;
typedef u64 gen8_pde_t;
typedef u64 gen8_ppgtt_pdpe_t;
typedef u64 gen8_ppgtt_pml4e_t;

#define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			(1 << 0)
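/*
 * Worked example for the encoding above: for addr = BIT_ULL(35),
 * (addr >> 28) & 0xff0 == 0x80, i.e. physical address bit 35 lands in PTE
 * bit 7, inside the 11:4 window that holds address bits 39:32.
 */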
#define I915_PTES(pte_len)		((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)
#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))
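/*
 * Worked example: with 4KiB pages, I915_PTES(sizeof(gen6_pte_t)) ==
 * 4096 / 4 == 1024 entries per page table, and NUM_PTE(GEN6_PDE_SHIFT) ==
 * 1 << (22 - 12) == 1024 PTEs spanned by a single gen6 PDE.
 */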
#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			(1 << 0)

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
#define BYT_PTE_WRITEABLE		(1 << 1)
/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
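/*
 * Worked example: HSW_CACHEABILITY_CONTROL(0x8) == ((0x8 & 0x7) << 1) |
 * ((0x8 & 0x8) << 8) == 0x800, i.e. only PTE bit 11 set. Note the HSW
 * address encode uses 0x7f0 rather than 0xff0: bit 11 now carries the high
 * cacheability bit, leaving only address bits 38:32 to fit in bits 10:4.
 */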
/* GEN8 32b style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 */
#define GEN8_3LVL_PDPES			4
#define GEN8_PDE_SHIFT			21
#define GEN8_PDE_MASK			0x1ff
#define GEN8_PTE_SHIFT			12
#define GEN8_PTE_MASK			0x1ff
#define GEN8_PTES			I915_PTES(sizeof(gen8_pte_t))
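/*
 * Worked example: GVA 0x12345678 decodes as PDPE 0 (bits 31:30),
 * PDE 0x91 (bits 29:21), PTE 0x145 (bits 20:12) and page offset 0x678.
 */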
/* GEN8 48b style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
#define GEN8_PML4ES_PER_PML4		512
#define GEN8_PML4E_SHIFT		39
#define GEN8_PML4E_MASK			(GEN8_PML4ES_PER_PML4 - 1)
#define GEN8_PDPE_SHIFT			30
/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
 * tables.
 */
#define GEN8_PDPE_MASK			0x1ff
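/*
 * Worked example: a GVA of (3ULL << 39) | (5ULL << 30) | (7ULL << 21) |
 * (9ULL << 12) decodes as PML4E 3, PDPE 5, PDE 7, PTE 9, offset 0.
 */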
#define PPAT_UNCACHED			(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE			0 /* WB LLC */
#define PPAT_CACHED			_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			(1<<6)
#define GEN8_PPAT_AGE(x)		((x)<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((u64)(x) << ((i) * 8))

#define GEN8_PPAT_GET_CA(x)		((x) & 3)
#define GEN8_PPAT_GET_TC(x)		((x) & (3 << 2))
#define GEN8_PPAT_GET_AGE(x)		((x) & (3 << 4))
#define CHV_PPAT_GET_SNOOP(x)		((x) & (1 << 6))
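/*
 * Illustrative use of GEN8_PPAT() (the actual table is programmed
 * elsewhere in the driver): packing entry 2 of the 8-entry PPAT with a
 * write-back, LLC-only, age-3 value gives
 * GEN8_PPAT(2, GEN8_PPAT_WB | GEN8_PPAT_LLC | GEN8_PPAT_AGE(3)), i.e.
 * (0x3 | 0x4 | 0x30) << 16 == 0x370000.
 */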
#define GEN8_PDE_IPS_64K BIT(11)
#define GEN8_PDE_PS_2M   BIT(7)

#define for_each_sgt_dma(__dmap, __iter, __sgt) \
	__for_each_sgt_dma(__dmap, __iter, __sgt, I915_GTT_PAGE_SIZE)
struct intel_remapped_plane_info {
	/* in gtt pages */
	unsigned int width, height, stride, offset;
} __packed;

struct intel_remapped_info {
	struct intel_remapped_plane_info plane[2];
	unsigned int unused_mbz;
} __packed;

struct intel_rotation_info {
	struct intel_remapped_plane_info plane[2];
} __packed;

struct intel_partial_info {
	u64 offset;
	unsigned int size;
} __packed;

enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
	I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
	I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
};
static inline void assert_i915_gem_gtt_types(void)
{
	BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 8*sizeof(unsigned int));
	BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
	BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 9*sizeof(unsigned int));

	/* Check that rotation/remapped shares offsets for simplicity */
	BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
		     offsetof(struct intel_rotation_info, plane[0]));
	BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
		     offsetofend(struct intel_rotation_info, plane[1]));

	/* As we encode the size of each branch inside the union into its type,
	 * we have to be careful that each branch has a unique size.
	 */
	switch ((enum i915_ggtt_view_type)0) {
	case I915_GGTT_VIEW_NORMAL:
	case I915_GGTT_VIEW_PARTIAL:
	case I915_GGTT_VIEW_ROTATED:
	case I915_GGTT_VIEW_REMAPPED:
		/* gcc complains if these are identical cases */
		break;
	}
}
struct i915_ggtt_view {
	enum i915_ggtt_view_type type;
	union {
		/* Members need to contain no holes/padding */
		struct intel_partial_info partial;
		struct intel_rotation_info rotated;
		struct intel_remapped_info remapped;
	};
};
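/*
 * Illustrative (sketch, not from this header): a partial view covering a
 * single page starting at page 1 of an object, assuming offset and size
 * are expressed in GTT pages:
 *
 *	struct i915_ggtt_view view = {
 *		.type = I915_GGTT_VIEW_PARTIAL,
 *		.partial = { .offset = 1, .size = 1 },
 *	};
 */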
enum i915_cache_level;

struct i915_vma;

struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/* For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		u32 ggtt_offset;
	};
};

#define px_base(px) (&(px)->base)
#define px_dma(px) (px_base(px)->daddr)

struct i915_page_table {
	struct i915_page_dma base;
	atomic_t used;
};

struct i915_page_directory {
	struct i915_page_dma base;
	atomic_t used;
	spinlock_t lock;
	void *entry[512];
};
struct i915_vma_ops {
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
	/*
	 * Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page.
	 */
	void (*unbind_vma)(struct i915_vma *vma);

	int (*set_pages)(struct i915_vma *vma);
	void (*clear_pages)(struct i915_vma *vma);
};

struct pagestash {
	spinlock_t lock;
	struct pagevec pvec;
};
struct i915_address_space {
	struct kref ref;

	struct drm_mm mm;
	struct drm_i915_private *i915;
	struct device *dma;
	/* Every address space belongs to a struct file - except for the global
	 * GTT that is owned by the driver (and so @file is set to NULL). In
	 * principle, no information should leak from one context to another
	 * (or between files/processes etc) unless explicitly shared by the
	 * owner. Tracking the owner is important in order to free up per-file
	 * objects along with the file, to aid resource tracking, and to
	 * assign blame.
	 */
	struct drm_i915_file_private *file;
	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
	u64 reserved;		/* size addr space reserved */

	bool closed;

	struct mutex mutex; /* protects vma and our lists */
#define VM_CLASS_GGTT 0
#define VM_CLASS_PPGTT 1

	u64 scratch_pte;
	int scratch_order;
	struct i915_page_dma scratch_page;
	struct i915_page_table *scratch_pt;
	struct i915_page_directory *scratch_pd;
	struct i915_page_directory *scratch_pdp; /* GEN8+ & 48b PPGTT */

	/**
	 * List of vma currently bound.
	 */
	struct list_head bound_list;

	/**
	 * List of vma that are not bound.
	 */
	struct list_head unbound_list;

	struct pagestash free_pages;

	/* Global GTT */
	bool is_ggtt:1;

	/* Some systems require uncached updates of the page directories */
	bool pt_kmap_wc:1;

	/* Some systems support read-only mappings for GGTT and/or PPGTT */
	bool has_read_only:1;

	u64 (*pte_encode)(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags); /* Create a valid PTE */
#define PTE_READ_ONLY	(1<<0)

	int (*allocate_va_range)(struct i915_address_space *vm,
				 u64 start, u64 length);
	void (*clear_range)(struct i915_address_space *vm,
			    u64 start, u64 length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 flags);
	void (*cleanup)(struct i915_address_space *vm);

	struct i915_vma_ops vma_ops;

	I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
	I915_SELFTEST_DECLARE(bool scrub_64K);
};
#define i915_is_ggtt(vm) ((vm)->is_ggtt)

static inline bool
i915_vm_is_4lvl(const struct i915_address_space *vm)
{
	return (vm->total - 1) >> 32;
}

static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
	return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}
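/*
 * Note on i915_vm_is_4lvl() above: only an address space larger than 4GiB
 * needs a fourth level, e.g. total == 1ULL << 32 gives (total - 1) >> 32
 * == 0 (3 levels), while total == 1ULL << 48 gives a non-zero result.
 */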
/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space vm;

	struct io_mapping iomap;	/* Mapping to our CPU mappable region */
	struct resource gmadr;		/* GMADR resource */
	resource_size_t mappable_end;	/* End offset that we can CPU map */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct drm_i915_private *dev_priv);

	bool do_idle_maps;

	int mtrr;

	u32 pin_bias;
	unsigned int num_fences;
	struct i915_fence_reg fence_regs[I915_MAX_NUM_FENCES];
	struct list_head fence_list;

	/** List of all objects in gtt_space, currently mmaped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/* Manual runtime pm autosuspend delay for user GGTT mmaps */
	struct intel_wakeref_auto userfault_wakeref;

	struct drm_mm_node error_capture;
	struct drm_mm_node uc_fw;
};

struct i915_ppgtt {
	struct i915_address_space vm;

	intel_engine_mask_t pd_dirty_engines;
	struct i915_page_directory *pd;
};
struct gen6_ppgtt {
	struct i915_ppgtt base;

	struct i915_vma *vma;
	gen6_pte_t __iomem *pd_addr;

	unsigned int pin_count;
	bool scan_for_unused_pt;

	struct gen6_ppgtt_cleanup_work *work;
};
#define __to_gen6_ppgtt(base) container_of(base, struct gen6_ppgtt, base)

static inline struct gen6_ppgtt *to_gen6_ppgtt(struct i915_ppgtt *base)
{
	BUILD_BUG_ON(offsetof(struct gen6_ppgtt, base));
	return __to_gen6_ppgtt(base);
}
/*
 * gen6_for_each_pde() iterates over every pde from start until start+length.
 * If start and start+length are not perfectly divisible, the macro will round
 * down and up as needed. Start=0 and length=2G effectively iterates over
 * every PDE in the system. The macro modifies ALL its parameters except 'pd',
 * so each of the other parameters should preferably be a simple variable, or
 * at most an lvalue with no side-effects!
 */
#define gen6_for_each_pde(pt, pd, start, length, iter)			\
	for (iter = gen6_pde_index(start);				\
	     length > 0 && iter < I915_PDES &&				\
		(pt = i915_pt_entry(pd, iter), true);			\
	     ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT);		\
		temp = min(temp - start, length);			\
		start += temp, length -= temp; }), ++iter)
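/*
 * Illustrative usage (caller-side sketch, not from this header):
 *
 *	struct i915_page_table *pt;
 *	u32 pde;
 *
 *	gen6_for_each_pde(pt, pd, start, length, pde)
 *		visit_pt(pt);	// hypothetical per-table operation
 *
 * Note that 'start' and 'length' are consumed by the loop.
 */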
#define gen6_for_all_pdes(pt, pd, iter)					\
	for (iter = 0;							\
	     iter < I915_PDES &&					\
		(pt = i915_pt_entry(pd, iter), true);			\
	     ++iter)
static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
	const u32 mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}
/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
	const u64 mask = ~((1ULL << pde_shift) - 1);
	u64 end;

	GEM_BUG_ON(length == 0);
	GEM_BUG_ON(offset_in_page(addr | length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
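/*
 * Worked example (gen6, pde_shift == 22): addr = 0x3ff000, length = 0x2000
 * crosses the 4MiB page-table boundary at 0x400000, so only the PTEs up to
 * the boundary are counted: 1024 - i915_pte_index(0x3ff000, 22) ==
 * 1024 - 1023 == 1.
 */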
static inline u32 i915_pde_index(u64 addr, u32 shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline u32 gen6_pte_index(u32 addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline u32 gen6_pte_count(u32 addr, u32 length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline u32 gen6_pde_index(u32 addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}
static inline unsigned int
i915_pdpes_per_pdp(const struct i915_address_space *vm)
{
	if (i915_vm_is_4lvl(vm))
		return GEN8_PML4ES_PER_PML4;

	return GEN8_3LVL_PDPES;
}

static inline struct i915_page_table *
i915_pt_entry(const struct i915_page_directory * const pd,
	      const unsigned short n)
{
	return pd->entry[n];
}

static inline struct i915_page_directory *
i915_pd_entry(const struct i915_page_directory * const pdp,
	      const unsigned short n)
{
	return pdp->entry[n];
}

static inline struct i915_page_directory *
i915_pdp_entry(const struct i915_page_directory * const pml4,
	       const unsigned short n)
{
	return pml4->entry[n];
}
/* Equivalent to the gen6 version: iterates over every pde from start
 * until start + length. On gen8+ it simply iterates over every page
 * directory entry in a page directory.
 */
#define gen8_for_each_pde(pt, pd, start, length, iter)			\
	for (iter = gen8_pde_index(start);				\
	     length > 0 && iter < I915_PDES &&				\
		(pt = i915_pt_entry(pd, iter), true);			\
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT);		\
		temp = min(temp - start, length);			\
		start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pdpe(pd, pdp, start, length, iter)		\
	for (iter = gen8_pdpe_index(start);				\
	     length > 0 && iter < i915_pdpes_per_pdp(vm) &&		\
		(pd = i915_pd_entry(pdp, iter), true);			\
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT);	\
		temp = min(temp - start, length);			\
		start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pml4e(pdp, pml4, start, length, iter)		\
	for (iter = gen8_pml4e_index(start);				\
	     length > 0 && iter < GEN8_PML4ES_PER_PML4 &&		\
		(pdp = i915_pdp_entry(pml4, iter), true);		\
	     ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT);	\
		temp = min(temp - start, length);			\
		start += temp, length -= temp; }), ++iter)
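/*
 * Note: gen8_for_each_pdpe() bounds itself with i915_pdpes_per_pdp(vm),
 * so it expects a variable named 'vm' (the relevant
 * struct i915_address_space *) to already be in scope at the call site.
 */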
static inline u32 gen8_pte_index(u64 address)
{
	return i915_pte_index(address, GEN8_PDE_SHIFT);
}

static inline u32 gen8_pde_index(u64 address)
{
	return i915_pde_index(address, GEN8_PDE_SHIFT);
}

static inline u32 gen8_pdpe_index(u64 address)
{
	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}

static inline u32 gen8_pml4e_index(u64 address)
{
	return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}

static inline u64 gen8_pte_count(u64 address, u64 length)
{
	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}
static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
{
	struct i915_page_directory *pd;

	pd = i915_pdp_entry(ppgtt->pd, n);
	return px_dma(pd);
}
static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ggtt, vm));
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, vm);
}

static inline struct i915_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_ppgtt, vm));
	GEM_BUG_ON(i915_is_ggtt(vm));
	return container_of(vm, struct i915_ppgtt, vm);
}
#define INTEL_MAX_PPAT_ENTRIES 8
#define INTEL_PPAT_PERFECT_MATCH (~0U)

struct intel_ppat;

struct intel_ppat_entry {
	struct intel_ppat *ppat;
	struct kref ref;
	u8 value;
};

struct intel_ppat {
	struct intel_ppat_entry entries[INTEL_MAX_PPAT_ENTRIES];
	DECLARE_BITMAP(used, INTEL_MAX_PPAT_ENTRIES);
	DECLARE_BITMAP(dirty, INTEL_MAX_PPAT_ENTRIES);
	unsigned int max_entries;
	u8 clear_value;
	/*
	 * Return a score to show how two PPAT values match;
	 * an INTEL_PPAT_PERFECT_MATCH indicates a perfect match.
	 */
	unsigned int (*match)(u8 src, u8 dst);
	void (*update_hw)(struct drm_i915_private *i915);

	struct drm_i915_private *i915;
};
const struct intel_ppat_entry *
intel_ppat_get(struct drm_i915_private *i915, u8 value);
void intel_ppat_put(const struct intel_ppat_entry *entry);

int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
void i915_ggtt_enable_guc(struct drm_i915_private *i915);
void i915_ggtt_disable_guc(struct drm_i915_private *i915);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);

int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);

struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
static inline struct i915_address_space *
i915_vm_get(struct i915_address_space *vm)
{
	kref_get(&vm->ref);
	return vm;
}

void i915_vm_release(struct kref *kref);

static inline void i915_vm_put(struct i915_address_space *vm)
{
	kref_put(&vm->ref, i915_vm_release);
}
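/*
 * Illustrative lifetime pattern (sketch, not from this header): a caller
 * that caches a vm pointer takes a reference first and drops it with
 * i915_vm_put() when done:
 *
 *	struct i915_address_space *vm = i915_vm_get(&ppgtt->vm);
 *	...
 *	i915_vm_put(vm);
 */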
int gen6_ppgtt_pin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin(struct i915_ppgtt *base);
void gen6_ppgtt_unpin_all(struct i915_ppgtt *base);

void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);

int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
					    struct sg_table *pages);
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages);
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags);

int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags);
/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK		BIT_ULL(0)
#define PIN_NONFAULT		BIT_ULL(1)
#define PIN_NOEVICT		BIT_ULL(2)
#define PIN_MAPPABLE		BIT_ULL(3)
#define PIN_ZONE_4G		BIT_ULL(4)
#define PIN_HIGH		BIT_ULL(5)
#define PIN_OFFSET_BIAS		BIT_ULL(6)
#define PIN_OFFSET_FIXED	BIT_ULL(7)

#define PIN_MBZ			BIT_ULL(8) /* I915_VMA_PIN_OVERFLOW */
#define PIN_GLOBAL		BIT_ULL(9) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER		BIT_ULL(10) /* I915_VMA_LOCAL_BIND */
#define PIN_UPDATE		BIT_ULL(11)
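/*
 * Illustrative combination (sketch; i915_vma_pin() is declared in
 * i915_vma.h): pinning a vma at a fixed, page-aligned offset inside the
 * CPU-mappable region of the global GTT:
 *
 *	err = i915_vma_pin(vma, 0, 0,
 *			   PIN_GLOBAL | PIN_MAPPABLE |
 *			   PIN_OFFSET_FIXED | offset);
 */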
#define PIN_OFFSET_MASK		(-I915_GTT_PAGE_SIZE)

#endif