/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_vgpu.h"

/**
 * DOC: fence register handling
 *
 * Important to avoid confusion: "fences" in the i915 driver are not execution
 * fences used to track command completion but hardware detiler objects which
 * wrap a given range of the global GTT. Each platform has only a fairly limited
 * set of these objects.
 *
 * Fences are used to detile GTT memory mappings. They're also connected to the
 * hardware frontbuffer render tracking and hence interact with frontbuffer
 * compression. Furthermore on older platforms fences are required for tiled
 * objects used by the display engine. They can also be used by the render
 * engine - they're required for blitter commands and are optional for render
 * commands. But on gen4+ both display (with the exception of fbc) and rendering
 * have their own tiling state bits and don't need fences.
 *
 * Also note that fences only support X and Y tiling and hence can't be used for
 * the fancier new tiling formats like W, Ys and Yf.
 *
 * Finally note that because fences are such a restricted resource they're
 * dynamically associated with objects. Furthermore fence state is committed to
 * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
 * explicitly call i915_vma_pin_fence() to synchronize fencing status
 * for cpu access. Also note that some code wants an unfenced view; for those
 * cases the fence can be removed forcefully with i915_vma_put_fence().
 *
 * Internally these functions will synchronize with userspace access by removing
 * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
 */
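
/*
 * A minimal usage sketch (illustrative only, not used by the driver; the
 * helper name is hypothetical): pin a fence for detiled CPU access through
 * a GTT mmap. It assumes @vma is already bound map_and_fenceable and that
 * the caller holds a runtime pm wakeref, since fences are revoked on
 * runtime suspend.
 */
static inline int example_fenced_access(struct i915_vma *vma)
{
	int err;

	err = i915_vma_pin_fence(vma); /* may steal a fence from the LRU */
	if (err)
		return err;

	/* ... detiled CPU access through the GTT aperture goes here ... */

	i915_vma_unpin_fence(vma); /* drop the pin; the fence stays assigned */
	return 0;
}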

static void i965_write_fence_reg(struct i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	i915_reg_t fence_reg_lo, fence_reg_hi;
	int fence_pitch_shift;
	u64 val;

	if (INTEL_GEN(fence->i915) >= 6) {
		fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
		fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
		fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
	} else {
		fence_reg_lo = FENCE_REG_965_LO(fence->id);
		fence_reg_hi = FENCE_REG_965_HI(fence->id);
		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
	}

	val = 0;
	if (vma) {
		unsigned int stride = i915_gem_object_get_stride(vma->obj);

		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
		GEM_BUG_ON(!IS_ALIGNED(stride, 128));

		val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
		val |= vma->node.start;
		val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
		if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
			val |= BIT(I965_FENCE_TILING_Y_SHIFT);
		val |= I965_FENCE_REG_VALID;
	}

	{
		struct intel_uncore *uncore = &fence->i915->uncore;

		/*
		 * To w/a incoherency with non-atomic 64-bit register updates,
		 * we split the 64-bit update into two 32-bit writes. In order
		 * for a partial fence not to be evaluated between writes, we
		 * precede the update with a write to turn off the fence
		 * register, and only enable the fence as the last step.
		 *
		 * For extra levels of paranoia, we make sure each step lands
		 * before applying the next step.
		 */
		intel_uncore_write_fw(uncore, fence_reg_lo, 0);
		intel_uncore_posting_read_fw(uncore, fence_reg_lo);

		intel_uncore_write_fw(uncore, fence_reg_hi, upper_32_bits(val));
		intel_uncore_write_fw(uncore, fence_reg_lo, lower_32_bits(val));
		intel_uncore_posting_read_fw(uncore, fence_reg_lo);
	}
}

static void i915_write_fence_reg(struct i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	u32 val;

	val = 0;
	if (vma) {
		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
		bool is_y_tiled = tiling == I915_TILING_Y;
		unsigned int stride = i915_gem_object_get_stride(vma->obj);

		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
		GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
		GEM_BUG_ON(!is_power_of_2(vma->fence_size));
		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));

		if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
			stride /= 128;
		else
			stride /= 512;
		GEM_BUG_ON(!is_power_of_2(stride));

		val = vma->node.start;
		if (is_y_tiled)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I915_FENCE_SIZE_BITS(vma->fence_size);
		val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;

		val |= I830_FENCE_REG_VALID;
	}

	{
		struct intel_uncore *uncore = &fence->i915->uncore;
		i915_reg_t reg = FENCE_REG(fence->id);

		intel_uncore_write_fw(uncore, reg, val);
		intel_uncore_posting_read_fw(uncore, reg);
	}
}

static void i830_write_fence_reg(struct i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	u32 val;

	val = 0;
	if (vma) {
		unsigned int stride = i915_gem_object_get_stride(vma->obj);

		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
		GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
		GEM_BUG_ON(!is_power_of_2(vma->fence_size));
		GEM_BUG_ON(!is_power_of_2(stride / 128));
		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));

		val = vma->node.start;
		if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I830_FENCE_SIZE_BITS(vma->fence_size);
		val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	}

	{
		struct intel_uncore *uncore = &fence->i915->uncore;
		i915_reg_t reg = FENCE_REG(fence->id);

		intel_uncore_write_fw(uncore, reg, val);
		intel_uncore_posting_read_fw(uncore, reg);
	}
}

static void fence_write(struct i915_fence_reg *fence,
			struct i915_vma *vma)
{
	/*
	 * Previous access through the fence register is marshalled by
	 * the mb() inside the fault handlers (i915_gem_release_mmaps)
	 * and explicitly managed for internal users.
	 */

	if (IS_GEN(fence->i915, 2))
		i830_write_fence_reg(fence, vma);
	else if (IS_GEN(fence->i915, 3))
		i915_write_fence_reg(fence, vma);
	else
		i965_write_fence_reg(fence, vma);

	/*
	 * Access through the fenced region afterwards is
	 * ordered by the posting reads whilst writing the registers.
	 */

	fence->dirty = false;
}

static int fence_update(struct i915_fence_reg *fence,
			struct i915_vma *vma)
{
	intel_wakeref_t wakeref;
	struct i915_vma *old;
	int ret;

	if (vma) {
		if (!i915_vma_is_map_and_fenceable(vma))
			return -EINVAL;

		if (WARN(!i915_gem_object_get_stride(vma->obj) ||
			 !i915_gem_object_get_tiling(vma->obj),
			 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
			 i915_gem_object_get_stride(vma->obj),
			 i915_gem_object_get_tiling(vma->obj)))
			return -EINVAL;

		ret = i915_active_request_retire(&vma->last_fence,
						 &vma->obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	old = xchg(&fence->vma, NULL);
	if (old) {
		ret = i915_active_request_retire(&old->last_fence,
						 &old->obj->base.dev->struct_mutex);
		if (ret) {
			fence->vma = old;
			return ret;
		}

		i915_vma_flush_writes(old);

		/*
		 * Ensure that all userspace CPU access is completed before
		 * stealing the fence.
		 */
		if (old != vma) {
			GEM_BUG_ON(old->fence != fence);
			i915_vma_revoke_mmap(old);
			old->fence = NULL;
		}

		list_move(&fence->link, &fence->i915->ggtt.fence_list);
	}

	/*
	 * We only need to update the register itself if the device is awake.
	 * If the device is currently powered down, we will defer the write
	 * to the runtime resume, see i915_gem_restore_fences().
	 *
	 * This only works for removing the fence register, on acquisition
	 * the caller must hold the rpm wakeref. The fence register must
	 * be cleared before we can use any other fences to ensure that
	 * the new fences do not overlap the elided clears, confusing HW.
	 */
	wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm);
	if (!wakeref) {
		GEM_BUG_ON(vma);
		return 0;
	}

	WRITE_ONCE(fence->vma, vma);
	fence_write(fence, vma);

	if (vma) {
		vma->fence = fence;
		list_move_tail(&fence->link, &fence->i915->ggtt.fence_list);
	}

	intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref);
	return 0;
}

/**
 * i915_vma_put_fence - force-remove fence for a VMA
 * @vma: vma to map linearly (not through a fence reg)
 *
 * This function force-removes any fence from the given object, which is useful
 * if the kernel wants to do untiled GTT access.
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int i915_vma_put_fence(struct i915_vma *vma)
{
	struct i915_fence_reg *fence = vma->fence;

	if (!fence)
		return 0;

	if (fence->pin_count)
		return -EBUSY;

	return fence_update(fence, NULL);
}

static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
{
	struct i915_fence_reg *fence;

	list_for_each_entry(fence, &i915->ggtt.fence_list, link) {
		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);

		if (fence->pin_count)
			continue;

		return fence;
	}

	/* Wait for completion of pending flips which consume fences */
	if (intel_has_pending_fb_unpin(i915))
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-EDEADLK);
}

/**
 * i915_vma_pin_fence - set up fencing for a vma
 * @vma: vma to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @vma,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int i915_vma_pin_fence(struct i915_vma *vma)
{
	struct i915_fence_reg *fence;
	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
	int err;

	/*
	 * Note that we revoke fences on runtime suspend. Therefore the user
	 * must keep the device awake whilst using the fence.
	 */
	assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);

	/* Just update our place in the LRU if our fence is getting reused. */
	if (vma->fence) {
		fence = vma->fence;
		GEM_BUG_ON(fence->vma != vma);
		fence->pin_count++;
		if (!fence->dirty) {
			list_move_tail(&fence->link,
				       &fence->i915->ggtt.fence_list);
			return 0;
		}
	} else if (set) {
		fence = fence_find(vma->vm->i915);
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		GEM_BUG_ON(fence->pin_count);
		fence->pin_count++;
	} else {
		return 0;
	}

	err = fence_update(fence, set);
	if (err)
		goto out_unpin;

	GEM_BUG_ON(fence->vma != set);
	GEM_BUG_ON(vma->fence != (set ? fence : NULL));

	if (set)
		return 0;

out_unpin:
	fence->pin_count--;
	return err;
}

/**
 * i915_reserve_fence - Reserve a fence for vGPU
 * @i915: i915 device private
 *
 * This function walks the fence regs looking for a free one, removes
 * it from the fence_list, and returns it. It is used to reserve a fence
 * for vGPU to use.
 */
struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
{
	struct i915_fence_reg *fence;
	int count;
	int ret;

	lockdep_assert_held(&i915->drm.struct_mutex);

	/* Keep at least one fence available for the display engine. */
	count = 0;
	list_for_each_entry(fence, &i915->ggtt.fence_list, link)
		count += !fence->pin_count;
	if (count <= 1)
		return ERR_PTR(-ENOSPC);

	fence = fence_find(i915);
	if (IS_ERR(fence))
		return fence;

	if (fence->vma) {
		/* Force-remove fence from VMA */
		ret = fence_update(fence, NULL);
		if (ret)
			return ERR_PTR(ret);
	}

	list_del(&fence->link);

	return fence;
}

/**
 * i915_unreserve_fence - Reclaim a reserved fence
 * @fence: the fence reg
 *
 * This function adds a fence register previously reserved for vGPU back
 * to the fence_list.
 */
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
	lockdep_assert_held(&fence->i915->drm.struct_mutex);

	list_add(&fence->link, &fence->i915->ggtt.fence_list);
}

/**
 * i915_gem_restore_fences - restore fence state
 * @i915: i915 device private
 *
 * Restore the hw fence state to match the software tracking again, to be called
 * after a gpu reset and on resume. Note that on runtime suspend we only cancel
 * the fences, to be reacquired by the user later.
 */
void i915_gem_restore_fences(struct drm_i915_private *i915)
{
	int i;

	rcu_read_lock(); /* keep obj alive as we dereference */
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
		struct i915_vma *vma = READ_ONCE(reg->vma);

		GEM_BUG_ON(vma && vma->fence != reg);

		/*
		 * Commit delayed tiling changes if we have an object still
		 * attached to the fence, otherwise just clear the fence.
		 */
		if (vma && !i915_gem_object_is_tiled(vma->obj))
			vma = NULL;

		fence_write(reg, vma);
	}
	rcu_read_unlock();
}

/**
 * DOC: tiling swizzling details
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvement from doing this on the back/depth buffer is on
 * the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMS) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled. However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y. So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics. This
 * is called "Channel XOR Randomization" in the MCH documentation. The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all. Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what the address
 * swizzling it needs to do is, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */
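
/*
 * An illustrative sketch of the swizzle described above (not used by the
 * driver; the helper name is hypothetical). For the common
 * I915_BIT_6_SWIZZLE_9_10 mode, bits 9 and 10 of a linear byte offset are
 * folded into bit 6 before the CPU touches tiled pages:
 */
static inline u32 example_swizzle_offset_9_10(u32 offset)
{
	/* XOR bit 9 and bit 10 of the address down onto bit 6. */
	return offset ^ ((((offset >> 9) ^ (offset >> 10)) & 1) << 6);
}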

/**
 * detect_bit_6_swizzle - detect bit 6 swizzling pattern
 * @i915: i915 device private
 *
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
static void detect_bit_6_swizzle(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (INTEL_GEN(i915) >= 8 || IS_VALLEYVIEW(i915)) {
		/*
		 * On BDW+, swizzling is not used. We leave the CPU memory
		 * controller in charge of optimizing memory accesses without
		 * the extra address manipulation GPU side.
		 *
		 * VLV and CHV don't have GPU swizzling.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (INTEL_GEN(i915) >= 6) {
		if (i915->preserve_bios_swizzle) {
			if (intel_uncore_read(uncore, DISP_ARB_CTL) &
			    DISP_TILE_SURFACE_SWIZZLING) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		} else {
			u32 dimm_c0, dimm_c1;

			dimm_c0 = intel_uncore_read(uncore, MAD_DIMM_C0);
			dimm_c1 = intel_uncore_read(uncore, MAD_DIMM_C1);
			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			/*
			 * Enable swizzling when the channels are populated
			 * with identically sized dimms. We don't need to check
			 * the 3rd channel because no cpu with gpu attached
			 * ships in that configuration. Also, swizzling only
			 * makes sense for 2 channels anyway.
			 */
			if (dimm_c0 == dimm_c1) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		}
	} else if (IS_GEN(i915, 5)) {
		/*
		 * On Ironlake, whatever the DRAM config, the GPU always does
		 * the same swizzling setup.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN(i915, 2)) {
		/*
		 * As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_G45(i915) || IS_I965G(i915) || IS_G33(i915)) {
		/*
		 * The 965, G33, and newer, have a very flexible memory
		 * configuration. It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B    1-ch   2-ch
		 * 512  0    0    0     512    0     O
		 * 512  0    512  0     16     1008  X
		 * 512  0    0    512   16     1008  X
		 * 0    512  0    512   16     1008  X
		 * 1024 1024 1024 0     2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 *
		 * Reports indicate that the swizzling actually
		 * varies depending upon page placement inside the
		 * channels, i.e. we see swizzled pages where the
		 * banks of memory are paired and unswizzled on the
		 * uneven portion, so leave that as unknown.
		 */
		if (intel_uncore_read(uncore, C0DRB3) ==
		    intel_uncore_read(uncore, C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	} else {
		u32 dcc = intel_uncore_read(uncore, DCC);

		/*
		 * On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC. For single-channel, neither the CPU
		 * nor the GPU do swizzling. For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled. The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/*
				 * This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}

		/* check for L-shaped memory aka modified enhanced addressing */
		if (IS_GEN(i915, 4) &&
		    !(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}

		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR. Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	}

	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
	    swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
		/*
		 * Userspace likes to explode if it sees unknown swizzling,
		 * so lie. We will finish the lie when reporting through
		 * the get-tiling-ioctl by reporting the physical swizzle
		 * mode as unknown instead.
		 *
		 * As we don't strictly know what the swizzling is, it may be
		 * bit17 dependent, and so we need to also prevent the pages
		 * from being moved.
		 */
		i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	}

	i915->mm.bit_6_swizzle_x = swizzle_x;
	i915->mm.bit_6_swizzle_y = swizzle_y;
}

/*
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void i915_gem_swizzle_page(struct page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	kunmap(page);
}

/**
 * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function fixes up the swizzling in case any page frame number for this
 * object has changed in bit 17 since that state has been saved with
 * i915_gem_object_save_bit_17_swizzle().
 *
 * This is called when pinning backing storage again, since the kernel is free
 * to move unpinned backing storage around (either by directly moving pages or
 * by swapping them out and back in again).
 */
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
				  struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL)
		return;

	i = 0;
	for_each_sgt_page(page, sgt_iter, pages) {
		char new_bit_17 = page_to_phys(page) >> 17;

		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
			i915_gem_swizzle_page(page);
			set_page_dirty(page);
		}
		i++;
	}
}

/**
 * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function saves the bit 17 of each page frame number so that swizzling
 * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
 * be called before the backing storage can be unpinned.
 */
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
				    struct sg_table *pages)
{
	const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 record\n");
			return;
		}
	}

	i = 0;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (page_to_phys(page) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj->bit_17);
		i++;
	}
}

void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	int num_fences;
	int i;

	INIT_LIST_HEAD(&ggtt->fence_list);
	INIT_LIST_HEAD(&ggtt->userfault_list);
	intel_wakeref_auto_init(&ggtt->userfault_wakeref, &i915->runtime_pm);

	detect_bit_6_swizzle(i915);

	if (INTEL_GEN(i915) >= 7 &&
	    !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
		num_fences = 32;
	else if (INTEL_GEN(i915) >= 4 ||
		 IS_I945G(i915) || IS_I945GM(i915) ||
		 IS_G33(i915) || IS_PINEVIEW(i915))
		num_fences = 16;
	else
		num_fences = 8;

	if (intel_vgpu_active(i915))
		num_fences = intel_uncore_read(&i915->uncore,
					       vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	for (i = 0; i < num_fences; i++) {
		struct i915_fence_reg *fence = &ggtt->fence_regs[i];

		fence->i915 = i915;
		fence->id = i;
		list_add_tail(&fence->link, &ggtt->fence_list);
	}
	ggtt->num_fences = num_fences;

	i915_gem_restore_fences(i915);
}