/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <drm/i915_drm.h>
/**
 * DOC: fence register handling
 *
 * Important to avoid confusion: "fences" in the i915 driver are not execution
 * fences used to track command completion but hardware detiler objects which
 * wrap a given range of the global GTT. Each platform has only a fairly limited
 * set of these objects.
 *
 * Fences are used to detile GTT memory mappings. They're also connected to the
 * hardware frontbuffer render tracking and hence interact with frontbuffer
 * compression. Furthermore on older platforms fences are required for tiled
 * objects used by the display engine. They can also be used by the render
 * engine - they're required for blitter commands and are optional for render
 * commands. But on gen4+ both display (with the exception of fbc) and rendering
 * have their own tiling state bits and don't need fences.
 *
 * Also note that fences only support X and Y tiling and hence can't be used for
 * the fancier new tiling formats like W, Ys and Yf.
 *
 * Finally note that because fences are such a restricted resource they're
 * dynamically associated with objects. Furthermore fence state is committed to
 * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
 * explicitly call i915_vma_pin_fence() to synchronize fencing status
 * for cpu access. Also note that some code wants an unfenced view, for those
 * cases the fence can be removed forcefully with i915_vma_put_fence().
 *
 * Internally these functions will synchronize with userspace access by removing
 * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
 */
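/*
 * Illustrative caller sketch (not compiled, not part of the driver): a
 * kernel-internal user of a detiled GGTT mmap pins a fence around the
 * access and keeps the device awake, since fences are revoked on runtime
 * suspend. Only i915_vma_pin_fence(), i915_vma_unpin_fence() and the
 * runtime-pm helpers are real interfaces; the function name and the elided
 * access itself are placeholders.
 */
#if 0
static int __example_fenced_access(struct i915_vma *vma)
{
	intel_wakeref_t wakeref;
	int err;

	/* Fences are revoked on runtime suspend, so hold a wakeref. */
	wakeref = intel_runtime_pm_get(vma->vm->i915);

	err = i915_vma_pin_fence(vma);
	if (err == 0) {
		/* ... detiled CPU access through the GGTT mmap here ... */
		if (vma->fence)
			i915_vma_unpin_fence(vma);
	}

	intel_runtime_pm_put(vma->vm->i915, wakeref);
	return err;
}
#endif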
static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	i915_reg_t fence_reg_lo, fence_reg_hi;
	int fence_pitch_shift;
	u64 val;

	if (INTEL_GEN(fence->i915) >= 6) {
		fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
		fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
		fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
	} else {
		fence_reg_lo = FENCE_REG_965_LO(fence->id);
		fence_reg_hi = FENCE_REG_965_HI(fence->id);
		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
	}

	val = 0;
	if (vma) {
		unsigned int stride = i915_gem_object_get_stride(vma->obj);

		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
		GEM_BUG_ON(!IS_ALIGNED(stride, 128));

		val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
		val |= vma->node.start;
		val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
		if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
			val |= BIT(I965_FENCE_TILING_Y_SHIFT);
		val |= I965_FENCE_REG_VALID;
	}
	{
		struct drm_i915_private *dev_priv = fence->i915;

		/* To w/a incoherency with non-atomic 64-bit register updates,
		 * we split the 64-bit update into two 32-bit writes. In order
		 * for a partial fence not to be evaluated between writes, we
		 * precede the update with write to turn off the fence register,
		 * and only enable the fence as the last step.
		 *
		 * For extra levels of paranoia, we make sure each step lands
		 * before applying the next step.
		 */
		I915_WRITE(fence_reg_lo, 0);
		POSTING_READ(fence_reg_lo);

		I915_WRITE(fence_reg_hi, upper_32_bits(val));
		I915_WRITE(fence_reg_lo, lower_32_bits(val));
		POSTING_READ(fence_reg_lo);
	}
}
static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	u32 val;

	val = 0;
	if (vma) {
		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
		bool is_y_tiled = tiling == I915_TILING_Y;
		unsigned int stride = i915_gem_object_get_stride(vma->obj);

		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
		GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
		GEM_BUG_ON(!is_power_of_2(vma->fence_size));
		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));

		if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
			stride /= 128;
		else
			stride /= 512;
		GEM_BUG_ON(!is_power_of_2(stride));

		val = vma->node.start;
		if (is_y_tiled)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I915_FENCE_SIZE_BITS(vma->fence_size);
		val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;

		val |= I830_FENCE_REG_VALID;
	}
	{
		struct drm_i915_private *dev_priv = fence->i915;
		i915_reg_t reg = FENCE_REG(fence->id);

		I915_WRITE(reg, val);
		POSTING_READ(reg);
	}
}
static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	u32 val;

	val = 0;
	if (vma) {
		unsigned int stride = i915_gem_object_get_stride(vma->obj);

		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
		GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
		GEM_BUG_ON(!is_power_of_2(vma->fence_size));
		GEM_BUG_ON(!is_power_of_2(stride / 128));
		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));

		val = vma->node.start;
		if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I830_FENCE_SIZE_BITS(vma->fence_size);
		val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	}
	{
		struct drm_i915_private *dev_priv = fence->i915;
		i915_reg_t reg = FENCE_REG(fence->id);

		I915_WRITE(reg, val);
		POSTING_READ(reg);
	}
}
static void fence_write(struct drm_i915_fence_reg *fence,
			struct i915_vma *vma)
{
	/* Previous access through the fence register is marshalled by
	 * the mb() inside the fault handlers (i915_gem_release_mmaps)
	 * and explicitly managed for internal users.
	 */

	if (IS_GEN(fence->i915, 2))
		i830_write_fence_reg(fence, vma);
	else if (IS_GEN(fence->i915, 3))
		i915_write_fence_reg(fence, vma);
	else
		i965_write_fence_reg(fence, vma);

	/* Access through the fenced region afterwards is
	 * ordered by the posting reads whilst writing the registers.
	 */

	fence->dirty = false;
}
static int fence_update(struct drm_i915_fence_reg *fence,
			struct i915_vma *vma)
{
	intel_wakeref_t wakeref;
	struct i915_vma *old;
	int ret;

	if (vma) {
		if (!i915_vma_is_map_and_fenceable(vma))
			return -EINVAL;

		if (WARN(!i915_gem_object_get_stride(vma->obj) ||
			 !i915_gem_object_get_tiling(vma->obj),
			 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
			 i915_gem_object_get_stride(vma->obj),
			 i915_gem_object_get_tiling(vma->obj)))
			return -EINVAL;

		ret = i915_active_request_retire(&vma->last_fence,
						 &vma->obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	old = xchg(&fence->vma, NULL);
	if (old) {
		ret = i915_active_request_retire(&old->last_fence,
						 &old->obj->base.dev->struct_mutex);
		if (ret) {
			fence->vma = old;
			return ret;
		}

		i915_vma_flush_writes(old);

		/*
		 * Ensure that all userspace CPU access is completed before
		 * stealing the fence.
		 */
		if (old != vma) {
			GEM_BUG_ON(old->fence != fence);
			i915_vma_revoke_mmap(old);
			old->fence = NULL;
		}

		list_move(&fence->link, &fence->i915->mm.fence_list);
	}

	/*
	 * We only need to update the register itself if the device is awake.
	 * If the device is currently powered down, we will defer the write
	 * to the runtime resume, see i915_gem_restore_fences().
	 *
	 * This only works for removing the fence register, on acquisition
	 * the caller must hold the rpm wakeref. The fence register must
	 * be cleared before we can use any other fences to ensure that
	 * the new fences do not overlap the elided clears, confusing HW.
	 */
	wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
	if (!wakeref)
		return 0;

	WRITE_ONCE(fence->vma, vma);
	fence_write(fence, vma);

	if (vma) {
		vma->fence = fence;
		list_move_tail(&fence->link, &fence->i915->mm.fence_list);
	}

	intel_runtime_pm_put(fence->i915, wakeref);
	return 0;
}
/**
 * i915_vma_put_fence - force-remove fence for a VMA
 * @vma: vma to map linearly (not through a fence reg)
 *
 * This function force-removes any fence from the given object, which is useful
 * if the kernel wants to do untiled GTT access.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int i915_vma_put_fence(struct i915_vma *vma)
{
	struct drm_i915_fence_reg *fence = vma->fence;

	if (!fence)
		return 0;

	if (fence->pin_count)
		return -EBUSY;

	return fence_update(fence, NULL);
}
static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
{
	struct drm_i915_fence_reg *fence;

	list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);

		if (fence->pin_count)
			continue;

		return fence;
	}

	/* Wait for completion of pending flips which consume fences */
	if (intel_has_pending_fb_unpin(dev_priv))
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-EDEADLK);
}
/**
 * i915_vma_pin_fence - set up fencing for a vma
 * @vma: vma to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @vma,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
int
i915_vma_pin_fence(struct i915_vma *vma)
{
	struct drm_i915_fence_reg *fence;
	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
	int err;

	/* Note that we revoke fences on runtime suspend. Therefore the user
	 * must keep the device awake whilst using the fence.
	 */
	assert_rpm_wakelock_held(vma->vm->i915);

	/* Just update our place in the LRU if our fence is getting reused. */
	if (vma->fence) {
		fence = vma->fence;
		GEM_BUG_ON(fence->vma != vma);
		fence->pin_count++;
		if (!fence->dirty) {
			list_move_tail(&fence->link,
				       &fence->i915->mm.fence_list);
			return 0;
		}
	} else if (set) {
		fence = fence_find(vma->vm->i915);
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		GEM_BUG_ON(fence->pin_count);
		fence->pin_count++;
	} else {
		return 0;
	}

	err = fence_update(fence, set);

	GEM_BUG_ON(fence->vma != set);
	GEM_BUG_ON(vma->fence != (set ? fence : NULL));
/**
 * i915_reserve_fence - Reserve a fence for vGPU
 * @dev_priv: i915 device private
 *
 * This function walks the fence regs looking for a free one and removes it
 * from the fence_list. It is used to reserve a fence for vGPU to use.
 */
struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv)
{
	struct drm_i915_fence_reg *fence;
	int count, ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Keep at least one fence available for the display engine. */
	count = 0;
	list_for_each_entry(fence, &dev_priv->mm.fence_list, link)
		count += !fence->pin_count;
	if (count <= 1)
		return ERR_PTR(-ENOSPC);

	fence = fence_find(dev_priv);
	if (IS_ERR(fence))
		return fence;

	/* Force-remove fence from VMA */
	ret = fence_update(fence, NULL);
	if (ret)
		return ERR_PTR(ret);

	list_del(&fence->link);
	return fence;
}
/**
 * i915_unreserve_fence - Reclaim a reserved fence
 * @fence: the fence reg
 *
 * This function adds a reserved fence register from vGPU back to the fence_list.
 */
void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
{
	lockdep_assert_held(&fence->i915->drm.struct_mutex);

	list_add(&fence->link, &fence->i915->mm.fence_list);
}
/**
 * i915_gem_restore_fences - restore fence state
 * @dev_priv: i915 device private
 *
 * Restore the hw fence state to match the software tracking again, to be called
 * after a gpu reset and on resume. Note that on runtime suspend we only cancel
 * the fences, to be reacquired by the user later.
 */
void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
{
	int i;

	rcu_read_lock(); /* keep obj alive as we dereference */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
		struct i915_vma *vma = READ_ONCE(reg->vma);

		GEM_BUG_ON(vma && vma->fence != reg);

		/*
		 * Commit delayed tiling changes if we have an object still
		 * attached to the fence, otherwise just clear the fence.
		 */
		if (vma && !i915_gem_object_is_tiled(vma->obj))
			vma = NULL;

		fence_write(reg, vma);
	}
	rcu_read_unlock();
}
/**
 * DOC: tiling swizzling details
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvements from doing this on the back/depth buffer are on
 * the order of 30%-50%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMS) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled. However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y. So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics. This
 * is called "Channel XOR Randomization" in the MCH documentation. The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decoding.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all. Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what the address
 * swizzling it needs to do is, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */
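/*
 * Worked example (illustrative only, not driver code): with the
 * I915_BIT_6_SWIZZLE_9_10 mode detected below, bit 6 of a byte address is
 * XORed with bits 9 and 10, so CPU code detiling such a buffer must adjust
 * its offsets roughly like this. The helper name is hypothetical.
 */
#if 0
static unsigned long __example_swizzle_bit_6_9_10(unsigned long offset)
{
	/* Fold bits 9 and 10 of the offset into bit 6. */
	return offset ^ ((((offset >> 9) ^ (offset >> 10)) & 1) << 6);
}
#endif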
/**
 * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
 * @dev_priv: i915 device private
 *
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
{
	u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
	if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
		/*
		 * On BDW+, swizzling is not used. We leave the CPU memory
		 * controller in charge of optimizing memory accesses without
		 * the extra address manipulation GPU side.
		 *
		 * VLV and CHV don't have GPU swizzling.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		if (dev_priv->preserve_bios_swizzle) {
			if (I915_READ(DISP_ARB_CTL) &
			    DISP_TILE_SURFACE_SWIZZLING) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		} else {
			u32 dimm_c0, dimm_c1;

			dimm_c0 = I915_READ(MAD_DIMM_C0);
			dimm_c1 = I915_READ(MAD_DIMM_C1);
			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			/* Enable swizzling when the channels are populated
			 * with identically sized dimms. We don't need to check
			 * the 3rd channel because no cpu with gpu attached
			 * ships in that configuration. Also, swizzling only
			 * makes sense for 2 channels anyway. */
			if (dimm_c0 == dimm_c1) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		}
	} else if (IS_GEN(dev_priv, 5)) {
		/* On Ironlake, whatever the DRAM config, the GPU always does
		 * the same swizzling setup.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN(dev_priv, 2)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_G45(dev_priv) || IS_I965G(dev_priv) || IS_G33(dev_priv)) {
		/* The 965, G33, and newer, have a very flexible memory
		 * configuration. It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B    1-ch   2-ch
		 * 512  0    512  0     16     1008   X
		 * 512  0    0    512   16     1008   X
		 * 0    512  0    512   16     1008   X
		 * 1024 1024 1024 0     2048   1024   O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 *
		 * Reports indicate that the swizzling actually
		 * varies depending upon page placement inside the
		 * channels, i.e. we see swizzled pages where the
		 * banks of memory are paired and unswizzled on the
		 * uneven portion, so leave that as unknown.
		 */
		if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	} else {
		u32 dcc;

		/* On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC. For single-channel, neither the CPU
		 * nor the GPU do swizzling. For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled. The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		dcc = I915_READ(DCC);
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/* This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}
		/* check for L-shaped memory aka modified enhanced addressing */
		if (IS_GEN(dev_priv, 4) &&
		    !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}

		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR. "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	}
	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
	    swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
		/* Userspace likes to explode if it sees unknown swizzling,
		 * so lie. We will finish the lie when reporting through
		 * the get-tiling-ioctl by reporting the physical swizzle
		 * mode as unknown instead.
		 *
		 * As we don't strictly know what the swizzling is, it may be
		 * bit17 dependent, and so we need to also prevent the pages
		 * from being moved.
		 */
		dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	}

	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
/*
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void i915_gem_swizzle_page(struct page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);
	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}
	kunmap(page);
}
/**
 * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function fixes up the swizzling in case any page frame number for this
 * object has changed in bit 17 since that state has been saved with
 * i915_gem_object_save_bit_17_swizzle().
 *
 * This is called when pinning backing storage again, since the kernel is free
 * to move unpinned backing storage around (either by directly moving pages or
 * by swapping them out and back in again).
 */
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
				  struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL)
		return;

	i = 0;
	for_each_sgt_page(page, sgt_iter, pages) {
		char new_bit_17 = page_to_phys(page) >> 17;

		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
			i915_gem_swizzle_page(page);
			set_page_dirty(page);
		}
		i++;
	}
}
/**
 * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function saves the bit 17 of each page frame number so that swizzling
 * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
 * be called before the backing storage can be unpinned.
 */
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
				    struct sg_table *pages)
{
	const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 "
				  "record\n");
			return;
		}
	}

	i = 0;
	for_each_sgt_page(page, sgt_iter, pages) {
		if (page_to_phys(page) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj->bit_17);
		i++;
	}
}