2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
25 * DOC: Frame Buffer Compression (FBC)
27 * FBC tries to save memory bandwidth (and so power consumption) by
28 * compressing the amount of memory used by the display. It is totally
29 * transparent to user space and completely handled in the kernel.
31 * The benefits of FBC are mostly visible with solid backgrounds and
32 * variation-less patterns. It comes from keeping the memory footprint small
33 * and having fewer memory pages opened and accessed for refreshing the display.
35 * i915 is responsible for reserving stolen memory for FBC and configuring its
36 * offset on proper registers. The hardware takes care of all
37 * compress/decompress. However there are many known cases where we have to
38 * forcibly disable it to allow proper screen updates.
41 #include <drm/drm_fourcc.h>
44 #include "i915_trace.h"
45 #include "i915_vgpu.h"
47 #include "intel_display_types.h"
48 #include "intel_fbc.h"
49 #include "intel_frontbuffer.h"
52 * For SKL+, the plane source size used by the hardware is based on the value we
53 * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
54 * we wrote to PIPESRC.
56 static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *cache,
57 int *width, int *height)
60 *width = cache->plane.src_w;
62 *height = cache->plane.src_h;
65 /* plane stride in pixels */
66 static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
68 const struct drm_framebuffer *fb = plane_state->hw.fb;
71 stride = plane_state->view.color_plane[0].mapping_stride;
72 if (!drm_rotation_90_or_270(plane_state->hw.rotation))
73 stride /= fb->format->cpp[0];
78 /* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
79 static unsigned int _intel_fbc_cfb_stride(const struct intel_fbc_state_cache *cache)
81 unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
83 return cache->fb.stride * cpp;
86 /* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
87 static unsigned int skl_fbc_min_cfb_stride(struct drm_i915_private *i915,
88 const struct intel_fbc_state_cache *cache)
90 unsigned int limit = 4; /* 1:4 compression limit is the worst case */
91 unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
92 unsigned int height = 4; /* FBC segment is 4 lines */
95 /* minimum segment stride we can use */
96 stride = cache->plane.src_w * cpp * height / limit;
99 * Wa_16011863758: icl+
100 * Avoid some hardware segment address miscalculation.
102 if (DISPLAY_VER(i915) >= 11)
106 * At least some of the platforms require each 4 line segment to
107 * be 512 byte aligned. Just do it always for simplicity.
109 stride = ALIGN(stride, 512);
111 /* convert back to single line equivalent with 1:1 compression limit */
112 return stride * limit / height;
115 /* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
116 static unsigned int intel_fbc_cfb_stride(struct drm_i915_private *i915,
117 const struct intel_fbc_state_cache *cache)
119 unsigned int stride = _intel_fbc_cfb_stride(cache);
122 * At least some of the platforms require each 4 line segment to
123 * be 512 byte aligned. Aligning each line to 512 bytes guarantees
124 * that regardless of the compression limit we choose later.
126 if (DISPLAY_VER(i915) >= 9)
127 return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(i915, cache));
132 static unsigned int intel_fbc_cfb_size(struct drm_i915_private *dev_priv,
133 const struct intel_fbc_state_cache *cache)
135 int lines = cache->plane.src_h;
137 if (DISPLAY_VER(dev_priv) == 7)
138 lines = min(lines, 2048);
139 else if (DISPLAY_VER(dev_priv) >= 8)
140 lines = min(lines, 2560);
142 return lines * intel_fbc_cfb_stride(dev_priv, cache);
145 static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
149 /* Disable compression */
150 fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
151 if ((fbc_ctl & FBC_CTL_EN) == 0)
154 fbc_ctl &= ~FBC_CTL_EN;
155 intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
157 /* Wait for compressing bit to clear */
158 if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
159 FBC_STAT_COMPRESSING, 10)) {
160 drm_dbg_kms(&dev_priv->drm, "FBC idle timed out\n");
165 static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
167 struct intel_fbc *fbc = &dev_priv->fbc;
168 const struct intel_fbc_reg_params *params = &fbc->params;
173 cfb_pitch = params->cfb_stride / fbc->limit;
175 /* FBC_CTL wants 32B or 64B units */
176 if (DISPLAY_VER(dev_priv) == 2)
177 cfb_pitch = (cfb_pitch / 32) - 1;
179 cfb_pitch = (cfb_pitch / 64) - 1;
182 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
183 intel_de_write(dev_priv, FBC_TAG(i), 0);
185 if (DISPLAY_VER(dev_priv) == 4) {
189 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM;
190 fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
191 if (params->fence_id >= 0)
192 fbc_ctl2 |= FBC_CTL_CPU_FENCE;
193 intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2);
194 intel_de_write(dev_priv, FBC_FENCE_OFF,
195 params->fence_y_offset);
199 fbc_ctl = FBC_CTL_INTERVAL(params->interval);
200 fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
201 if (IS_I945GM(dev_priv))
202 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
203 fbc_ctl |= FBC_CTL_STRIDE(cfb_pitch & 0xff);
204 if (params->fence_id >= 0)
205 fbc_ctl |= FBC_CTL_FENCENO(params->fence_id);
206 intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
209 static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
211 return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN;
214 static bool i8xx_fbc_is_compressing(struct drm_i915_private *i915)
216 return intel_de_read(i915, FBC_STATUS) &
217 (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
220 static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915)
222 switch (i915->fbc.limit) {
224 MISSING_CASE(i915->fbc.limit);
227 return DPFC_CTL_LIMIT_1X;
229 return DPFC_CTL_LIMIT_2X;
231 return DPFC_CTL_LIMIT_4X;
235 static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
237 struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
240 dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane) | DPFC_SR_EN;
242 dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv);
244 if (params->fence_id >= 0)
245 dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
247 intel_de_write(dev_priv, DPFC_FENCE_YOFF,
248 params->fence_y_offset);
251 intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
254 static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
258 /* Disable compression */
259 dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL);
260 if (dpfc_ctl & DPFC_CTL_EN) {
261 dpfc_ctl &= ~DPFC_CTL_EN;
262 intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl);
266 static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
268 return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN;
271 static bool g4x_fbc_is_compressing(struct drm_i915_private *i915)
273 return intel_de_read(i915, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
276 static void i8xx_fbc_recompress(struct drm_i915_private *dev_priv)
278 struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
279 enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
281 spin_lock_irq(&dev_priv->uncore.lock);
282 intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
283 intel_de_read_fw(dev_priv, DSPADDR(i9xx_plane)));
284 spin_unlock_irq(&dev_priv->uncore.lock);
287 static void i965_fbc_recompress(struct drm_i915_private *dev_priv)
289 struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
290 enum i9xx_plane_id i9xx_plane = params->crtc.i9xx_plane;
292 spin_lock_irq(&dev_priv->uncore.lock);
293 intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
294 intel_de_read_fw(dev_priv, DSPSURF(i9xx_plane)));
295 spin_unlock_irq(&dev_priv->uncore.lock);
298 /* This function forces a CFB recompression through the nuke operation. */
299 static void snb_fbc_recompress(struct drm_i915_private *dev_priv)
301 intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE);
302 intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
305 static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
307 struct intel_fbc *fbc = &dev_priv->fbc;
309 trace_intel_fbc_nuke(fbc->crtc);
311 if (DISPLAY_VER(dev_priv) >= 6)
312 snb_fbc_recompress(dev_priv);
313 else if (DISPLAY_VER(dev_priv) >= 4)
314 i965_fbc_recompress(dev_priv);
316 i8xx_fbc_recompress(dev_priv);
319 static void snb_fbc_program_fence(struct drm_i915_private *i915)
321 const struct intel_fbc_reg_params *params = &i915->fbc.params;
324 if (params->fence_id >= 0)
325 ctl = SNB_CPU_FENCE_ENABLE | params->fence_id;
327 intel_de_write(i915, SNB_DPFC_CTL_SA, ctl);
328 intel_de_write(i915, DPFC_CPU_FENCE_OFFSET, params->fence_y_offset);
331 static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
333 struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
336 dpfc_ctl = DPFC_CTL_PLANE(params->crtc.i9xx_plane);
338 dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv);
340 if (params->fence_id >= 0) {
341 dpfc_ctl |= DPFC_CTL_FENCE_EN;
342 if (IS_IRONLAKE(dev_priv))
343 dpfc_ctl |= params->fence_id;
346 if (IS_SANDYBRIDGE(dev_priv))
347 snb_fbc_program_fence(dev_priv);
349 intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF,
350 params->fence_y_offset);
352 intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
355 static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
359 /* Disable compression */
360 dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
361 if (dpfc_ctl & DPFC_CTL_EN) {
362 dpfc_ctl &= ~DPFC_CTL_EN;
363 intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl);
367 static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
369 return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN;
372 static bool ilk_fbc_is_compressing(struct drm_i915_private *i915)
374 return intel_de_read(i915, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
377 static void glk_fbc_program_cfb_stride(struct drm_i915_private *i915)
379 struct intel_fbc *fbc = &i915->fbc;
380 const struct intel_fbc_reg_params *params = &fbc->params;
383 if (params->override_cfb_stride)
384 val |= FBC_STRIDE_OVERRIDE |
385 FBC_STRIDE(params->override_cfb_stride / fbc->limit);
387 intel_de_write(i915, GLK_FBC_STRIDE, val);
390 static void skl_fbc_program_cfb_stride(struct drm_i915_private *i915)
392 struct intel_fbc *fbc = &i915->fbc;
393 const struct intel_fbc_reg_params *params = &fbc->params;
396 /* Display WA #0529: skl, kbl, bxt. */
397 if (params->override_cfb_stride)
398 val |= CHICKEN_FBC_STRIDE_OVERRIDE |
399 CHICKEN_FBC_STRIDE(params->override_cfb_stride / fbc->limit);
401 intel_de_rmw(i915, CHICKEN_MISC_4,
402 CHICKEN_FBC_STRIDE_OVERRIDE |
403 CHICKEN_FBC_STRIDE_MASK, val);
406 static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
408 struct intel_fbc *fbc = &dev_priv->fbc;
409 const struct intel_fbc_reg_params *params = &fbc->params;
412 if (DISPLAY_VER(dev_priv) >= 10)
413 glk_fbc_program_cfb_stride(dev_priv);
414 else if (DISPLAY_VER(dev_priv) == 9)
415 skl_fbc_program_cfb_stride(dev_priv);
418 if (IS_IVYBRIDGE(dev_priv))
419 dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.i9xx_plane);
421 dpfc_ctl |= g4x_dpfc_ctl_limit(dev_priv);
423 if (params->fence_id >= 0)
424 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
426 if (dev_priv->fbc.false_color)
427 dpfc_ctl |= FBC_CTL_FALSE_COLOR;
429 if (dev_priv->ggtt.num_fences)
430 snb_fbc_program_fence(dev_priv);
432 intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
435 static bool gen7_fbc_is_compressing(struct drm_i915_private *i915)
437 if (DISPLAY_VER(i915) >= 8)
438 return intel_de_read(i915, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
440 return intel_de_read(i915, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
443 static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
445 if (DISPLAY_VER(dev_priv) >= 5)
446 return ilk_fbc_is_active(dev_priv);
447 else if (IS_GM45(dev_priv))
448 return g4x_fbc_is_active(dev_priv);
450 return i8xx_fbc_is_active(dev_priv);
453 static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
455 struct intel_fbc *fbc = &dev_priv->fbc;
457 trace_intel_fbc_activate(fbc->crtc);
460 fbc->activated = true;
462 if (DISPLAY_VER(dev_priv) >= 7)
463 gen7_fbc_activate(dev_priv);
464 else if (DISPLAY_VER(dev_priv) >= 5)
465 ilk_fbc_activate(dev_priv);
466 else if (IS_GM45(dev_priv))
467 g4x_fbc_activate(dev_priv);
469 i8xx_fbc_activate(dev_priv);
472 static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
474 struct intel_fbc *fbc = &dev_priv->fbc;
476 trace_intel_fbc_deactivate(fbc->crtc);
480 if (DISPLAY_VER(dev_priv) >= 5)
481 ilk_fbc_deactivate(dev_priv);
482 else if (IS_GM45(dev_priv))
483 g4x_fbc_deactivate(dev_priv);
485 i8xx_fbc_deactivate(dev_priv);
488 bool intel_fbc_is_compressing(struct drm_i915_private *i915)
490 if (DISPLAY_VER(i915) >= 7)
491 return gen7_fbc_is_compressing(i915);
492 else if (DISPLAY_VER(i915) >= 5)
493 return ilk_fbc_is_compressing(i915);
494 else if (IS_G4X(i915))
495 return g4x_fbc_is_compressing(i915);
497 return i8xx_fbc_is_compressing(i915);
501 * intel_fbc_is_active - Is FBC active?
502 * @dev_priv: i915 device instance
504 * This function is used to verify the current state of FBC.
506 * FIXME: This should be tracked in the plane config eventually
507 * instead of queried at runtime for most callers.
509 bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
511 return dev_priv->fbc.active;
514 static void intel_fbc_activate(struct drm_i915_private *dev_priv)
516 intel_fbc_hw_activate(dev_priv);
517 intel_fbc_recompress(dev_priv);
520 static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
523 struct intel_fbc *fbc = &dev_priv->fbc;
525 drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
528 intel_fbc_hw_deactivate(dev_priv);
530 fbc->no_fbc_reason = reason;
533 static u64 intel_fbc_cfb_base_max(struct drm_i915_private *i915)
535 if (DISPLAY_VER(i915) >= 5 || IS_G4X(i915))
541 static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv)
545 /* The FBC hardware for BDW/SKL doesn't have access to the stolen
546 * reserved range size, so it always assumes the maximum (8mb) is used.
547 * If we enable FBC using a CFB on that memory range we'll get FIFO
548 * underruns, even if that range is not reserved by the BIOS. */
549 if (IS_BROADWELL(dev_priv) || (DISPLAY_VER(dev_priv) == 9 &&
550 !IS_BROXTON(dev_priv)))
551 end = resource_size(&dev_priv->dsm) - 8 * 1024 * 1024;
555 return min(end, intel_fbc_cfb_base_max(dev_priv));
558 static int intel_fbc_min_limit(int fb_cpp)
560 return fb_cpp == 2 ? 2 : 1;
563 static int intel_fbc_max_limit(struct drm_i915_private *dev_priv)
565 /* WaFbcOnly1to1Ratio:ctg */
566 if (IS_G4X(dev_priv))
570 * FBC2 can only do 1:1, 1:2, 1:4, we limit
571 * FBC1 to the same out of convenience.
576 static int find_compression_limit(struct drm_i915_private *dev_priv,
577 unsigned int size, int min_limit)
579 struct intel_fbc *fbc = &dev_priv->fbc;
580 u64 end = intel_fbc_stolen_end(dev_priv);
581 int ret, limit = min_limit;
585 /* Try to over-allocate to reduce reallocations and fragmentation. */
586 ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb,
587 size <<= 1, 4096, 0, end);
591 for (; limit <= intel_fbc_max_limit(dev_priv); limit <<= 1) {
592 ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb,
593 size >>= 1, 4096, 0, end);
601 static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
602 unsigned int size, int min_limit)
604 struct intel_fbc *fbc = &dev_priv->fbc;
607 drm_WARN_ON(&dev_priv->drm,
608 drm_mm_node_allocated(&fbc->compressed_fb));
609 drm_WARN_ON(&dev_priv->drm,
610 drm_mm_node_allocated(&fbc->compressed_llb));
612 if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
613 ret = i915_gem_stolen_insert_node(dev_priv, &fbc->compressed_llb,
619 ret = find_compression_limit(dev_priv, size, min_limit);
622 else if (ret > min_limit)
623 drm_info_once(&dev_priv->drm,
624 "Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
628 drm_dbg_kms(&dev_priv->drm,
629 "reserved %llu bytes of contiguous stolen space for FBC, limit: %d\n",
630 fbc->compressed_fb.size, fbc->limit);
635 if (drm_mm_node_allocated(&fbc->compressed_llb))
636 i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb);
638 if (drm_mm_initialized(&dev_priv->mm.stolen))
639 drm_info_once(&dev_priv->drm, "not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
643 static void intel_fbc_program_cfb(struct drm_i915_private *dev_priv)
645 struct intel_fbc *fbc = &dev_priv->fbc;
647 if (DISPLAY_VER(dev_priv) >= 5) {
648 intel_de_write(dev_priv, ILK_DPFC_CB_BASE,
649 fbc->compressed_fb.start);
650 } else if (IS_GM45(dev_priv)) {
651 intel_de_write(dev_priv, DPFC_CB_BASE,
652 fbc->compressed_fb.start);
654 GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
655 fbc->compressed_fb.start,
657 GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
658 fbc->compressed_llb.start,
661 intel_de_write(dev_priv, FBC_CFB_BASE,
662 dev_priv->dsm.start + fbc->compressed_fb.start);
663 intel_de_write(dev_priv, FBC_LL_BASE,
664 dev_priv->dsm.start + fbc->compressed_llb.start);
668 static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
670 struct intel_fbc *fbc = &dev_priv->fbc;
672 if (WARN_ON(intel_fbc_hw_is_active(dev_priv)))
675 if (drm_mm_node_allocated(&fbc->compressed_llb))
676 i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_llb);
677 if (drm_mm_node_allocated(&fbc->compressed_fb))
678 i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
681 void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
683 struct intel_fbc *fbc = &dev_priv->fbc;
685 if (!HAS_FBC(dev_priv))
688 mutex_lock(&fbc->lock);
689 __intel_fbc_cleanup_cfb(dev_priv);
690 mutex_unlock(&fbc->lock);
693 static bool stride_is_valid(struct drm_i915_private *dev_priv,
694 u64 modifier, unsigned int stride)
696 /* This should have been caught earlier. */
697 if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0))
700 /* Below are the additional FBC restrictions. */
704 if (DISPLAY_VER(dev_priv) == 2 || DISPLAY_VER(dev_priv) == 3)
705 return stride == 4096 || stride == 8192;
707 if (DISPLAY_VER(dev_priv) == 4 && !IS_G4X(dev_priv) && stride < 2048)
710 /* Display WA #1105: skl,bxt,kbl,cfl,glk */
711 if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
712 modifier == DRM_FORMAT_MOD_LINEAR && stride & 511)
721 static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
724 switch (pixel_format) {
725 case DRM_FORMAT_XRGB8888:
726 case DRM_FORMAT_XBGR8888:
728 case DRM_FORMAT_XRGB1555:
729 case DRM_FORMAT_RGB565:
730 /* 16bpp not supported on gen2 */
731 if (DISPLAY_VER(dev_priv) == 2)
733 /* WaFbcOnly1to1Ratio:ctg */
734 if (IS_G4X(dev_priv))
742 static bool rotation_is_valid(struct drm_i915_private *dev_priv,
743 u32 pixel_format, unsigned int rotation)
745 if (DISPLAY_VER(dev_priv) >= 9 && pixel_format == DRM_FORMAT_RGB565 &&
746 drm_rotation_90_or_270(rotation))
748 else if (DISPLAY_VER(dev_priv) <= 4 && !IS_G4X(dev_priv) &&
749 rotation != DRM_MODE_ROTATE_0)
756 * For some reason, the hardware tracking starts looking at whatever we
757 * programmed as the display plane base address register. It does not look at
758 * the X and Y offset registers. That's why we include the src x/y offsets
759 * instead of just looking at the plane size.
761 static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
763 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
764 struct intel_fbc *fbc = &dev_priv->fbc;
765 unsigned int effective_w, effective_h, max_w, max_h;
767 if (DISPLAY_VER(dev_priv) >= 10) {
770 } else if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) {
773 } else if (IS_G4X(dev_priv) || DISPLAY_VER(dev_priv) >= 5) {
781 intel_fbc_get_plane_source_size(&fbc->state_cache, &effective_w,
783 effective_w += fbc->state_cache.plane.adjusted_x;
784 effective_h += fbc->state_cache.plane.adjusted_y;
786 return effective_w <= max_w && effective_h <= max_h;
789 static bool tiling_is_valid(struct drm_i915_private *dev_priv,
793 case DRM_FORMAT_MOD_LINEAR:
794 case I915_FORMAT_MOD_Y_TILED:
795 case I915_FORMAT_MOD_Yf_TILED:
796 return DISPLAY_VER(dev_priv) >= 9;
797 case I915_FORMAT_MOD_X_TILED:
804 static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
805 const struct intel_crtc_state *crtc_state,
806 const struct intel_plane_state *plane_state)
808 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
809 struct intel_fbc *fbc = &dev_priv->fbc;
810 struct intel_fbc_state_cache *cache = &fbc->state_cache;
811 struct drm_framebuffer *fb = plane_state->hw.fb;
813 cache->plane.visible = plane_state->uapi.visible;
814 if (!cache->plane.visible)
817 cache->crtc.mode_flags = crtc_state->hw.adjusted_mode.flags;
818 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
819 cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
821 cache->plane.rotation = plane_state->hw.rotation;
823 * Src coordinates are already rotated by 270 degrees for
824 * the 90/270 degree plane rotation cases (to match the
825 * GTT mapping), hence no need to account for rotation here.
827 cache->plane.src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
828 cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
829 cache->plane.adjusted_x = plane_state->view.color_plane[0].x;
830 cache->plane.adjusted_y = plane_state->view.color_plane[0].y;
832 cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
834 cache->fb.format = fb->format;
835 cache->fb.modifier = fb->modifier;
836 cache->fb.stride = intel_fbc_plane_stride(plane_state);
838 /* FBC1 compression interval: arbitrary choice of 1 second */
839 cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
841 cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);
843 drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
844 !plane_state->ggtt_vma->fence);
846 if (plane_state->flags & PLANE_HAS_FENCE &&
847 plane_state->ggtt_vma->fence)
848 cache->fence_id = plane_state->ggtt_vma->fence->id;
850 cache->fence_id = -1;
852 cache->psr2_active = crtc_state->has_psr2;
855 static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
857 struct intel_fbc *fbc = &dev_priv->fbc;
859 return intel_fbc_cfb_size(dev_priv, &fbc->state_cache) >
860 fbc->compressed_fb.size * fbc->limit;
863 static u16 intel_fbc_override_cfb_stride(struct drm_i915_private *dev_priv,
864 const struct intel_fbc_state_cache *cache)
866 unsigned int stride = _intel_fbc_cfb_stride(cache);
867 unsigned int stride_aligned = intel_fbc_cfb_stride(dev_priv, cache);
870 * Override stride in 64 byte units per 4 line segment.
872 * Gen9 hw miscalculates cfb stride for linear as
873 * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
874 * we always need to use the override there.
876 if (stride != stride_aligned ||
877 (DISPLAY_VER(dev_priv) == 9 &&
878 cache->fb.modifier == DRM_FORMAT_MOD_LINEAR))
879 return stride_aligned * 4 / 64;
884 static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
886 struct intel_fbc *fbc = &dev_priv->fbc;
888 if (intel_vgpu_active(dev_priv)) {
889 fbc->no_fbc_reason = "VGPU is active";
893 if (!dev_priv->params.enable_fbc) {
894 fbc->no_fbc_reason = "disabled per module param or by default";
898 if (fbc->underrun_detected) {
899 fbc->no_fbc_reason = "underrun detected";
906 static bool intel_fbc_can_activate(struct intel_crtc *crtc)
908 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
909 struct intel_fbc *fbc = &dev_priv->fbc;
910 struct intel_fbc_state_cache *cache = &fbc->state_cache;
912 if (!intel_fbc_can_enable(dev_priv))
915 if (!cache->plane.visible) {
916 fbc->no_fbc_reason = "primary plane not visible";
920 /* We don't need to use a state cache here since this information is
921 * global for all CRTC.
923 if (fbc->underrun_detected) {
924 fbc->no_fbc_reason = "underrun detected";
928 if (cache->crtc.mode_flags & DRM_MODE_FLAG_INTERLACE) {
929 fbc->no_fbc_reason = "incompatible mode";
933 if (!intel_fbc_hw_tracking_covers_screen(crtc)) {
934 fbc->no_fbc_reason = "mode too large for compression";
938 /* The use of a CPU fence is one of two ways to detect writes by the
939 * CPU to the scanout and trigger updates to the FBC.
941 * The other method is by software tracking (see
942 * intel_fbc_invalidate/flush()), it will manually notify FBC and nuke
943 * the current compressed buffer and recompress it.
945 * Note that is possible for a tiled surface to be unmappable (and
946 * so have no fence associated with it) due to aperture constraints
947 * at the time of pinning.
949 * FIXME with 90/270 degree rotation we should use the fence on
950 * the normal GTT view (the rotated view doesn't even have a
951 * fence). Would need changes to the FBC fence Y offset as well.
952 * For now this will effectively disable FBC with 90/270 degree
955 if (DISPLAY_VER(dev_priv) < 9 && cache->fence_id < 0) {
956 fbc->no_fbc_reason = "framebuffer not tiled or fenced";
960 if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) {
961 fbc->no_fbc_reason = "pixel format is invalid";
965 if (!rotation_is_valid(dev_priv, cache->fb.format->format,
966 cache->plane.rotation)) {
967 fbc->no_fbc_reason = "rotation unsupported";
971 if (!tiling_is_valid(dev_priv, cache->fb.modifier)) {
972 fbc->no_fbc_reason = "tiling unsupported";
976 if (!stride_is_valid(dev_priv, cache->fb.modifier,
977 cache->fb.stride * cache->fb.format->cpp[0])) {
978 fbc->no_fbc_reason = "framebuffer stride not supported";
982 if (cache->plane.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
983 cache->fb.format->has_alpha) {
984 fbc->no_fbc_reason = "per-pixel alpha blending is incompatible with FBC";
988 /* WaFbcExceedCdClockThreshold:hsw,bdw */
989 if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
990 cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
991 fbc->no_fbc_reason = "pixel rate is too big";
995 /* It is possible for the required CFB size change without a
996 * crtc->disable + crtc->enable since it is possible to change the
997 * stride without triggering a full modeset. Since we try to
998 * over-allocate the CFB, there's a chance we may keep FBC enabled even
999 * if this happens, but if we exceed the current CFB size we'll have to
1000 * disable FBC. Notice that it would be possible to disable FBC, wait
1001 * for a frame, free the stolen node, then try to reenable FBC in case
1002 * we didn't get any invalidate/deactivate calls, but this would require
1003 * a lot of tracking just for a specific case. If we conclude it's an
1004 * important case, we can implement it later. */
1005 if (intel_fbc_cfb_size_changed(dev_priv)) {
1006 fbc->no_fbc_reason = "CFB requirements changed";
1011 * Work around a problem on GEN9+ HW, where enabling FBC on a plane
1012 * having a Y offset that isn't divisible by 4 causes FIFO underrun
1013 * and screen flicker.
1015 if (DISPLAY_VER(dev_priv) >= 9 &&
1016 (fbc->state_cache.plane.adjusted_y & 3)) {
1017 fbc->no_fbc_reason = "plane Y offset is misaligned";
1021 /* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
1022 if (DISPLAY_VER(dev_priv) >= 11 &&
1023 (cache->plane.src_h + cache->plane.adjusted_y) % 4) {
1024 fbc->no_fbc_reason = "plane height + offset is non-modulo of 4";
1029 * Display 12+ is not supporting FBC with PSR2.
1030 * Recommendation is to keep this combination disabled
1031 * Bspec: 50422 HSD: 14010260002
1033 if (fbc->state_cache.psr2_active && DISPLAY_VER(dev_priv) >= 12) {
1034 fbc->no_fbc_reason = "not supported with PSR2";
1041 static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
1042 struct intel_fbc_reg_params *params)
1044 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1045 struct intel_fbc *fbc = &dev_priv->fbc;
1046 struct intel_fbc_state_cache *cache = &fbc->state_cache;
1048 /* Since all our fields are integer types, use memset here so the
1049 * comparison function can rely on memcmp because the padding will be
1051 memset(params, 0, sizeof(*params));
1053 params->fence_id = cache->fence_id;
1054 params->fence_y_offset = cache->fence_y_offset;
1056 params->interval = cache->interval;
1058 params->crtc.pipe = crtc->pipe;
1059 params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
1061 params->fb.format = cache->fb.format;
1062 params->fb.modifier = cache->fb.modifier;
1063 params->fb.stride = cache->fb.stride;
1065 params->cfb_stride = intel_fbc_cfb_stride(dev_priv, cache);
1066 params->cfb_size = intel_fbc_cfb_size(dev_priv, cache);
1067 params->override_cfb_stride = intel_fbc_override_cfb_stride(dev_priv, cache);
1069 params->plane_visible = cache->plane.visible;
1072 static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
1074 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
1075 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1076 const struct intel_fbc *fbc = &dev_priv->fbc;
1077 const struct intel_fbc_state_cache *cache = &fbc->state_cache;
1078 const struct intel_fbc_reg_params *params = &fbc->params;
1080 if (drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
1083 if (!params->plane_visible)
1086 if (!intel_fbc_can_activate(crtc))
1089 if (params->fb.format != cache->fb.format)
1092 if (params->fb.modifier != cache->fb.modifier)
1095 if (params->fb.stride != cache->fb.stride)
1098 if (params->cfb_stride != intel_fbc_cfb_stride(dev_priv, cache))
1101 if (params->cfb_size != intel_fbc_cfb_size(dev_priv, cache))
1104 if (params->override_cfb_stride != intel_fbc_override_cfb_stride(dev_priv, cache))
1110 bool intel_fbc_pre_update(struct intel_atomic_state *state,
1111 struct intel_crtc *crtc)
1113 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1114 const struct intel_crtc_state *crtc_state =
1115 intel_atomic_get_new_crtc_state(state, crtc);
1116 const struct intel_plane_state *plane_state =
1117 intel_atomic_get_new_plane_state(state, plane);
1118 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1119 struct intel_fbc *fbc = &dev_priv->fbc;
1120 const char *reason = "update pending";
1121 bool need_vblank_wait = false;
1123 if (!plane->has_fbc || !plane_state)
1124 return need_vblank_wait;
1126 mutex_lock(&fbc->lock);
1128 if (fbc->crtc != crtc)
1131 intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
1132 fbc->flip_pending = true;
1134 if (!intel_fbc_can_flip_nuke(crtc_state)) {
1135 intel_fbc_deactivate(dev_priv, reason);
1138 * Display WA #1198: glk+
1139 * Need an extra vblank wait between FBC disable and most plane
1140 * updates. Bspec says this is only needed for plane disable, but
1141 * that is not true. Touching most plane registers will cause the
1142 * corruption to appear. Also SKL/derivatives do not seem to be
1145 * TODO: could optimize this a bit by sampling the frame
1146 * counter when we disable FBC (if it was already done earlier)
1147 * and skipping the extra vblank wait before the plane update
1148 * if at least one frame has already passed.
1150 if (fbc->activated &&
1151 DISPLAY_VER(dev_priv) >= 10)
1152 need_vblank_wait = true;
1153 fbc->activated = false;
1156 mutex_unlock(&fbc->lock);
1158 return need_vblank_wait;
1162 * __intel_fbc_disable - disable FBC
1163 * @dev_priv: i915 device instance
1165 * This is the low level function that actually disables FBC. Callers should
1166 * grab the FBC lock.
1168 static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
1170 struct intel_fbc *fbc = &dev_priv->fbc;
1171 struct intel_crtc *crtc = fbc->crtc;
1173 drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
1174 drm_WARN_ON(&dev_priv->drm, !fbc->crtc);
1175 drm_WARN_ON(&dev_priv->drm, fbc->active);
1177 drm_dbg_kms(&dev_priv->drm, "Disabling FBC on pipe %c\n",
1178 pipe_name(crtc->pipe));
1180 __intel_fbc_cleanup_cfb(dev_priv);
1185 static void __intel_fbc_post_update(struct intel_crtc *crtc)
1187 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1188 struct intel_fbc *fbc = &dev_priv->fbc;
1190 drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
1192 if (fbc->crtc != crtc)
1195 fbc->flip_pending = false;
1197 if (!dev_priv->params.enable_fbc) {
1198 intel_fbc_deactivate(dev_priv, "disabled at runtime per module param");
1199 __intel_fbc_disable(dev_priv);
1204 intel_fbc_get_reg_params(crtc, &fbc->params);
1206 if (!intel_fbc_can_activate(crtc))
1209 if (!fbc->busy_bits)
1210 intel_fbc_activate(dev_priv);
1212 intel_fbc_deactivate(dev_priv, "frontbuffer write");
1215 void intel_fbc_post_update(struct intel_atomic_state *state,
1216 struct intel_crtc *crtc)
1218 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1219 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1220 const struct intel_plane_state *plane_state =
1221 intel_atomic_get_new_plane_state(state, plane);
1222 struct intel_fbc *fbc = &dev_priv->fbc;
1224 if (!plane->has_fbc || !plane_state)
1227 mutex_lock(&fbc->lock);
1228 __intel_fbc_post_update(crtc);
1229 mutex_unlock(&fbc->lock);
1232 static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc)
1235 return to_intel_plane(fbc->crtc->base.primary)->frontbuffer_bit;
1237 return fbc->possible_framebuffer_bits;
1240 void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
1241 unsigned int frontbuffer_bits,
1242 enum fb_op_origin origin)
1244 struct intel_fbc *fbc = &dev_priv->fbc;
1246 if (!HAS_FBC(dev_priv))
1249 if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
1252 mutex_lock(&fbc->lock);
1254 fbc->busy_bits |= intel_fbc_get_frontbuffer_bit(fbc) & frontbuffer_bits;
1256 if (fbc->crtc && fbc->busy_bits)
1257 intel_fbc_deactivate(dev_priv, "frontbuffer write");
1259 mutex_unlock(&fbc->lock);
1262 void intel_fbc_flush(struct drm_i915_private *dev_priv,
1263 unsigned int frontbuffer_bits, enum fb_op_origin origin)
1265 struct intel_fbc *fbc = &dev_priv->fbc;
1267 if (!HAS_FBC(dev_priv))
1270 mutex_lock(&fbc->lock);
1272 fbc->busy_bits &= ~frontbuffer_bits;
1274 if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
1277 if (!fbc->busy_bits && fbc->crtc &&
1278 (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
1280 intel_fbc_recompress(dev_priv);
1281 else if (!fbc->flip_pending)
1282 __intel_fbc_post_update(fbc->crtc);
1286 mutex_unlock(&fbc->lock);
1290 * intel_fbc_choose_crtc - select a CRTC to enable FBC on
1291 * @dev_priv: i915 device instance
1292 * @state: the atomic state structure
1294 * This function looks at the proposed state for CRTCs and planes, then chooses
1295 * which pipe is going to have FBC by setting intel_crtc_state->enable_fbc to
1298 * Later, intel_fbc_enable is going to look for state->enable_fbc and then maybe
1299 * enable FBC for the chosen CRTC. If it does, it will set dev_priv->fbc.crtc.
1301 void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
1302 struct intel_atomic_state *state)
1304 struct intel_fbc *fbc = &dev_priv->fbc;
1305 struct intel_plane *plane;
1306 struct intel_plane_state *plane_state;
1307 bool crtc_chosen = false;
1310 mutex_lock(&fbc->lock);
1312 /* Does this atomic commit involve the CRTC currently tied to FBC? */
1314 !intel_atomic_get_new_crtc_state(state, fbc->crtc))
1317 if (!intel_fbc_can_enable(dev_priv))
1320 /* Simply choose the first CRTC that is compatible and has a visible
1321 * plane. We could go for fancier schemes such as checking the plane
1322 * size, but this would just affect the few platforms that don't tie FBC
1323 * to pipe or plane A. */
1324 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
1325 struct intel_crtc_state *crtc_state;
1326 struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
1328 if (!plane->has_fbc)
1331 if (!plane_state->uapi.visible)
1334 crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
1336 crtc_state->enable_fbc = true;
1342 fbc->no_fbc_reason = "no suitable CRTC for FBC";
1345 mutex_unlock(&fbc->lock);
1349 * intel_fbc_enable: tries to enable FBC on the CRTC
1351 * @state: corresponding &drm_crtc_state for @crtc
1353 * This function checks if the given CRTC was chosen for FBC, then enables it if
1354 * possible. Notice that it doesn't activate FBC. It is valid to call
1355 * intel_fbc_enable multiple times for the same pipe without an
1356 * intel_fbc_disable in the middle, as long as it is deactivated.
1358 static void intel_fbc_enable(struct intel_atomic_state *state,
1359 struct intel_crtc *crtc)
1361 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1362 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1363 const struct intel_crtc_state *crtc_state =
1364 intel_atomic_get_new_crtc_state(state, crtc);
1365 const struct intel_plane_state *plane_state =
1366 intel_atomic_get_new_plane_state(state, plane);
1367 struct intel_fbc *fbc = &dev_priv->fbc;
1368 struct intel_fbc_state_cache *cache = &fbc->state_cache;
1371 if (!plane->has_fbc || !plane_state)
1374 min_limit = intel_fbc_min_limit(plane_state->hw.fb ?
1375 plane_state->hw.fb->format->cpp[0] : 0);
1377 mutex_lock(&fbc->lock);
1380 if (fbc->crtc != crtc)
1383 if (fbc->limit >= min_limit &&
1384 !intel_fbc_cfb_size_changed(dev_priv))
1387 __intel_fbc_disable(dev_priv);
1390 drm_WARN_ON(&dev_priv->drm, fbc->active);
1392 intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
1394 /* FIXME crtc_state->enable_fbc lies :( */
1395 if (!cache->plane.visible)
1398 if (intel_fbc_alloc_cfb(dev_priv,
1399 intel_fbc_cfb_size(dev_priv, cache), min_limit)) {
1400 cache->plane.visible = false;
1401 fbc->no_fbc_reason = "not enough stolen memory";
1405 drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
1406 pipe_name(crtc->pipe));
1407 fbc->no_fbc_reason = "FBC enabled but not active yet\n";
1411 intel_fbc_program_cfb(dev_priv);
1413 mutex_unlock(&fbc->lock);
1417 * intel_fbc_disable - disable FBC if it's associated with crtc
1420 * This function disables FBC if it's associated with the provided CRTC.
1422 void intel_fbc_disable(struct intel_crtc *crtc)
1424 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1425 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
1426 struct intel_fbc *fbc = &dev_priv->fbc;
1428 if (!plane->has_fbc)
1431 mutex_lock(&fbc->lock);
1432 if (fbc->crtc == crtc)
1433 __intel_fbc_disable(dev_priv);
1434 mutex_unlock(&fbc->lock);
1438 * intel_fbc_update: enable/disable FBC on the CRTC
1439 * @state: atomic state
1442 * This function checks if the given CRTC was chosen for FBC, then enables it if
1443 * possible. Notice that it doesn't activate FBC. It is valid to call
1444 * intel_fbc_update multiple times for the same pipe without an
1445 * intel_fbc_disable in the middle.
1447 void intel_fbc_update(struct intel_atomic_state *state,
1448 struct intel_crtc *crtc)
1450 const struct intel_crtc_state *crtc_state =
1451 intel_atomic_get_new_crtc_state(state, crtc);
1453 if (crtc_state->update_pipe && !crtc_state->enable_fbc)
1454 intel_fbc_disable(crtc);
1456 intel_fbc_enable(state, crtc);
1460 * intel_fbc_global_disable - globally disable FBC
1461 * @dev_priv: i915 device instance
1463 * This function disables FBC regardless of which CRTC is associated with it.
1465 void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
1467 struct intel_fbc *fbc = &dev_priv->fbc;
1469 if (!HAS_FBC(dev_priv))
1472 mutex_lock(&fbc->lock);
1474 drm_WARN_ON(&dev_priv->drm, fbc->crtc->active);
1475 __intel_fbc_disable(dev_priv);
1477 mutex_unlock(&fbc->lock);
1480 static void intel_fbc_underrun_work_fn(struct work_struct *work)
1482 struct drm_i915_private *dev_priv =
1483 container_of(work, struct drm_i915_private, fbc.underrun_work);
1484 struct intel_fbc *fbc = &dev_priv->fbc;
1486 mutex_lock(&fbc->lock);
1488 /* Maybe we were scheduled twice. */
1489 if (fbc->underrun_detected || !fbc->crtc)
1492 drm_dbg_kms(&dev_priv->drm, "Disabling FBC due to FIFO underrun.\n");
1493 fbc->underrun_detected = true;
1495 intel_fbc_deactivate(dev_priv, "FIFO underrun");
1497 mutex_unlock(&fbc->lock);
1501 * intel_fbc_reset_underrun - reset FBC fifo underrun status.
1502 * @dev_priv: i915 device instance
1504 * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we
1505 * want to re-enable FBC after an underrun to increase test coverage.
1507 int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv)
1511 cancel_work_sync(&dev_priv->fbc.underrun_work);
1513 ret = mutex_lock_interruptible(&dev_priv->fbc.lock);
1517 if (dev_priv->fbc.underrun_detected) {
1518 drm_dbg_kms(&dev_priv->drm,
1519 "Re-allowing FBC after fifo underrun\n");
1520 dev_priv->fbc.no_fbc_reason = "FIFO underrun cleared";
1523 dev_priv->fbc.underrun_detected = false;
1524 mutex_unlock(&dev_priv->fbc.lock);
1530 * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun
1531 * @dev_priv: i915 device instance
1533 * Without FBC, most underruns are harmless and don't really cause too many
1534 * problems, except for an annoying message on dmesg. With FBC, underruns can
1535 * become black screens or even worse, especially when paired with bad
1536 * watermarks. So in order for us to be on the safe side, completely disable FBC
1537 * in case we ever detect a FIFO underrun on any pipe. An underrun on any pipe
1538 * already suggests that watermarks may be bad, so try to be as safe as
1541 * This function is called from the IRQ handler.
1543 void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
1545 struct intel_fbc *fbc = &dev_priv->fbc;
1547 if (!HAS_FBC(dev_priv))
1550 /* There's no guarantee that underrun_detected won't be set to true
1551 * right after this check and before the work is scheduled, but that's
1552 * not a problem since we'll check it again under the work function
1553 * while FBC is locked. This check here is just to prevent us from
1554 * unnecessarily scheduling the work, and it relies on the fact that we
1555 * never switch underrun_detect back to false after it's true. */
1556 if (READ_ONCE(fbc->underrun_detected))
1559 schedule_work(&fbc->underrun_work);
1563 * The DDX driver changes its behavior depending on the value it reads from
1564 * i915.enable_fbc, so sanitize it by translating the default value into either
1565 * 0 or 1 in order to allow it to know what's going on.
1567 * Notice that this is done at driver initialization and we still allow user
1568 * space to change the value during runtime without sanitizing it again. IGT
1569 * relies on being able to change i915.enable_fbc at runtime.
1571 static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
1573 if (dev_priv->params.enable_fbc >= 0)
1574 return !!dev_priv->params.enable_fbc;
1576 if (!HAS_FBC(dev_priv))
1579 if (IS_BROADWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 9)
1585 static bool need_fbc_vtd_wa(struct drm_i915_private *dev_priv)
1587 /* WaFbcTurnOffFbcWhenHyperVisorIsUsed:skl,bxt */
1588 if (intel_vtd_active() &&
1589 (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv))) {
1590 drm_info(&dev_priv->drm,
1591 "Disabling framebuffer compression (FBC) to prevent screen flicker with VT-d enabled\n");
1599 * intel_fbc_init - Initialize FBC
1600 * @dev_priv: the i915 device
1602 * This function might be called during PM init process.
1604 void intel_fbc_init(struct drm_i915_private *dev_priv)
1606 struct intel_fbc *fbc = &dev_priv->fbc;
1608 INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn);
1609 mutex_init(&fbc->lock);
1610 fbc->active = false;
1612 if (!drm_mm_initialized(&dev_priv->mm.stolen))
1613 mkwrite_device_info(dev_priv)->display.has_fbc = false;
1615 if (need_fbc_vtd_wa(dev_priv))
1616 mkwrite_device_info(dev_priv)->display.has_fbc = false;
1618 dev_priv->params.enable_fbc = intel_sanitize_fbc_option(dev_priv);
1619 drm_dbg_kms(&dev_priv->drm, "Sanitized enable_fbc value: %d\n",
1620 dev_priv->params.enable_fbc);
1622 if (!HAS_FBC(dev_priv)) {
1623 fbc->no_fbc_reason = "unsupported by this chipset";
1627 /* We still don't have any sort of hardware state readout for FBC, so
1628 * deactivate it in case the BIOS activated it to make sure software
1629 * matches the hardware state. */
1630 if (intel_fbc_hw_is_active(dev_priv))
1631 intel_fbc_hw_deactivate(dev_priv);