/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/oom.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/pnp.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>

#include <acpi/video.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
#include <drm/i915_drm.h>
#include "display/intel_acpi.h"
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"

#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
#include "i915_query.h"
#include "i915_suspend.h"
#include "i915_switcheroo.h"
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
#include "intel_csr.h"
#include "intel_pm.h"

static struct drm_driver driver;
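/*
 * Gunit register state saved around a VLV/CHV S0ix transition. The layout
 * mirrors vlv_save_gunit_s0ix_state()/vlv_restore_gunit_s0ix_state() below;
 * see the comment above vlv_save_gunit_s0ix_state() for the policy on which
 * registers need to be preserved by the driver.
 */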
struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark, gfx_prio_ctrl, arb_mode;
	u32 gfx_pend_tlb0, gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp, ecochk, bsd_hwsp, blt_hwsp, tlb_rd_addr;

	/* MBC */
	u32 g3dctl, gsckgctl, mbctl;

	/* GCP */
	u32 ucgctl1, ucgctl3, rcgctl1, rcgctl2, rstctl, misccpctl;

	/* GPM */
	u32 gfxpause, rpdeuhwtc, rpdeuc, ecobus, pwrdwnupctl;
	u32 rp_down_timeout, rp_deucsw, rcubmabdtmr, rcedata, spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr, gt_ier, pm_imr, pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl, gt_fifoctl, gtlc_wake_ctrl, gtlc_survive, pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0, gu_ctl1, pcbr, clock_gate_dis2;
};
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
	int domain = pci_domain_nr(dev_priv->drm.pdev->bus);

	dev_priv->bridge_dev =
		pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}

	return 0;
}
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
{
	int reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_GEN(dev_priv) >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_GEN(dev_priv) >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
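/*
 * Note: on gen4+ MCHBAR is a 64-bit address split across two config dwords
 * (reg and reg + 4, written via lower/upper_32_bits() above), while older
 * chipsets use a single 32-bit dword.
 */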
/* Setup MCHBAR if possible; note whether we need to disable it again on teardown */
static void
intel_setup_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev_priv))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
static void
intel_teardown_mchbar(struct drm_i915_private *dev_priv)
{
	int mchbar_reg = INTEL_GEN(dev_priv) >= 4 ? MCHBAR_I965 : MCHBAR_I915;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev_priv) || IS_I915GM(dev_priv)) {
			u32 deven_val;

			pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
					      &deven_val);
			deven_val &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
					       deven_val);
		} else {
			u32 mchbar_val;

			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
					      &mchbar_val);
			mchbar_val &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
					       mchbar_val);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}
static int i915_driver_modeset_probe(struct drm_i915_private *i915)
{
	int ret;

	if (i915_inject_probe_failure(i915))
		return -ENODEV;

	if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
		ret = drm_vblank_init(&i915->drm,
				      INTEL_NUM_PIPES(i915));
		if (ret)
			goto out;
	}

	intel_bios_init(i915);

	ret = intel_vga_register(i915);
	if (ret)
		goto out;

	intel_register_dsm_handler();

	ret = i915_switcheroo_register(i915);
	if (ret)
		goto cleanup_vga_client;

	/* must happen before intel_power_domains_init_hw() on VLV/CHV */
	intel_update_rawclk(i915);

	intel_power_domains_init_hw(i915, false);

	intel_csr_ucode_init(i915);

	ret = intel_irq_install(i915);
	if (ret)
		goto cleanup_csr;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	ret = intel_modeset_init(i915);
	if (ret)
		goto cleanup_irq;

	ret = i915_gem_init(i915);
	if (ret)
		goto cleanup_modeset;

	intel_overlay_setup(i915);

	if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915))
		return 0;

	ret = intel_fbdev_init(&i915->drm);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(i915);

	intel_init_ipc(i915);

	return 0;

cleanup_gem:
	i915_gem_suspend(i915);
	i915_gem_driver_remove(i915);
	i915_gem_driver_release(i915);
cleanup_modeset:
	intel_modeset_driver_remove(i915);
cleanup_irq:
	intel_irq_uninstall(i915);
cleanup_csr:
	intel_csr_ucode_fini(i915);
	intel_power_domains_driver_remove(i915);
	i915_switcheroo_unregister(i915);
cleanup_vga_client:
	intel_vga_unregister(i915);
out:
	return ret;
}
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = ggtt->gmadr.start;
	ap->ranges[0].size = ggtt->mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
static void i915_driver_modeset_remove(struct drm_i915_private *i915)
{
	intel_modeset_driver_remove(i915);

	intel_bios_driver_remove(i915);

	i915_switcheroo_unregister(i915);

	intel_vga_unregister(i915);

	intel_csr_ucode_fini(i915);
}
static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D).
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C).
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}
static int i915_workqueues_init(struct drm_i915_private *dev_priv)
{
	/*
	 * The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL)
		goto out_err;

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL)
		goto out_free_wq;

	return 0;

out_free_wq:
	destroy_workqueue(dev_priv->wq);
out_err:
	DRM_ERROR("Failed to allocate workqueues.\n");

	return -ENOMEM;
}
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
}
/*
 * We don't keep the workarounds for pre-production hardware, so we expect our
 * driver to fail on these machines in one way or another. A little warning on
 * dmesg may help both the user and the bug triagers.
 *
 * Our policy for removing pre-production workarounds is to keep the
 * current gen workarounds as a guide to the bring-up of the next gen
 * (workarounds have a habit of persisting!). Anything older than that
 * should be removed along with the complications they introduce.
 */
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
	bool pre = false;

	pre |= IS_HSW_EARLY_SDV(dev_priv);
	pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
	pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
	pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);

	if (pre) {
		DRM_ERROR("This is a pre-production stepping. "
			  "It may not be fully functional.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}
static int vlv_alloc_s0ix_state(struct drm_i915_private *i915)
{
	if (!IS_VALLEYVIEW(i915))
		return 0;

	/* we write all the values in the struct, so no need to zero it out */
	i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
				       GFP_KERNEL);
	if (!i915->vlv_s0ix_state)
		return -ENOMEM;

	return 0;
}
static void vlv_free_s0ix_state(struct drm_i915_private *i915)
{
	if (!i915->vlv_s0ix_state)
		return;

	kfree(i915->vlv_s0ix_state);
	i915->vlv_s0ix_state = NULL;
}
/**
 * i915_driver_early_probe - setup state not requiring device access
 * @dev_priv: device private
 *
 * Initialize everything that is a "SW-only" state, that is state not
 * requiring accessing the device or exposing the driver via kernel internal
 * or userspace interfaces. Example steps belonging here: lock initialization,
 * system memory allocation, setting up device specific attributes and
 * function hooks not requiring accessing the device.
 */
static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_subplatform_init(dev_priv);

	intel_uncore_mmio_debug_init_early(&dev_priv->mmio_debug);
	intel_uncore_init_early(&dev_priv->uncore, dev_priv);

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);

	mutex_init(&dev_priv->sb_lock);
	pm_qos_add_request(&dev_priv->sb_qos,
			   PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	mutex_init(&dev_priv->av_mutex);
	mutex_init(&dev_priv->wm.wm_mutex);
	mutex_init(&dev_priv->pps_mutex);
	mutex_init(&dev_priv->hdcp_comp_mutex);

	i915_memcpy_init_early(dev_priv);
	intel_runtime_pm_init_early(&dev_priv->runtime_pm);

	ret = i915_workqueues_init(dev_priv);
	if (ret < 0)
		return ret;

	ret = vlv_alloc_s0ix_state(dev_priv);
	if (ret < 0)
		goto err_workqueues;

	intel_wopcm_init_early(&dev_priv->wopcm);

	intel_gt_init_early(&dev_priv->gt, dev_priv);

	i915_gem_init_early(dev_priv);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev_priv);

	intel_pm_setup(dev_priv);
	intel_init_dpio(dev_priv);
	ret = intel_power_domains_init(dev_priv);
	if (ret < 0)
		goto err_gem;
	intel_irq_init(dev_priv);
	intel_init_display_hooks(dev_priv);
	intel_init_clock_gating_hooks(dev_priv);
	intel_init_audio_hooks(dev_priv);
	intel_display_crc_init(dev_priv);

	intel_detect_preproduction_hw(dev_priv);

	return 0;

err_gem:
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_free_s0ix_state(dev_priv);
err_workqueues:
	i915_workqueues_cleanup(dev_priv);
	return ret;
}
/**
 * i915_driver_late_release - cleanup the setup done in
 *			      i915_driver_early_probe()
 * @dev_priv: device private
 */
static void i915_driver_late_release(struct drm_i915_private *dev_priv)
{
	intel_irq_fini(dev_priv);
	intel_power_domains_cleanup(dev_priv);
	i915_gem_cleanup_early(dev_priv);
	intel_gt_driver_late_release(&dev_priv->gt);
	vlv_free_s0ix_state(dev_priv);
	i915_workqueues_cleanup(dev_priv);

	pm_qos_remove_request(&dev_priv->sb_qos);
	mutex_destroy(&dev_priv->sb_lock);
}
/**
 * i915_driver_mmio_probe - setup device MMIO
 * @dev_priv: device private
 *
 * Setup minimal device state necessary for MMIO accesses later in the
 * initialization sequence. The setup here should avoid any other device-wide
 * side effects or exposing the driver via kernel internal or user space
 * interfaces.
 */
static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
{
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	if (i915_get_bridge_dev(dev_priv))
		return -EIO;

	ret = intel_uncore_init_mmio(&dev_priv->uncore);
	if (ret < 0)
		goto err_bridge;

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev_priv);

	intel_device_info_init_mmio(dev_priv);

	intel_uncore_prune_mmio_domains(&dev_priv->uncore);

	intel_uc_init_mmio(&dev_priv->gt.uc);

	ret = intel_engines_init_mmio(dev_priv);
	if (ret)
		goto err_uncore;

	i915_gem_init_mmio(dev_priv);

	return 0;

err_uncore:
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
err_bridge:
	pci_dev_put(dev_priv->bridge_dev);

	return ret;
}
/**
 * i915_driver_mmio_release - cleanup the setup done in i915_driver_mmio_probe()
 * @dev_priv: device private
 */
static void i915_driver_mmio_release(struct drm_i915_private *dev_priv)
{
	intel_engines_cleanup(dev_priv);
	intel_teardown_mchbar(dev_priv);
	intel_uncore_fini_mmio(&dev_priv->uncore);
	pci_dev_put(dev_priv->bridge_dev);
}

static void intel_sanitize_options(struct drm_i915_private *dev_priv)
{
	intel_gvt_sanitize_options(dev_priv);
}
#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type

static const char *intel_dram_type_str(enum intel_dram_type type)
{
	static const char * const str[] = {
		DRAM_TYPE_STR(UNKNOWN),
		DRAM_TYPE_STR(DDR3),
		DRAM_TYPE_STR(DDR4),
		DRAM_TYPE_STR(LPDDR3),
		DRAM_TYPE_STR(LPDDR4),
	};

	if (type >= ARRAY_SIZE(str))
		type = INTEL_DRAM_UNKNOWN;

	return str[type];
}

#undef DRAM_TYPE_STR
static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
	return dimm->ranks * 64 / (dimm->width ?: 1);
}
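/*
 * Worked example: a dual-rank DIMM built from x8 devices has
 * 2 * 64 / 8 = 16 DRAM devices, since each rank is 64 bits wide.
 */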
/* Returns total GB for the whole DIMM */
static int skl_get_dimm_size(u16 val)
{
	return val & SKL_DRAM_SIZE_MASK;
}
static int skl_get_dimm_width(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	switch (val & SKL_DRAM_WIDTH_MASK) {
	case SKL_DRAM_WIDTH_X8:
	case SKL_DRAM_WIDTH_X16:
	case SKL_DRAM_WIDTH_X32:
		val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}
static int skl_get_dimm_ranks(u16 val)
{
	if (skl_get_dimm_size(val) == 0)
		return 0;

	val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;

	return val + 1;
}
/* Returns total GB for the whole DIMM */
static int cnl_get_dimm_size(u16 val)
{
	return (val & CNL_DRAM_SIZE_MASK) / 2;
}
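/* The size field here is apparently in half-GB units, hence the division by 2. */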
static int cnl_get_dimm_width(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	switch (val & CNL_DRAM_WIDTH_MASK) {
	case CNL_DRAM_WIDTH_X8:
	case CNL_DRAM_WIDTH_X16:
	case CNL_DRAM_WIDTH_X32:
		val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
		return 8 << val;
	default:
		MISSING_CASE(val);
		return 0;
	}
}
static int cnl_get_dimm_ranks(u16 val)
{
	if (cnl_get_dimm_size(val) == 0)
		return 0;

	val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;

	return val + 1;
}
static bool
skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
{
	/* Convert total GB to Gb per DRAM device */
	return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
}
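/*
 * Worked example: a 16 GB dual-rank DIMM of x8 devices has
 * 2 * 64 / 8 = 16 devices, i.e. 8 * 16 / 16 = 8 Gb per device, so it is
 * not built from 16Gb devices; a 32 GB dual-rank x8 DIMM would be.
 */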
static void
skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
		       struct dram_dimm_info *dimm,
		       int channel, char dimm_name, u16 val)
{
	if (INTEL_GEN(dev_priv) >= 10) {
		dimm->size = cnl_get_dimm_size(val);
		dimm->width = cnl_get_dimm_width(val);
		dimm->ranks = cnl_get_dimm_ranks(val);
	} else {
		dimm->size = skl_get_dimm_size(val);
		dimm->width = skl_get_dimm_width(val);
		dimm->ranks = skl_get_dimm_ranks(val);
	}

	DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
		      channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
		      yesno(skl_is_16gb_dimm(dimm)));
}
static int
skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
			  struct dram_channel_info *ch,
			  int channel, u32 val)
{
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
			       channel, 'L', val & 0xffff);
	skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
			       channel, 'S', val >> 16);

	if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
		DRM_DEBUG_KMS("CH%u not populated\n", channel);
		return -EINVAL;
	}

	if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
		ch->ranks = 2;
	else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
		ch->ranks = 2;
	else
		ch->ranks = 1;

	ch->is_16gb_dimm =
		skl_is_16gb_dimm(&ch->dimm_l) ||
		skl_is_16gb_dimm(&ch->dimm_s);

	DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
		      channel, ch->ranks, yesno(ch->is_16gb_dimm));

	return 0;
}
static bool
intel_is_dram_symmetric(const struct dram_channel_info *ch0,
			const struct dram_channel_info *ch1)
{
	return !memcmp(ch0, ch1, sizeof(*ch0)) &&
		(ch0->dimm_s.size == 0 ||
		 !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}
static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	struct dram_channel_info ch0 = {}, ch1 = {};
	u32 val;
	int ret;

	val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
	if (ret == 0)
		dram_info->num_channels++;

	val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
	ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
	if (ret == 0)
		dram_info->num_channels++;

	if (dram_info->num_channels == 0) {
		DRM_INFO("Number of memory channels is zero\n");
		return -EINVAL;
	}

	/*
	 * If either channel is a single rank channel, the worst case output
	 * will be the same as for single rank memory, so consider the whole
	 * configuration single rank.
	 */
	if (ch0.ranks == 1 || ch1.ranks == 1)
		dram_info->ranks = 1;
	else
		dram_info->ranks = max(ch0.ranks, ch1.ranks);

	if (dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory rank information\n");
		return -EINVAL;
	}

	dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;

	dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);

	DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
		      yesno(dram_info->symmetric_memory));

	return 0;
}
static enum intel_dram_type
skl_get_dram_type(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);

	switch (val & SKL_DRAM_DDR_TYPE_MASK) {
	case SKL_DRAM_DDR_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case SKL_DRAM_DDR_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case SKL_DRAM_DDR_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case SKL_DRAM_DDR_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}
static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 mem_freq_khz, val;
	int ret;

	dram_info->type = skl_get_dram_type(dev_priv);
	DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));

	ret = skl_dram_get_channels_info(dev_priv);
	if (ret)
		return ret;

	val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
	mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
				    SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_info->bandwidth_kbps = dram_info->num_channels *
				    mem_freq_khz * 8;
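	/*
	 * Worked example (illustrative numbers only): two channels at
	 * mem_freq_khz = 2400000 with 8 bytes per channel per clock give
	 * bandwidth_kbps = 2 * 2400000 * 8 = 38400000, i.e. ~38.4 GB/s.
	 */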
	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	dram_info->valid = true;

	return 0;
}
/* Returns Gb per DRAM device */
static int bxt_get_dimm_size(u32 val)
{
	switch (val & BXT_DRAM_SIZE_MASK) {
	case BXT_DRAM_SIZE_4GBIT:
		return 4;
	case BXT_DRAM_SIZE_6GBIT:
		return 6;
	case BXT_DRAM_SIZE_8GBIT:
		return 8;
	case BXT_DRAM_SIZE_12GBIT:
		return 12;
	case BXT_DRAM_SIZE_16GBIT:
		return 16;
	default:
		MISSING_CASE(val);
		return 0;
	}
}
static int bxt_get_dimm_width(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;

	return 8 << val;
}
static int bxt_get_dimm_ranks(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return 0;

	switch (val & BXT_DRAM_RANK_MASK) {
	case BXT_DRAM_RANK_SINGLE:
		return 1;
	case BXT_DRAM_RANK_DUAL:
		return 2;
	default:
		MISSING_CASE(val);
		return 0;
	}
}
static enum intel_dram_type bxt_get_dimm_type(u32 val)
{
	if (!bxt_get_dimm_size(val))
		return INTEL_DRAM_UNKNOWN;

	switch (val & BXT_DRAM_TYPE_MASK) {
	case BXT_DRAM_TYPE_DDR3:
		return INTEL_DRAM_DDR3;
	case BXT_DRAM_TYPE_LPDDR3:
		return INTEL_DRAM_LPDDR3;
	case BXT_DRAM_TYPE_DDR4:
		return INTEL_DRAM_DDR4;
	case BXT_DRAM_TYPE_LPDDR4:
		return INTEL_DRAM_LPDDR4;
	default:
		MISSING_CASE(val);
		return INTEL_DRAM_UNKNOWN;
	}
}
static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
			      u32 val)
{
	dimm->width = bxt_get_dimm_width(val);
	dimm->ranks = bxt_get_dimm_ranks(val);

	/*
	 * Size in register is Gb per DRAM device. Convert to total
	 * GB to match the way we report this for non-LP platforms.
	 */
	dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
}
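/*
 * Worked example: a dual-rank x16 arrangement of 8Gb devices has
 * 2 * 64 / 16 = 8 devices, giving 8 Gb * 8 / 8 = 8 GB total.
 */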
static int
bxt_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	u32 dram_channels;
	u32 mem_freq_khz, val;
	u8 num_active_channels;
	int i;

	val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
	mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
				    BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);

	dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
	num_active_channels = hweight32(dram_channels);

	/* Each active bit represents 4-byte channel */
	dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
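	/*
	 * Worked example (illustrative numbers only): two active 4-byte
	 * channels at mem_freq_khz = 2133000 give
	 * 2133000 * 2 * 4 = 17064000 kBps, i.e. ~17 GB/s.
	 */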
	if (dram_info->bandwidth_kbps == 0) {
		DRM_INFO("Couldn't get system memory bandwidth\n");
		return -EINVAL;
	}

	/*
	 * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
	 */
	for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
		struct dram_dimm_info dimm;
		enum intel_dram_type type;

		val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
		if (val == 0xFFFFFFFF)
			continue;

		dram_info->num_channels++;

		bxt_get_dimm_info(&dimm, val);
		type = bxt_get_dimm_type(val);

		WARN_ON(type != INTEL_DRAM_UNKNOWN &&
			dram_info->type != INTEL_DRAM_UNKNOWN &&
			dram_info->type != type);

		DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
			      i - BXT_D_CR_DRP0_DUNIT_START,
			      dimm.size, dimm.width, dimm.ranks,
			      intel_dram_type_str(type));

		/*
		 * If any channel is a single rank channel, the worst case
		 * output will be the same as for single rank memory, so
		 * consider the whole configuration single rank.
		 */
		if (dram_info->ranks == 0)
			dram_info->ranks = dimm.ranks;
		else if (dimm.ranks == 1)
			dram_info->ranks = 1;

		if (type != INTEL_DRAM_UNKNOWN)
			dram_info->type = type;
	}

	if (dram_info->type == INTEL_DRAM_UNKNOWN ||
	    dram_info->ranks == 0) {
		DRM_INFO("couldn't get memory information\n");
		return -EINVAL;
	}

	dram_info->valid = true;

	return 0;
}
static void
intel_get_dram_info(struct drm_i915_private *dev_priv)
{
	struct dram_info *dram_info = &dev_priv->dram_info;
	int ret;

	/*
	 * Assume 16Gb DIMMs are present until proven otherwise.
	 * This is only used for the level 0 watermark latency
	 * w/a which does not apply to bxt/glk.
	 */
	dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);

	if (INTEL_GEN(dev_priv) < 9)
		return;

	if (IS_GEN9_LP(dev_priv))
		ret = bxt_get_dram_info(dev_priv);
	else
		ret = skl_get_dram_info(dev_priv);
	if (ret)
		return;

	DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
		      dram_info->bandwidth_kbps,
		      dram_info->num_channels);

	DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
		      dram_info->ranks, yesno(dram_info->is_16gb_dimm));
}
static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
{
	const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	const unsigned int sets[4] = { 1, 1, 2, 2 };

	return EDRAM_NUM_BANKS(cap) *
		ways[EDRAM_WAYS_IDX(cap)] *
		sets[EDRAM_SETS_IDX(cap)];
}
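/*
 * Worked example: a capability value decoding to 8 banks, ways index 1
 * (8 ways) and sets index 2 (2 sets) yields 8 * 8 * 2 = 128 MB.
 */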
static void edram_detect(struct drm_i915_private *dev_priv)
{
	u32 edram_cap = 0;

	if (!(IS_HASWELL(dev_priv) ||
	      IS_BROADWELL(dev_priv) ||
	      INTEL_GEN(dev_priv) >= 9))
		return;

	edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);

	/* NB: We can't write IDICR yet because we don't have gt funcs set up */

	if (!(edram_cap & EDRAM_ENABLED))
		return;

	/*
	 * The needed capability bits for size calculation are not there with
	 * pre gen9 so return 128MB always.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		dev_priv->edram_size_mb = 128;
	else
		dev_priv->edram_size_mb =
			gen9_edram_size_mb(dev_priv, edram_cap);

	dev_info(dev_priv->drm.dev,
		 "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
}
/**
 * i915_driver_hw_probe - setup state requiring device access
 * @dev_priv: device private
 *
 * Setup state that requires accessing the device, but doesn't require
 * exposing the driver via kernel internal or userspace interfaces.
 */
static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	if (i915_inject_probe_failure(dev_priv))
		return -ENODEV;

	intel_device_info_runtime_init(dev_priv);

	if (HAS_PPGTT(dev_priv)) {
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_full_ppgtt(dev_priv)) {
			i915_report_error(dev_priv,
					  "incompatible vGPU found, support for isolated ppGTT required\n");
			return -ENXIO;
		}
	}

	if (HAS_EXECLISTS(dev_priv)) {
		/*
		 * Older GVT emulation depends upon intercepting CSB mmio,
		 * which we no longer use, preferring to use the HWSP cache
		 * instead.
		 */
		if (intel_vgpu_active(dev_priv) &&
		    !intel_vgpu_has_hwsp_emulation(dev_priv)) {
			i915_report_error(dev_priv,
					  "old vGPU host found, support for HWSP emulation required\n");
			return -ENXIO;
		}
	}

	intel_sanitize_options(dev_priv);

	/* needs to be done before ggtt probe */
	edram_detect(dev_priv);

	i915_perf_init(dev_priv);

	ret = i915_ggtt_probe_hw(dev_priv);
	if (ret)
		goto err_perf;

	/*
	 * WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over.
	 */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto err_ggtt;
	}

	ret = vga_remove_vgacon(pdev);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto err_ggtt;
	}

	ret = i915_ggtt_init_hw(dev_priv);
	if (ret)
		goto err_ggtt;

	intel_gt_init_hw_early(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret) {
		DRM_ERROR("failed to enable GGTT\n");
		goto err_ggtt;
	}

	pci_set_master(pdev);

	/*
	 * We don't have a max segment size, so set it to the max so sg's
	 * debugging layer doesn't complain
	 */
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN(dev_priv, 2)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");
			goto err_ggtt;
		}
	}

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_I965G(dev_priv) || IS_I965GM(dev_priv)) {
		ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			DRM_ERROR("failed to set DMA mask\n");
			goto err_ggtt;
		}
	}

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	intel_gt_init_workarounds(dev_priv);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, and were defeatured. MSI interrupts seem to
	 * get lost on g4x as well, and interrupt delivery seems to stay
	 * properly dead afterwards. So we'll just disable them for all
	 * pre-gen5 chipsets.
	 *
	 * dp aux and gmbus irq on gen4 seem to be able to generate legacy
	 * interrupts even when in MSI mode. This results in spurious
	 * interrupt warnings if the legacy irq no. is shared with another
	 * device. The kernel then disables that interrupt source and so
	 * prevents the other device from working properly.
	 */
	if (INTEL_GEN(dev_priv) >= 5) {
		if (pci_enable_msi(pdev) < 0)
			DRM_DEBUG_DRIVER("can't enable MSI");
	}

	ret = intel_gvt_init(dev_priv);
	if (ret)
		goto err_msi;

	intel_opregion_setup(dev_priv);
	/*
	 * Fill the dram structure to get the system raw bandwidth and
	 * dram info. This will be used for memory latency calculation.
	 */
	intel_get_dram_info(dev_priv);

	intel_bw_init_hw(dev_priv);

	return 0;

err_msi:
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);
	pm_qos_remove_request(&dev_priv->pm_qos);
err_ggtt:
	i915_ggtt_driver_release(dev_priv);
err_perf:
	i915_perf_fini(dev_priv);
	return ret;
}
/**
 * i915_driver_hw_remove - cleanup the setup done in i915_driver_hw_probe()
 * @dev_priv: device private
 */
static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	i915_perf_fini(dev_priv);

	if (pdev->msi_enabled)
		pci_disable_msi(pdev);

	pm_qos_remove_request(&dev_priv->pm_qos);
}
/**
 * i915_driver_register - register the driver with the rest of the system
 * @dev_priv: device private
 *
 * Perform any steps necessary to make the driver available via kernel
 * internal or userspace interfaces.
 */
static void i915_driver_register(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	i915_gem_driver_register(dev_priv);
	i915_pmu_register(dev_priv);

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev_priv))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	/* Reveal our presence to userspace */
	if (drm_dev_register(dev, 0) == 0) {
		i915_debugfs_register(dev_priv);
		i915_setup_sysfs(dev_priv);

		/* Depends on sysfs having been initialized */
		i915_perf_register(dev_priv);
	} else
		DRM_ERROR("Failed to register driver for userspace access!\n");

	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
		/* Must be done after probing outputs */
		intel_opregion_register(dev_priv);
		acpi_video_register();
	}

	intel_gt_driver_register(&dev_priv->gt);

	intel_audio_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. We do it last so that the async config
	 * cannot run before the connectors are registered.
	 */
	intel_fbdev_initial_config_async(dev);

	/*
	 * We need to coordinate the hotplugs with the asynchronous fbdev
	 * configuration, for which we use the fbdev->async_cookie.
	 */
	if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv))
		drm_kms_helper_poll_init(dev);

	intel_power_domains_enable(dev_priv);
	intel_runtime_pm_enable(&dev_priv->runtime_pm);
}
/**
 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
 * @dev_priv: device private
 */
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_disable(&dev_priv->runtime_pm);
	intel_power_domains_disable(dev_priv);

	intel_fbdev_unregister(dev_priv);
	intel_audio_deinit(dev_priv);

	/*
	 * After flushing the fbdev (incl. a late async config which will
	 * have delayed queuing of a hotplug event), flush the hotplug
	 * events.
	 */
	drm_kms_helper_poll_fini(&dev_priv->drm);

	intel_gt_driver_unregister(&dev_priv->gt);
	acpi_video_unregister();
	intel_opregion_unregister(dev_priv);

	i915_perf_unregister(dev_priv);
	i915_pmu_unregister(dev_priv);

	i915_teardown_sysfs(dev_priv);
	drm_dev_unplug(&dev_priv->drm);

	i915_gem_driver_unregister(dev_priv);
}
static void i915_welcome_messages(struct drm_i915_private *dev_priv)
{
	if (drm_debug & DRM_UT_DRIVER) {
		struct drm_printer p = drm_debug_printer("i915 device info:");

		drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
			   INTEL_DEVID(dev_priv),
			   INTEL_REVID(dev_priv),
			   intel_platform_name(INTEL_INFO(dev_priv)->platform),
			   intel_subplatform(RUNTIME_INFO(dev_priv),
					     INTEL_INFO(dev_priv)->platform),
			   INTEL_GEN(dev_priv));

		intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
		intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
		DRM_INFO("DRM_I915_DEBUG enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
		DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}
static struct drm_i915_private *
i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct intel_device_info *device_info;
	struct drm_i915_private *i915;
	int err;

	i915 = kzalloc(sizeof(*i915), GFP_KERNEL);
	if (!i915)
		return ERR_PTR(-ENOMEM);

	err = drm_dev_init(&i915->drm, &driver, &pdev->dev);
	if (err) {
		kfree(i915);
		return ERR_PTR(err);
	}

	i915->drm.dev_private = i915;

	i915->drm.pdev = pdev;
	pci_set_drvdata(pdev, i915);

	/* Setup the write-once "constant" device info */
	device_info = mkwrite_device_info(i915);
	memcpy(device_info, match_info, sizeof(*device_info));
	RUNTIME_INFO(i915)->device_id = pdev->device;

	BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));

	return i915;
}
static void i915_driver_destroy(struct drm_i915_private *i915)
{
	struct pci_dev *pdev = i915->drm.pdev;

	drm_dev_fini(&i915->drm);
	kfree(i915);

	/* And make sure we never chase our dangling pointer from pci_dev */
	pci_set_drvdata(pdev, NULL);
}
/**
 * i915_driver_probe - setup chip and create an initial config
 * @pdev: PCI device
 * @ent: matching PCI ID entry
 *
 * The driver probe routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct intel_device_info *match_info =
		(struct intel_device_info *)ent->driver_data;
	struct drm_i915_private *dev_priv;
	int ret;

	dev_priv = i915_driver_create(pdev, ent);
	if (IS_ERR(dev_priv))
		return PTR_ERR(dev_priv);

	/* Disable nuclear pageflip by default on pre-ILK */
	if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
		dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out_fini;

	ret = i915_driver_early_probe(dev_priv);
	if (ret < 0)
		goto out_pci_disable;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	i915_detect_vgpu(dev_priv);

	ret = i915_driver_mmio_probe(dev_priv);
	if (ret < 0)
		goto out_runtime_pm_put;

	ret = i915_driver_hw_probe(dev_priv);
	if (ret < 0)
		goto out_cleanup_mmio;

	ret = i915_driver_modeset_probe(dev_priv);
	if (ret < 0)
		goto out_cleanup_hw;

	i915_driver_register(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	i915_welcome_messages(dev_priv);

	return 0;

out_cleanup_hw:
	i915_driver_hw_remove(dev_priv);
	i915_ggtt_driver_release(dev_priv);
out_cleanup_mmio:
	i915_driver_mmio_release(dev_priv);
out_runtime_pm_put:
	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	i915_driver_late_release(dev_priv);
out_pci_disable:
	pci_disable_device(pdev);
out_fini:
	i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
	i915_driver_destroy(dev_priv);
	return ret;
}
void i915_driver_remove(struct drm_i915_private *i915)
{
	disable_rpm_wakeref_asserts(&i915->runtime_pm);

	i915_driver_unregister(i915);

	/*
	 * After unregistering the device to prevent any new users, cancel
	 * all in-flight requests so that we can quickly unbind the active
	 * resources.
	 */
	intel_gt_set_wedged(&i915->gt);

	/* Flush any external code that still may be under the RCU lock */
	synchronize_rcu();

	i915_gem_suspend(i915);

	drm_atomic_helper_shutdown(&i915->drm);

	intel_gvt_driver_remove(i915);

	i915_driver_modeset_remove(i915);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&i915->gt.hangcheck.work);
	i915_reset_error_state(i915);

	i915_gem_driver_remove(i915);

	intel_power_domains_driver_remove(i915);

	i915_driver_hw_remove(i915);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}
static void i915_driver_release(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_driver_release(dev_priv);

	i915_ggtt_driver_release(dev_priv);

	i915_driver_mmio_release(dev_priv);

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	i915_driver_late_release(dev_priv);
	i915_driver_destroy(dev_priv);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	int ret;

	ret = i915_gem_open(i915, file);
	if (ret)
		return ret;

	return 0;
}
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
static void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}
static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	i915_gem_context_close(file);
	i915_gem_release(dev, file);

	kfree_rcu(file_priv, rcu);

	/* Catch up with all the deferred frees from "this" client */
	i915_gem_flush_free_objects(to_i915(dev));
}
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	drm_modeset_lock_all(dev);
	for_each_intel_encoder(dev, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(dev);
}
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
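/*
 * Suspend-to-idle ("s2idle") reports an ACPI target state of S0, so any
 * target state below S3 is treated as an idle (S0ix style) suspend by the
 * helper below.
 */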
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
	if (acpi_target_system_state() < ACPI_STATE_S3)
		return true;
#endif
	return false;
}
static int i915_drm_prepare(struct drm_device *dev)
{
	struct drm_i915_private *i915 = to_i915(dev);

	/*
	 * NB intel_display_suspend() may issue new requests after we've
	 * ostensibly marked the GPU as ready-to-sleep here. We need to
	 * split out that work and pull it forward so that after point,
	 * the GPU is not woken again.
	 */
	i915_gem_suspend(i915);

	return 0;
}
static int i915_drm_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	pci_power_t opregion_target_state;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_power_domains_disable(dev_priv);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(pdev);

	intel_display_suspend(dev);

	intel_dp_mst_suspend(dev_priv);

	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_hpd_cancel_work(dev_priv);

	intel_suspend_encoders(dev_priv);

	intel_suspend_hw(dev_priv);

	i915_gem_suspend_gtt_mappings(dev_priv);

	i915_save_state(dev_priv);

	opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
	intel_opregion_suspend(dev_priv, opregion_target_state);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);

	dev_priv->suspend_count++;

	intel_csr_ucode_suspend(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}
static enum i915_drm_suspend_mode
get_suspend_mode(struct drm_i915_private *dev_priv, bool hibernate)
{
	if (hibernate)
		return I915_DRM_SUSPEND_HIBERNATE;

	if (suspend_to_idle(dev_priv))
		return I915_DRM_SUSPEND_IDLE;

	return I915_DRM_SUSPEND_MEM;
}
static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret = 0;

	disable_rpm_wakeref_asserts(rpm);

	i915_gem_suspend_late(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_power_domains_suspend(dev_priv,
				    get_suspend_mode(dev_priv, hibernation));

	intel_display_power_suspend_late(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Suspend complete failed: %d\n", ret);
		intel_power_domains_resume(dev_priv);

		goto out;
	}

	pci_disable_device(pdev);
	/*
	 * During hibernation on some platforms the BIOS may try to access
	 * the device even though it's already in D3 and hang the machine. So
	 * leave the device in D0 on those platforms and hope the BIOS will
	 * power down the device properly. The issue was seen on multiple old
	 * GENs with different BIOS vendors, so having an explicit blacklist
	 * is impractical; apply the workaround on everything pre GEN6. The
	 * platforms where the issue was seen:
	 * Lenovo Thinkpad X301, X61s, X60, T60, X41
	 */
	if (!(hibernation && INTEL_GEN(dev_priv) < 6))
		pci_set_power_state(pdev, PCI_D3hot);

out:
	enable_rpm_wakeref_asserts(rpm);
	if (!dev_priv->uncore.user_forcewake_count)
		intel_runtime_pm_driver_release(rpm);

	return ret;
}
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
	int error;

	if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
			 state.event != PM_EVENT_FREEZE))
		return -EINVAL;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_suspend(&i915->drm);
	if (error)
		return error;

	return i915_drm_suspend_late(&i915->drm, false);
}
static int i915_drm_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
	intel_gt_pm_disable(&dev_priv->gt);

	i915_gem_sanitize(dev_priv);

	ret = i915_ggtt_enable_hw(dev_priv);
	if (ret)
		DRM_ERROR("failed to re-enable GGTT\n");

	i915_gem_restore_gtt_mappings(dev_priv);
	i915_gem_restore_fences(dev_priv);

	intel_csr_ucode_resume(dev_priv);

	i915_restore_state(dev_priv);
	intel_pps_unlock_regs_wa(dev_priv);

	intel_init_pch_refclk(dev_priv);

	/*
	 * Interrupts have to be enabled before any batches are run. If not the
	 * GPU will hang. i915_gem_init_hw() will initiate batches to
	 * update/restore the context.
	 *
	 * drm_mode_config_reset() needs AUX interrupts.
	 *
	 * Modeset enabling in intel_modeset_init_hw() also needs working
	 * irqs.
	 */
	intel_runtime_pm_enable_interrupts(dev_priv);

	drm_mode_config_reset(dev);

	i915_gem_resume(dev_priv);

	intel_modeset_init_hw(dev_priv);
	intel_init_clock_gating(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_dp_mst_resume(dev_priv);

	intel_display_resume(dev);

	drm_kms_helper_poll_enable(dev);

	/*
	 * ... but also need to make sure that hotplug processing
	 * doesn't cause havoc. Like in the driver load code we don't
	 * bother with the tiny race here where we might lose hotplug
	 * notifications.
	 */
	intel_hpd_init(dev_priv);

	intel_opregion_resume(dev_priv);

	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);

	intel_power_domains_enable(dev_priv);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return 0;
}
static int i915_drm_resume_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct pci_dev *pdev = dev_priv->drm.pdev;
	int ret;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */

	/*
	 * Note that we need to set the power state explicitly, since we
	 * powered off the device during freeze and the PCI core won't power
	 * it back up for us during thaw. Powering off the device during
	 * freeze is not a hard requirement though, and during the
	 * suspend/resume phases the PCI core makes sure we get here with the
	 * device powered on. So in case we change our freeze logic and keep
	 * the device powered we can also remove the following set power state
	 * call.
	 */
	ret = pci_set_power_state(pdev, PCI_D0);
	if (ret) {
		DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
		return ret;
	}

	/*
	 * Note that pci_enable_device() first enables any parent bridge
	 * device and only then sets the power state for this device. The
	 * bridge enabling is a nop though, since bridge devices are resumed
	 * first. The order of enabling power and enabling the device is
	 * imposed by the PCI core as described above, so here we preserve the
	 * same order for the freeze/thaw phases.
	 *
	 * TODO: eventually we should remove pci_disable_device() /
	 * pci_enable_device() from suspend/resume. Due to how they
	 * depend on the device enable refcount we can't anyway depend on them
	 * disabling/enabling the device.
	 */
	if (pci_enable_device(pdev))
		return -EIO;

	pci_set_master(pdev);

	disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, false);
	if (ret)
		DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
			  ret);

	intel_uncore_resume_early(&dev_priv->uncore);

	intel_gt_check_and_clear_faults(&dev_priv->gt);

	intel_display_power_resume_early(dev_priv);

	intel_gt_pm_disable(&dev_priv->gt);

	intel_power_domains_resume(dev_priv);

	intel_gt_sanitize(&dev_priv->gt, true);

	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

	return ret;
}
int i915_resume_switcheroo(struct drm_i915_private *i915)
{
	int ret;

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	ret = i915_drm_resume_early(&i915->drm);
	if (ret)
		return ret;

	return i915_drm_resume(&i915->drm);
}
static int i915_pm_prepare(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_prepare(&i915->drm);
}
static int i915_pm_suspend(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (!i915) {
		dev_err(kdev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend(&i915->drm);
}
static int i915_pm_suspend_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, false);
}
static int i915_pm_poweroff_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_suspend_late(&i915->drm, true);
}
static int i915_pm_resume_early(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume_early(&i915->drm);
}

static int i915_pm_resume(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);

	if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_resume(&i915->drm);
}
/* freeze: before creating the hibernation_image */
static int i915_pm_freeze(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend(&i915->drm);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze(i915);
	if (ret)
		return ret;

	return 0;
}
static int i915_pm_freeze_late(struct device *kdev)
{
	struct drm_i915_private *i915 = kdev_to_i915(kdev);
	int ret;

	if (i915->drm.switch_power_state != DRM_SWITCH_POWER_OFF) {
		ret = i915_drm_suspend_late(&i915->drm, true);
		if (ret)
			return ret;
	}

	ret = i915_gem_freeze_late(i915);
	if (ret)
		return ret;

	return 0;
}
/* thaw: called after creating the hibernation image, but before turning off. */
static int i915_pm_thaw_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_thaw(struct device *kdev)
{
	return i915_pm_resume(kdev);
}

/* restore: called after loading the hibernation image. */
static int i915_pm_restore_early(struct device *kdev)
{
	return i915_pm_resume_early(kdev);
}

static int i915_pm_restore(struct device *kdev)
{
	return i915_pm_resume(kdev);
}
/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode = I915_READ(ARB_MODE);
	s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));

	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk = I915_READ(GAM_ECOCHK);
	s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl = I915_READ(VLV_G3DCTL);
	s->gsckgctl = I915_READ(VLV_GSCKGCTL);
	s->mbctl = I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
	s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
	s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
	s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
	s->rstctl = I915_READ(GEN6_RSTCTL);
	s->misccpctl = I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause = I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc = I915_READ(GEN6_RPDEUC);
	s->ecobus = I915_READ(ECOBUS);
	s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata = I915_READ(VLV_RCEDATA);
	s->spare2gh = I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr = I915_READ(GTIMR);
	s->gt_ier = I915_READ(GTIER);
	s->pm_imr = I915_READ(GEN6_PMIMR);
	s->pm_ier = I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl = I915_READ(TILECTL);
	s->gt_fifoctl = I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz = I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
	s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
	s->pcbr = I915_READ(VLV_PCBR);
	s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT, 0x9800-0x9EC0
	 * SARB, 0xB000-0xB1FC
	 * GAC, 0x5208-0x524C, 0x14000-0x14C000
	 */
}
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	if (!s)
		return;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
	I915_WRITE(GAM_ECOCHK, s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL, s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
	I915_WRITE(GEN6_MBCTL, s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL, s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
	I915_WRITE(ECOBUS, s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA, s->rcedata);
	I915_WRITE(VLV_SPAREG2H, s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR, s->gt_imr);
	I915_WRITE(GTIER, s->gt_ier);
	I915_WRITE(GEN6_PMIMR, s->pm_imr);
	I915_WRITE(GEN6_PMIER, s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL, s->tilectl);
	I915_WRITE(GTFIFOCTL, s->gt_fifoctl);

	/*
	 * Preserve the GT allow wake and GFX force clock bits; they are not
	 * to be restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ, s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
	I915_WRITE(VLV_PCBR, s->pcbr);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
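/*
 * Sleeping poll of VLV_GTLC_PW_STATUS until (status & mask) == val;
 * returns 0 on success or -ETIMEDOUT once the 3ms budget is exhausted.
 */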
static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
				  u32 mask, u32 val)
{
	i915_reg_t reg = VLV_GTLC_PW_STATUS;
	u32 reg_value;
	int ret;

	/* The HW does not like us polling for PW_STATUS frequently, so
	 * use the sleeping loop rather than risk the busy spin within
	 * intel_wait_for_register().
	 *
	 * Transitioning between RC6 states should be at most 2ms (see
	 * valleyview_enable_rps) so use a 3ms timeout.
	 */
	ret = wait_for(((reg_value =
			 intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
		       == val, 3);

	/* just trace the final value */
	trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);

	return ret;
}
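/*
 * Force the gfx clock on (or release the force) via the survivability
 * register; when forcing on, wait for the hardware acknowledgement in
 * VLV_GFX_CLK_STATUS_BIT before returning.
 */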
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = intel_wait_for_register(&dev_priv->uncore,
				      VLV_GTLC_SURVIVABILITY_REG,
				      VLV_GFX_CLK_STATUS_BIT,
				      VLV_GFX_CLK_STATUS_BIT,
				      20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
}
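/*
 * Grant or revoke the GT's permission to wake up, then wait for the
 * matching VLV_GTLC_ALLOWWAKEACK acknowledgement from the power well.
 */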
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 mask;
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

	mask = VLV_GTLC_ALLOWWAKEACK;
	val = allow ? mask : 0;

	err = vlv_wait_for_pw_status(dev_priv, mask, val);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");

	return err;
}
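/*
 * Wait for both the render and media wells to reach the requested on/off
 * state; a timeout is only logged, see the callers for why it is not fatal.
 */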
static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				  bool wait_for_on)
{
	u32 mask;
	u32 val;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 *
	 * This can fail to turn off the rc6 if the GPU is stuck after a failed
	 * reset and we are trying to force the machine to sleep.
	 */
	if (vlv_wait_for_pw_status(dev_priv, mask, val))
		DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
				 onoff(wait_for_on));
}
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
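/*
 * s0ix entry: with the GT wells down, force the gfx clock on, disallow GT
 * waking, save the Gunit state that the power well would otherwise lose,
 * then release the clock again. On error, unwind in reverse order.
 */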
static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;

	vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}
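/*
 * s0ix exit: the inverse of vlv_suspend_complete(). Errors are collected
 * but the sequence always runs to completion, per the comment below.
 */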
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
			      bool rpm_resume)
{
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	if (rpm_resume)
		intel_init_clock_gating(dev_priv);

	return ret;
}
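/*
 * Runtime PM suspend callback: quiesce GEM and the GT, disable interrupts
 * and uncore access, power down the display, and notify the ACPI opregion
 * of the target device state. A failure here unwinds everything and is
 * reported to the PM core, which keeps the device runtime-active.
 */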
static int intel_runtime_suspend(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Suspending device\n");

	disable_rpm_wakeref_asserts(rpm);

	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_runtime_suspend(dev_priv);

	intel_gt_runtime_suspend(&dev_priv->gt);

	intel_runtime_pm_disable_interrupts(dev_priv);

	intel_uncore_suspend(&dev_priv->uncore);

	intel_display_power_suspend(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_suspend_complete(dev_priv);

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_uncore_runtime_resume(&dev_priv->uncore);

		intel_runtime_pm_enable_interrupts(dev_priv);

		intel_gt_runtime_resume(&dev_priv->gt);

		i915_gem_restore_fences(dev_priv);

		enable_rpm_wakeref_asserts(rpm);

		return ret;
	}

	enable_rpm_wakeref_asserts(rpm);
	intel_runtime_pm_driver_release(rpm);

	if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
		DRM_ERROR("Unclaimed access detected prior to suspending\n");

	rpm->suspended = true;

	/*
	 * FIXME: We really should find a document that references the arguments
	 * used below!
	 */
	if (IS_BROADWELL(dev_priv)) {
		/*
		 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
		 * being detected, and the call we do at intel_runtime_resume()
		 * won't be able to restore them. Since PCI_D3hot matches the
		 * actual specification and appears to be working, use it.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
	} else {
		/*
		 * current versions of firmware which depend on this opregion
		 * notification have repurposed the D1 definition to mean
		 * "runtime suspended" vs. what you would normally expect (D3)
		 * to distinguish it from notifications that might be sent via
		 * the suspend path.
		 */
		intel_opregion_notify_adapter(dev_priv, PCI_D1);
	}

	assert_forcewakes_inactive(&dev_priv->uncore);

	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}
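/*
 * Runtime PM resume callback, mirroring intel_runtime_suspend(). Errors
 * are reported but not unwound, see the comment in the body.
 */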
static int intel_runtime_resume(struct device *kdev)
{
	struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
	struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
	int ret = 0;

	if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
		return -ENODEV;

	DRM_DEBUG_KMS("Resuming device\n");

	WARN_ON_ONCE(atomic_read(&rpm->wakeref_count));
	disable_rpm_wakeref_asserts(rpm);

	intel_opregion_notify_adapter(dev_priv, PCI_D0);
	rpm->suspended = false;
	if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
		DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");

	intel_display_power_resume(dev_priv);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = vlv_resume_prepare(dev_priv, true);

	intel_uncore_runtime_resume(&dev_priv->uncore);

	intel_runtime_pm_enable_interrupts(dev_priv);

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	intel_gt_runtime_resume(&dev_priv->gt);
	i915_gem_restore_fences(dev_priv);

	/*
	 * On VLV/CHV display interrupts are part of the display
	 * power well, so hpd is reinitialized from there. For
	 * everyone else do it here.
	 */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_init(dev_priv);

	intel_enable_ipc(dev_priv);

	enable_rpm_wakeref_asserts(rpm);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}
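/*
 * Dispatch table tying the kernel PM events to the handlers above. Note
 * that .poweroff maps to i915_pm_suspend: by the time it is called the
 * hibernation image has already been written (see [PMSG_HIBERNATE] below),
 * so an ordinary suspend is sufficient.
 */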
const struct dev_pm_ops i915_pm_ops = {
	/*
	 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
	 * PMSG_RESUME]
	 */
	.prepare = i915_pm_prepare,
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,

	/*
	 * S4 event handlers
	 * @freeze, @freeze_late    : called (1) before creating the
	 *                            hibernation image [PMSG_FREEZE] and
	 *                            (2) after rebooting, before restoring
	 *                            the image [PMSG_QUIESCE]
	 * @thaw, @thaw_early       : called (1) after creating the hibernation
	 *                            image, before writing it [PMSG_THAW]
	 *                            and (2) after failing to create or
	 *                            restore the image [PMSG_RECOVER]
	 * @poweroff, @poweroff_late: called after writing the hibernation
	 *                            image, before rebooting [PMSG_HIBERNATE]
	 * @restore, @restore_early : called after rebooting and restoring the
	 *                            hibernation image [PMSG_RESTORE]
	 */
	.freeze = i915_pm_freeze,
	.freeze_late = i915_pm_freeze_late,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_suspend,
	.poweroff_late = i915_pm_poweroff_late,
	.restore_early = i915_pm_restore_early,
	.restore = i915_pm_restore,

	/* S0ix (via runtime suspend) event handlers */
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = i915_compat_ioctl,
	.llseek = noop_llseek,
};
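/*
 * The old pin/unpin ioctls are defunct; reject them outright instead of
 * letting drm_noop report spurious success.
 */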
static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}
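/*
 * The ioctl table. Legacy pre-GEM entry points are kept for ABI
 * compatibility but wired to drm_noop (or the explicit rejection above),
 * while the current GEM/KMS ioctls carry DRM_RENDER_ALLOW so they remain
 * usable from render nodes.
 */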
static const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
};
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_SYNCOBJ,
	.release = i915_driver_release,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.postclose = i915_driver_postclose,

	.gem_close_object = i915_gem_close_object,
	.gem_free_object_unlocked = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
	.get_scanout_position = i915_get_crtc_scanoutpos,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.ioctls = i915_ioctls,
	.num_ioctls = ARRAY_SIZE(i915_ioctls),
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
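/*
 * The selftests are compiled into this translation unit so they can reach
 * the file-local (static) definitions above.
 */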
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_drm.c"
#endif