drm/i915: vlv: increase timeout when forcing on the GFX clock
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / i915_drv.c
1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2  */
3 /*
4  *
5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  */
29
30 #include <linux/device.h>
31 #include <drm/drmP.h>
32 #include <drm/i915_drm.h>
33 #include "i915_drv.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
37 #include <linux/console.h>
38 #include <linux/module.h>
39 #include <drm/drm_crtc_helper.h>
40
41 static struct drm_driver driver;
42
/*
 * Default per-pipe/transcoder/DPLL/palette register offsets, shared by all
 * the intel_device_info initializers below.  A struct using this macro can
 * still override individual fields by listing them afterwards ("last one
 * wins" in a designated initializer).
 */
#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
52
/* Gen2/gen3 platforms: render ring only; some parts need physical
 * addresses for the cursor and/or overlay. */
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
113
/* Gen4 (i965/g4x) through gen6 (Sandybridge): BSD ring appears on g4x/ILK,
 * blitter ring and LLC from gen6 onwards. */
static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};
194
/*
 * Feature flags common to the gen7 parts below.  Structs using the macro
 * may override individual fields by listing them afterwards ("last one
 * wins" in a designated initializer), as VLV does for fbc/llc.
 */
#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Quanta transcode remote: no display pipes (matched by subsystem id,
 * see the comment above INTEL_PCI_IDS). */
static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen8 (Broadwell); the gt3 variants additionally expose a second BSD
 * ring (BSD2_RING). */
static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	.has_llc = 1,
	.has_ddi = 1,
	.has_fbc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};
301
/*
 * Make sure any device matches here are from most specific to most
 * general.  For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I830_IDS(&intel_i830_info),	\
	INTEL_I845G_IDS(&intel_845g_info),	\
	INTEL_I85X_IDS(&intel_i85x_info),	\
	INTEL_I865G_IDS(&intel_i865g_info),	\
	INTEL_I915G_IDS(&intel_i915g_info),	\
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
	INTEL_I945G_IDS(&intel_i945g_info),	\
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
	INTEL_I965G_IDS(&intel_i965g_info),	\
	INTEL_G33_IDS(&intel_g33_info),		\
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
	INTEL_GM45_IDS(&intel_gm45_info),	\
	INTEL_G45_IDS(&intel_g45_info),		\
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
	INTEL_HSW_D_IDS(&intel_haswell_d_info), \
	INTEL_HSW_M_IDS(&intel_haswell_m_info), \
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info)

static const struct pci_device_id pciidlist[] = {		/* aka */
	INTEL_PCI_IDS,
	{0, 0, 0}
};

/* Export the device table for module autoloading only when the driver is
 * built with KMS support. */
#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
347
/*
 * intel_detect_pch - identify the PCH (south bridge) paired with the GPU.
 *
 * Scans the PCI bus for Intel ISA bridge devices and records the matching
 * PCH type and device id in dev_priv->pch_type / dev_priv->pch_id.  If no
 * PCH is found, pch_type is left untouched (PCH_NOP is set up-front for
 * devices without display pipes).
 */
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * need to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there is irrelevant
	 * ISA bridge in the system. To work reliably, we should scan through
	 * all the ISA bridge devices and check for the first match, instead
	 * of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_ULT(dev));
			} else if (IS_BROADWELL(dev)) {
				/* NOTE(review): this platform check sits before
				 * the LPT_LP id match, so any BDW machine is
				 * forced to LPT_LP regardless of the bridge id
				 * actually found — confirm this is intentional. */
				dev_priv->pch_type = PCH_LPT;
				dev_priv->pch_id =
					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
				DRM_DEBUG_KMS("This is Broadwell, assuming "
					      "LynxPoint LP PCH\n");
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	/* Drop the reference from the final pci_get_class(); free on NULL is
	 * a no-op. */
	pci_dev_put(pch);
}
417
418 bool i915_semaphore_is_enabled(struct drm_device *dev)
419 {
420         if (INTEL_INFO(dev)->gen < 6)
421                 return false;
422
423         if (i915.semaphores >= 0)
424                 return i915.semaphores;
425
426         /* Until we get further testing... */
427         if (IS_GEN8(dev))
428                 return false;
429
430 #ifdef CONFIG_INTEL_IOMMU
431         /* Enable semaphores on SNB when IO remapping is off */
432         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
433                 return false;
434 #endif
435
436         return true;
437 }
438
439 static int i915_drm_freeze(struct drm_device *dev)
440 {
441         struct drm_i915_private *dev_priv = dev->dev_private;
442         struct drm_crtc *crtc;
443
444         intel_runtime_pm_get(dev_priv);
445
446         /* ignore lid events during suspend */
447         mutex_lock(&dev_priv->modeset_restore_lock);
448         dev_priv->modeset_restore = MODESET_SUSPENDED;
449         mutex_unlock(&dev_priv->modeset_restore_lock);
450
451         /* We do a lot of poking in a lot of registers, make sure they work
452          * properly. */
453         intel_display_set_init_power(dev_priv, true);
454
455         drm_kms_helper_poll_disable(dev);
456
457         pci_save_state(dev->pdev);
458
459         /* If KMS is active, we do the leavevt stuff here */
460         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
461                 int error;
462
463                 error = i915_gem_suspend(dev);
464                 if (error) {
465                         dev_err(&dev->pdev->dev,
466                                 "GEM idle failed, resume might fail\n");
467                         return error;
468                 }
469
470                 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
471
472                 drm_irq_uninstall(dev);
473                 dev_priv->enable_hotplug_processing = false;
474                 /*
475                  * Disable CRTCs directly since we want to preserve sw state
476                  * for _thaw.
477                  */
478                 mutex_lock(&dev->mode_config.mutex);
479                 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
480                         dev_priv->display.crtc_disable(crtc);
481                 mutex_unlock(&dev->mode_config.mutex);
482
483                 intel_modeset_suspend_hw(dev);
484         }
485
486         i915_gem_suspend_gtt_mappings(dev);
487
488         i915_save_state(dev);
489
490         intel_opregion_fini(dev);
491         intel_uncore_fini(dev);
492
493         console_lock();
494         intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
495         console_unlock();
496
497         dev_priv->suspend_count++;
498
499         return 0;
500 }
501
502 int i915_suspend(struct drm_device *dev, pm_message_t state)
503 {
504         int error;
505
506         if (!dev || !dev->dev_private) {
507                 DRM_ERROR("dev: %p\n", dev);
508                 DRM_ERROR("DRM not initialized, aborting suspend.\n");
509                 return -ENODEV;
510         }
511
512         if (state.event == PM_EVENT_PRETHAW)
513                 return 0;
514
515
516         if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
517                 return 0;
518
519         error = i915_drm_freeze(dev);
520         if (error)
521                 return error;
522
523         if (state.event == PM_EVENT_SUSPEND) {
524                 /* Shut down the device */
525                 pci_disable_device(dev->pdev);
526                 pci_set_power_state(dev->pdev, PCI_D3hot);
527         }
528
529         return 0;
530 }
531
/*
 * intel_console_resume - deferred fbdev un-suspend.
 *
 * Work item scheduled from __i915_drm_thaw() when the console lock was
 * contended at resume time; marks the fbdev as running once the lock can
 * be taken.
 */
void intel_console_resume(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     console_resume_work);
	struct drm_device *dev = dev_priv->dev;

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
	console_unlock();
}
543
/*
 * intel_resume_hotplug - re-run hotplug detection after resume.
 *
 * Output configuration may have changed while we were suspended, so poke
 * every encoder's hot_plug hook and then send a hotplug uevent so
 * userspace can react.
 */
static void intel_resume_hotplug(struct drm_device *dev)
{
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	mutex_unlock(&mode_config->mutex);

	/* Just fire off a uevent and let userspace tell us what to do */
	drm_helper_hpd_irq_event(dev);
}
561
/*
 * i915_drm_thaw_early - early resume: sanitize uncore state and bring up
 * the hardware power domains before the main thaw sequence runs.
 *
 * Always returns 0.
 */
static int i915_drm_thaw_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_uncore_early_sanitize(dev);
	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return 0;
}
572
/*
 * __i915_drm_thaw - main resume sequence, shared by thaw and resume paths.
 * @dev: drm device
 * @restore_gtt_mappings: re-write the GTT PTEs (needed when the BIOS may
 *	have clobbered them, see i915_resume()).
 *
 * Restores saved register state, re-initializes the GPU and display
 * hardware, re-installs interrupts and hotplug handling, and drops the
 * runtime-PM wakeref taken in i915_drm_freeze().  Always returns 0; a GPU
 * re-init failure is handled by marking the GPU wedged rather than by
 * failing the resume.
 */
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);
		drm_mode_config_reset(dev);

		mutex_lock(&dev->struct_mutex);
		if (i915_gem_init_hw(dev)) {
			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
			atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		}
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev, dev->pdev->irq);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 * */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		intel_resume_hotplug(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* Balances intel_runtime_pm_get() in i915_drm_freeze(). */
	intel_runtime_pm_put(dev_priv);
	return 0;
}
641
/*
 * i915_drm_thaw - thaw after hibernation: clear stale GTT faults and run
 * the full resume sequence, always restoring the GTT mappings.
 */
static int i915_drm_thaw(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_check_and_clear_faults(dev);

	return __i915_drm_thaw(dev, true);
}
649
/*
 * i915_resume_early - early resume: re-enable the PCI device and perform
 * the early uncore/power-domain setup.
 *
 * Returns 0 on success, -EIO if the PCI device cannot be enabled, or the
 * result of i915_drm_thaw_early().
 */
static int i915_resume_early(struct drm_device *dev)
{
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	return i915_drm_thaw_early(dev);
}
671
/*
 * i915_resume - main resume entry point.
 *
 * Runs the full thaw sequence and re-enables output polling.  GTT
 * mappings are restored only when there is no opregion, i.e. on older
 * platforms whose BIOS might have cleared our scratch PTEs.
 *
 * Returns 0 on success or the error from __i915_drm_thaw().
 */
int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need to restore the GTT mappings since the BIOS might clear
	 * all our scratch PTEs.
	 */
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}
689
/*
 * i915_resume_legacy - combined early + main resume for the legacy path.
 *
 * Previously both return values were discarded and 0 was returned
 * unconditionally, hiding resume failures (e.g. -EIO from
 * pci_enable_device()) from the caller.  Propagate them instead.
 */
static int i915_resume_legacy(struct drm_device *dev)
{
	int ret;

	ret = i915_resume_early(dev);
	if (ret)
		return ret;

	return i915_resume(dev);
}
697
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 *   - re-init display
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	/* Resets disabled via the i915.reset modparam. */
	if (!i915.reset)
		return 0;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	/* Non-zero stop_rings means this hang was injected for testing. */
	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there.  Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->ums.mm_suspended) {
		dev_priv->ums.mm_suspended = 0;

		ret = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			DRM_ERROR("Failed hw init on reset %d\n", ret);
			return ret;
		}

		/*
		 * FIXME: This is horribly race against concurrent pageflip and
		 * vblank wait ioctls since they can observe dev->irqs_disabled
		 * being false when they shouldn't be able to.
		 */
		drm_irq_uninstall(dev);
		drm_irq_install(dev, dev->pdev->irq);

		/* rps/rc6 re-init is necessary to restore state lost after the
		 * reset and the re-install of drm irq. Skip for ironlake per
		 * previous concerns that it doesn't respond well to some forms
		 * of re-init after reset. */
		if (INTEL_INFO(dev)->gen > 5)
			intel_reset_gt_powersave(dev);

		intel_hpd_init(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}
794
/*
 * i915_pci_probe - PCI bind callback.
 *
 * Rejects preliminary hardware unless explicitly enabled, binds only to
 * PCI function 0, and hands off to the DRM core.  Returns 0 on success or
 * a negative error code.
 */
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	/* AGP is never used by this driver's GEM paths. */
	driver.driver_features &= ~(DRIVER_USE_AGP);

	return drm_get_pci_dev(pdev, ent, &driver);
}
818
/* PCI unbind callback: tear down the DRM device bound to this PCI dev. */
static void
i915_pci_remove(struct pci_dev *pdev)
{
	drm_put_dev(pci_get_drvdata(pdev));
}
826
/*
 * i915_pm_suspend - dev_pm_ops .suspend hook.
 *
 * No-op while vga-switcheroo has powered us off; otherwise freezes the
 * device.  Returns 0 or a negative error code.
 */
static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_freeze(drm_dev);
}
842
/* Late suspend phase: put the PCI device itself into D3hot. */
static int i915_pm_suspend_late(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be power up. Due to the lack of a
	 * parent/child relationship we currently solve this with an late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* Only now is it safe to cut power to the PCI function. */
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
865
/* Early resume phase: re-enable the PCI device before full resume. */
static int i915_pm_resume_early(struct device *dev)
{
	return i915_resume_early(pci_get_drvdata(to_pci_dev(dev)));
}
873
/* System resume callback: full resume of the bound drm_device. */
static int i915_pm_resume(struct device *dev)
{
	return i915_resume(pci_get_drvdata(to_pci_dev(dev)));
}
881
882 static int i915_pm_freeze(struct device *dev)
883 {
884         struct pci_dev *pdev = to_pci_dev(dev);
885         struct drm_device *drm_dev = pci_get_drvdata(pdev);
886
887         if (!drm_dev || !drm_dev->dev_private) {
888                 dev_err(dev, "DRM not initialized, aborting suspend.\n");
889                 return -ENODEV;
890         }
891
892         return i915_drm_freeze(drm_dev);
893 }
894
/* Early thaw phase after hibernation image load. */
static int i915_pm_thaw_early(struct device *dev)
{
	return i915_drm_thaw_early(pci_get_drvdata(to_pci_dev(dev)));
}
902
/* Thaw callback: restore device state after hibernation image load. */
static int i915_pm_thaw(struct device *dev)
{
	return i915_drm_thaw(pci_get_drvdata(to_pci_dev(dev)));
}
910
/* Poweroff callback before entering hibernation: freeze the device. */
static int i915_pm_poweroff(struct device *dev)
{
	return i915_drm_freeze(pci_get_drvdata(to_pci_dev(dev)));
}
918
/* HSW/BDW runtime suspend: enter the PC8 power state. */
static void hsw_runtime_suspend(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);
}
923
924 static void snb_runtime_resume(struct drm_i915_private *dev_priv)
925 {
926         struct drm_device *dev = dev_priv->dev;
927
928         intel_init_pch_refclk(dev);
929 }
930
/* HSW/BDW runtime resume: leave the PC8 power state. */
static void hsw_runtime_resume(struct drm_i915_private *dev_priv)
{
	hsw_disable_pc8(dev_priv);
}
935
936 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
937 {
938         u32 val;
939         int err;
940
941         val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
942         WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
943
944 #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
945         /* Wait for a previous force-off to settle */
946         if (force_on) {
947                 err = wait_for(!COND, 20);
948                 if (err) {
949                         DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
950                                   I915_READ(VLV_GTLC_SURVIVABILITY_REG));
951                         return err;
952                 }
953         }
954
955         val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
956         val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
957         if (force_on)
958                 val |= VLV_GFX_CLK_FORCE_ON_BIT;
959         I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
960
961         if (!force_on)
962                 return 0;
963
964         err = wait_for(COND, 20);
965         if (err)
966                 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
967                           I915_READ(VLV_GTLC_SURVIVABILITY_REG));
968
969         return err;
970 #undef COND
971 }
972
/*
 * intel_runtime_suspend - runtime PM suspend callback
 *
 * Quiesce interrupts and platform power state and mark the device runtime
 * suspended. Only reached once the GPU is idle (see intel_mark_idle());
 * the statement order below is load-bearing — do not reorder casually.
 */
static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Runtime PM is only expected with both RPS enabled and RC6 usable. */
	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	WARN_ON(!HAS_RUNTIME_PM(dev));
	assert_force_wake_inactive(dev_priv);

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * rps.work can't be rearmed here, since we get here only after making
	 * sure the GPU is idle and the RPS freq is set to the minimum. See
	 * intel_mark_idle().
	 */
	cancel_work_sync(&dev_priv->rps.work);
	intel_runtime_pm_disable_interrupts(dev);

	/* Platform-specific suspend step; GEN6 needs no extra work. */
	if (IS_GEN6(dev))
		;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_runtime_suspend(dev_priv);
	else
		WARN_ON(1);

	/* Drop userspace GTT mmaps so faults re-wake the device. */
	i915_gem_release_all_mmaps(dev_priv);

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	dev_priv->pm.suspended = true;

	/*
	 * current versions of firmware which depend on this opregion
	 * notification have repurposed the D1 definition to mean
	 * "runtime suspended" vs. what you would normally expect (D3)
	 * to distinguish it from notifications that might be sent
	 * via the suspend path.
	 */
	intel_opregion_notify_adapter(dev, PCI_D1);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}
1019
/*
 * intel_runtime_resume - runtime PM resume callback
 *
 * Mirror image of intel_runtime_suspend(): notify firmware, clear the
 * suspended flag, run the platform-specific resume step, then restore
 * state lost while powered down.
 */
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_RUNTIME_PM(dev));

	DRM_DEBUG_KMS("Resuming device\n");

	/* Tell opregion firmware we are back in D0 before touching HW. */
	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	/* Platform-specific resume step. */
	if (IS_GEN6(dev))
		snb_runtime_resume(dev_priv);
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_runtime_resume(dev_priv);
	else
		WARN_ON(1);

	/* Re-program state that does not survive the power-down. */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_restore_interrupts(dev);
	intel_reset_gt_powersave(dev);

	DRM_DEBUG_KMS("Device resumed\n");
	return 0;
}
1049
/*
 * Device PM callbacks used in the KMS case; the legacy path uses the
 * drm_driver.suspend/resume hooks instead (see the driver struct below).
 * Hibernation restore reuses the resume callbacks.
 */
static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
1064
/* VM ops for GEM object mmaps: i915 fault handler, generic open/close. */
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
1070
/* File operations for the DRM device nodes; mostly generic DRM helpers. */
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	/* 32-bit compat ioctl translation. */
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
1084
/*
 * Main DRM driver definition. DRIVER_MODESET and DRIVER_USE_AGP are
 * adjusted at init/probe time (see i915_init() and i915_pci_probe()).
 */
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume_legacy,

	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	/* PRIME (dma-buf) import/export support. */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	/* Dumb-buffer allocation for simple scanout clients. */
	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
1131
/* PCI driver glue; power management callbacks come from i915_pm_ops. */
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};
1139
/*
 * Module init: decide between KMS and UMS operation based on config and
 * the i915.modeset parameter, then register the PCI driver.
 */
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915.modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915.modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	/* vga= text mode on the kernel command line wins over the default. */
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		/* UMS has no reliable vblank timestamping. */
		driver.get_vblank_timestamp = NULL;
#ifndef CONFIG_DRM_I915_UMS
		/* Silently fail loading to not upset userspace. */
		return 0;
#endif
	}

	return drm_pci_init(&driver, &i915_pci_driver);
}
1175
/* Module exit: unregister the PCI driver if it was ever registered. */
static void __exit i915_exit(void)
{
#ifndef CONFIG_DRM_I915_UMS
	/* Without UMS support, i915_init() bailed early when KMS was off. */
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */
#endif

	drm_pci_exit(&driver, &i915_pci_driver);
}
1185
/* Module entry points and metadata. */
module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");