drm/i915: Restore rps/rc6 on reset
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / i915_drv.c
1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2  */
3 /*
4  *
5  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the
10  * "Software"), to deal in the Software without restriction, including
11  * without limitation the rights to use, copy, modify, merge, publish,
12  * distribute, sub license, and/or sell copies of the Software, and to
13  * permit persons to whom the Software is furnished to do so, subject to
14  * the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the
17  * next paragraph) shall be included in all copies or substantial portions
18  * of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27  *
28  */
29
30 #include <linux/device.h>
31 #include <drm/drmP.h>
32 #include <drm/i915_drm.h>
33 #include "i915_drv.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
37 #include <linux/console.h>
38 #include <linux/module.h>
39 #include <drm/drm_crtc_helper.h>
40
41 static struct drm_driver driver;
42
/*
 * Default per-pipe/transcoder/DPLL/palette MMIO offset tables shared by
 * all the platform descriptors below.  Being last-one-wins designated
 * initializers, individual platforms may override specific entries.
 */
#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
	.dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
51
52
/*
 * Per-platform feature descriptors, matched to PCI IDs via the
 * INTEL_PCI_IDS table below.  Gen2 parts (i830/845G/85x/865G) have only a
 * render ring and need physical addresses for cursors/overlays where
 * flagged.
 */
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen3: i915/i945 families. */
static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.supports_tv = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
113
/* Gen4: i965 (Broadwater/Crestline) and G4x; G4x adds a BSD (video) ring. */
static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* G33/Pineview are gen3 derivatives that need a GFX hardware status page. */
static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.supports_tv = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Note: no .ring_mask here — Pineview inherits the zero default. */
static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_overlay = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen5: Ironlake desktop/mobile. */
static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};
176
/* Gen6: Sandybridge — first generation with a blitter ring and LLC. */
static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.has_fbc = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	.has_llc = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};
194
/*
 * Common gen7 feature baseline.  Individual platforms below override
 * entries after expanding this macro (designated-initializer
 * "last one wins" semantics).
 */
#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1

static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* IVB "Q" SKU: no display at all (num_pipes = 0 marks it PCH_NOP). */
static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Valleyview: gen7 baseline minus FBC/LLC, display block at VLV offset. */
static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Haswell: gen7 baseline plus DDI, FPGA debug and a VEBOX ring. */
static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.has_ddi = 1,
	.has_fpga_dbg = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
};

/* Gen8: Broadwell. */
static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.has_llc = 1,
	.has_ddi = 1,
	GEN_DEFAULT_PIPEOFFSETS,
};
279
280 /*
281  * Make sure any device matches here are from most specific to most
282  * general.  For example, since the Quanta match is based on the subsystem
283  * and subvendor IDs, we need it to come before the more general IVB
284  * PCI ID matches, otherwise we'll use the wrong info struct above.
285  */
286 #define INTEL_PCI_IDS \
287         INTEL_I830_IDS(&intel_i830_info),       \
288         INTEL_I845G_IDS(&intel_845g_info),      \
289         INTEL_I85X_IDS(&intel_i85x_info),       \
290         INTEL_I865G_IDS(&intel_i865g_info),     \
291         INTEL_I915G_IDS(&intel_i915g_info),     \
292         INTEL_I915GM_IDS(&intel_i915gm_info),   \
293         INTEL_I945G_IDS(&intel_i945g_info),     \
294         INTEL_I945GM_IDS(&intel_i945gm_info),   \
295         INTEL_I965G_IDS(&intel_i965g_info),     \
296         INTEL_G33_IDS(&intel_g33_info),         \
297         INTEL_I965GM_IDS(&intel_i965gm_info),   \
298         INTEL_GM45_IDS(&intel_gm45_info),       \
299         INTEL_G45_IDS(&intel_g45_info),         \
300         INTEL_PINEVIEW_IDS(&intel_pineview_info),       \
301         INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),   \
302         INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),   \
303         INTEL_SNB_D_IDS(&intel_sandybridge_d_info),     \
304         INTEL_SNB_M_IDS(&intel_sandybridge_m_info),     \
305         INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
306         INTEL_IVB_M_IDS(&intel_ivybridge_m_info),       \
307         INTEL_IVB_D_IDS(&intel_ivybridge_d_info),       \
308         INTEL_HSW_D_IDS(&intel_haswell_d_info), \
309         INTEL_HSW_M_IDS(&intel_haswell_m_info), \
310         INTEL_VLV_M_IDS(&intel_valleyview_m_info),      \
311         INTEL_VLV_D_IDS(&intel_valleyview_d_info),      \
312         INTEL_BDW_M_IDS(&intel_broadwell_m_info),       \
313         INTEL_BDW_D_IDS(&intel_broadwell_d_info)
314
315 static const struct pci_device_id pciidlist[] = {               /* aka */
316         INTEL_PCI_IDS,
317         {0, 0, 0}
318 };
319
320 #if defined(CONFIG_DRM_I915_KMS)
321 MODULE_DEVICE_TABLE(pci, pciidlist);
322 #endif
323
324 void intel_detect_pch(struct drm_device *dev)
325 {
326         struct drm_i915_private *dev_priv = dev->dev_private;
327         struct pci_dev *pch;
328
329         /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
330          * (which really amounts to a PCH but no South Display).
331          */
332         if (INTEL_INFO(dev)->num_pipes == 0) {
333                 dev_priv->pch_type = PCH_NOP;
334                 return;
335         }
336
337         /*
338          * The reason to probe ISA bridge instead of Dev31:Fun0 is to
339          * make graphics device passthrough work easy for VMM, that only
340          * need to expose ISA bridge to let driver know the real hardware
341          * underneath. This is a requirement from virtualization team.
342          *
343          * In some virtualized environments (e.g. XEN), there is irrelevant
344          * ISA bridge in the system. To work reliably, we should scan trhough
345          * all the ISA bridge devices and check for the first match, instead
346          * of only checking the first one.
347          */
348         pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
349         while (pch) {
350                 struct pci_dev *curr = pch;
351                 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
352                         unsigned short id;
353                         id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
354                         dev_priv->pch_id = id;
355
356                         if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
357                                 dev_priv->pch_type = PCH_IBX;
358                                 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
359                                 WARN_ON(!IS_GEN5(dev));
360                         } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
361                                 dev_priv->pch_type = PCH_CPT;
362                                 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
363                                 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
364                         } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
365                                 /* PantherPoint is CPT compatible */
366                                 dev_priv->pch_type = PCH_CPT;
367                                 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
368                                 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
369                         } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
370                                 dev_priv->pch_type = PCH_LPT;
371                                 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
372                                 WARN_ON(!IS_HASWELL(dev));
373                                 WARN_ON(IS_ULT(dev));
374                         } else if (IS_BROADWELL(dev)) {
375                                 dev_priv->pch_type = PCH_LPT;
376                                 dev_priv->pch_id =
377                                         INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
378                                 DRM_DEBUG_KMS("This is Broadwell, assuming "
379                                               "LynxPoint LP PCH\n");
380                         } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
381                                 dev_priv->pch_type = PCH_LPT;
382                                 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
383                                 WARN_ON(!IS_HASWELL(dev));
384                                 WARN_ON(!IS_ULT(dev));
385                         } else {
386                                 goto check_next;
387                         }
388                         pci_dev_put(pch);
389                         break;
390                 }
391 check_next:
392                 pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
393                 pci_dev_put(curr);
394         }
395         if (!pch)
396                 DRM_DEBUG_KMS("No PCH found?\n");
397 }
398
399 bool i915_semaphore_is_enabled(struct drm_device *dev)
400 {
401         if (INTEL_INFO(dev)->gen < 6)
402                 return false;
403
404         /* Until we get further testing... */
405         if (IS_GEN8(dev)) {
406                 WARN_ON(!i915.preliminary_hw_support);
407                 return false;
408         }
409
410         if (i915.semaphores >= 0)
411                 return i915.semaphores;
412
413 #ifdef CONFIG_INTEL_IOMMU
414         /* Enable semaphores on SNB when IO remapping is off */
415         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
416                 return false;
417 #endif
418
419         return true;
420 }
421
/*
 * i915_drm_freeze - quiesce GPU and display for system suspend/hibernate.
 * @dev: drm device
 *
 * Common freeze worker shared by the suspend, freeze and poweroff PM
 * callbacks.  The sequence is order-dependent: GEM must be idled before
 * irqs are torn down and CRTCs disabled, and register state is saved
 * last.  Returns 0 on success or a negative error code.
 */
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	/* Hold a runtime-pm reference across suspend; __i915_drm_thaw
	 * drops it again. */
	intel_runtime_pm_get(dev_priv);

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	hsw_disable_package_c8(dev_priv);
	intel_display_set_init_power(dev, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error;

		/* Idle the GPU; abort the whole freeze on failure since
		 * resume would likely fail too. */
		error = i915_gem_suspend(dev);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}

		/* Make sure no deferred rps/rc6 enable work runs after this. */
		cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);

		drm_irq_uninstall(dev);
		dev_priv->enable_hotplug_processing = false;
		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw.
		 */
		mutex_lock(&dev->mode_config.mutex);
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
			dev_priv->display.crtc_disable(crtc);
		mutex_unlock(&dev->mode_config.mutex);

		intel_modeset_suspend_hw(dev);
	}

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	intel_opregion_fini(dev);

	/* Park the fbdev console under the console lock. */
	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
	console_unlock();

	return 0;
}
482
/*
 * i915_suspend - legacy DRM suspend entry point.
 * @dev: drm device
 * @state: pm event describing the transition
 *
 * Freezes the device and, for a real suspend event, powers the PCI
 * device down to D3hot.  PRETHAW events and vga_switcheroo'd-off devices
 * are no-ops.  Returns 0 or a negative error code.
 */
int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	/* Nothing to do when only preparing for a thaw. */
	if (state.event == PM_EVENT_PRETHAW)
		return 0;


	/* Device already switched off by vga_switcheroo. */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)
		return error;

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
}
512
513 void intel_console_resume(struct work_struct *work)
514 {
515         struct drm_i915_private *dev_priv =
516                 container_of(work, struct drm_i915_private,
517                              console_resume_work);
518         struct drm_device *dev = dev_priv->dev;
519
520         console_lock();
521         intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
522         console_unlock();
523 }
524
525 static void intel_resume_hotplug(struct drm_device *dev)
526 {
527         struct drm_mode_config *mode_config = &dev->mode_config;
528         struct intel_encoder *encoder;
529
530         mutex_lock(&mode_config->mutex);
531         DRM_DEBUG_KMS("running encoder hotplug functions\n");
532
533         list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
534                 if (encoder->hot_plug)
535                         encoder->hot_plug(encoder);
536
537         mutex_unlock(&mode_config->mutex);
538
539         /* Just fire off a uevent and let userspace tell us what to do */
540         drm_helper_hpd_irq_event(dev);
541 }
542
/*
 * __i915_drm_thaw - bring the device back up after freeze/suspend.
 * @dev: drm device
 * @restore_gtt_mappings: re-write all GTT PTEs (needed when the BIOS may
 *	have clobbered them, see i915_resume())
 *
 * Mirror of i915_drm_freeze(): restores GTT/register state, re-inits hw,
 * reinstalls irqs and modeset state, and un-suspends the fbdev console.
 * Returns 0 or the error from i915_gem_init_hw().
 */
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	intel_uncore_early_sanitize(dev);

	intel_uncore_sanitize(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	intel_power_domains_init_hw(dev);

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);
		drm_mode_config_reset(dev);

		mutex_lock(&dev->struct_mutex);

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 * */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		intel_resume_hotplug(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity.  Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	/* Undo what we did at i915_drm_freeze so the refcount goes back to the
	 * expected level. */
	hsw_enable_package_c8(dev_priv);

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* Drop the runtime-pm reference taken in i915_drm_freeze(). */
	intel_runtime_pm_put(dev_priv);
	return error;
}
620
/*
 * Thaw after hibernation: scrub any stale GTT faults (KMS only) and run
 * the full thaw path with GTT mappings restored unconditionally.
 */
static int i915_drm_thaw(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_check_and_clear_faults(dev);

	return __i915_drm_thaw(dev, true);
}
628
/*
 * i915_resume - resume the device after i915_suspend().
 * @dev: drm device
 *
 * Re-enables the PCI device and runs the thaw path.  Returns 0 or a
 * negative error code.
 */
int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* Device is switched off by vga_switcheroo; nothing to resume. */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need to restore the GTT mappings since the BIOS might clear
	 * all our scratch PTEs.
	 */
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}
654
655 /**
656  * i915_reset - reset chip after a hang
657  * @dev: drm device to reset
658  *
659  * Reset the chip.  Useful if a hang is detected. Returns zero on successful
660  * reset or otherwise an error code.
661  *
662  * Procedure is fairly simple:
663  *   - reset the chip using the reset reg
664  *   - re-init context state
665  *   - re-init hardware status page
666  *   - re-init ring buffer
667  *   - re-init interrupt state
668  *   - re-init display
669  */
670 int i915_reset(struct drm_device *dev)
671 {
672         drm_i915_private_t *dev_priv = dev->dev_private;
673         bool simulated;
674         int ret;
675
676         if (!i915.reset)
677                 return 0;
678
679         mutex_lock(&dev->struct_mutex);
680
681         i915_gem_reset(dev);
682
683         simulated = dev_priv->gpu_error.stop_rings != 0;
684
685         ret = intel_gpu_reset(dev);
686
687         /* Also reset the gpu hangman. */
688         if (simulated) {
689                 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
690                 dev_priv->gpu_error.stop_rings = 0;
691                 if (ret == -ENODEV) {
692                         DRM_INFO("Reset not implemented, but ignoring "
693                                  "error for simulated gpu hangs\n");
694                         ret = 0;
695                 }
696         }
697
698         if (ret) {
699                 DRM_ERROR("Failed to reset chip: %i\n", ret);
700                 mutex_unlock(&dev->struct_mutex);
701                 return ret;
702         }
703
704         /* Ok, now get things going again... */
705
706         /*
707          * Everything depends on having the GTT running, so we need to start
708          * there.  Fortunately we don't need to do this unless we reset the
709          * chip at a PCI level.
710          *
711          * Next we need to restore the context, but we don't use those
712          * yet either...
713          *
714          * Ring buffer needs to be re-initialized in the KMS case, or if X
715          * was running at the time of the reset (i.e. we weren't VT
716          * switched away).
717          */
718         if (drm_core_check_feature(dev, DRIVER_MODESET) ||
719                         !dev_priv->ums.mm_suspended) {
720                 dev_priv->ums.mm_suspended = 0;
721
722                 ret = i915_gem_init_hw(dev);
723                 mutex_unlock(&dev->struct_mutex);
724                 if (ret) {
725                         DRM_ERROR("Failed hw init on reset %d\n", ret);
726                         return ret;
727                 }
728
729                 drm_irq_uninstall(dev);
730                 drm_irq_install(dev);
731
732                 /* rps/rc6 re-init is necessary to restore state lost after the
733                  * reset and the re-install of drm irq. Skip for ironlake per
734                  * previous concerns that it doesn't respond well to some forms
735                  * of re-init after reset. */
736                 if (INTEL_INFO(dev)->gen > 5) {
737                         mutex_lock(&dev->struct_mutex);
738                         intel_enable_gt_powersave(dev);
739                         mutex_unlock(&dev->struct_mutex);
740                 }
741
742                 intel_hpd_init(dev);
743         } else {
744                 mutex_unlock(&dev->struct_mutex);
745         }
746
747         return 0;
748 }
749
750 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
751 {
752         struct intel_device_info *intel_info =
753                 (struct intel_device_info *) ent->driver_data;
754
755         if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
756                 DRM_INFO("This hardware requires preliminary hardware support.\n"
757                          "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
758                 return -ENODEV;
759         }
760
761         /* Only bind to function 0 of the device. Early generations
762          * used function 1 as a placeholder for multi-head. This causes
763          * us confusion instead, especially on the systems where both
764          * functions have the same PCI-ID!
765          */
766         if (PCI_FUNC(pdev->devfn))
767                 return -ENODEV;
768
769         driver.driver_features &= ~(DRIVER_USE_AGP);
770
771         return drm_get_pci_dev(pdev, ent, &driver);
772 }
773
/* PCI remove callback: tear down the DRM device registered at probe. */
static void
i915_pci_remove(struct pci_dev *pdev)
{
	drm_put_dev(pci_get_drvdata(pdev));
}
781
782 static int i915_pm_suspend(struct device *dev)
783 {
784         struct pci_dev *pdev = to_pci_dev(dev);
785         struct drm_device *drm_dev = pci_get_drvdata(pdev);
786         int error;
787
788         if (!drm_dev || !drm_dev->dev_private) {
789                 dev_err(dev, "DRM not initialized, aborting suspend.\n");
790                 return -ENODEV;
791         }
792
793         if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
794                 return 0;
795
796         error = i915_drm_freeze(drm_dev);
797         if (error)
798                 return error;
799
800         pci_disable_device(pdev);
801         pci_set_power_state(pdev, PCI_D3hot);
802
803         return 0;
804 }
805
/* dev_pm_ops .resume/.restore: full device resume. */
static int i915_pm_resume(struct device *dev)
{
	return i915_resume(pci_get_drvdata(to_pci_dev(dev)));
}
813
814 static int i915_pm_freeze(struct device *dev)
815 {
816         struct pci_dev *pdev = to_pci_dev(dev);
817         struct drm_device *drm_dev = pci_get_drvdata(pdev);
818
819         if (!drm_dev || !drm_dev->dev_private) {
820                 dev_err(dev, "DRM not initialized, aborting suspend.\n");
821                 return -ENODEV;
822         }
823
824         return i915_drm_freeze(drm_dev);
825 }
826
/* dev_pm_ops .thaw: undo i915_pm_freeze() after the image is written. */
static int i915_pm_thaw(struct device *dev)
{
	return i915_drm_thaw(pci_get_drvdata(to_pci_dev(dev)));
}
834
835 static int i915_pm_poweroff(struct device *dev)
836 {
837         struct pci_dev *pdev = to_pci_dev(dev);
838         struct drm_device *drm_dev = pci_get_drvdata(pdev);
839
840         return i915_drm_freeze(drm_dev);
841 }
842
/*
 * Runtime-PM suspend callback: revoke userspace GTT mmaps, stop
 * hangcheck and mark the device software-suspended before notifying the
 * firmware via opregion.
 */
static int i915_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_RUNTIME_PM(dev));

	DRM_DEBUG_KMS("Suspending device\n");

	/* Revoke mmaps so any later access faults and wakes the device. */
	i915_gem_release_all_mmaps(dev_priv);

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	dev_priv->pm.suspended = true;

	/*
	 * current versions of firmware which depend on this opregion
	 * notification have repurposed the D1 definition to mean
	 * "runtime suspended" vs. what you would normally expect (D3)
	 * to distinguish it from notifications that might be sent
	 * via the suspend path.
	 */
	intel_opregion_notify_adapter(dev, PCI_D1);

	return 0;
}
869
/*
 * Runtime PM resume callback: mark the device active again and tell
 * the firmware (via opregion) that we are back in D0.
 */
static int i915_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* This callback should only ever run on runtime-PM-capable HW. */
	WARN_ON(!HAS_RUNTIME_PM(dev));

	DRM_DEBUG_KMS("Resuming device\n");

	/* Notify firmware first, then clear the suspended flag. */
	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	return 0;
}
885
/*
 * Power-management callbacks wired into the PCI driver below.  The
 * legacy .suspend/.resume hooks in the drm_driver struct cover the
 * non-DRIVER_MODESET (UMS) case instead.
 */
static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	/* Hibernation restore reuses the normal resume path. */
	.restore = i915_pm_resume,
	.runtime_suspend = i915_runtime_suspend,
	.runtime_resume = i915_runtime_resume,
};
896
/*
 * VM operations for GEM object mmaps: i915 supplies the fault handler,
 * the DRM core handles VMA open/close refcounting.
 */
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
902
/*
 * File operations for the DRM device node; all paths are routed
 * through DRM core helpers, with i915 only overriding 32-bit compat
 * ioctl translation.
 */
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
916
/*
 * The DRM driver description.  num_ioctls and (depending on config and
 * the i915.modeset parameter) the DRIVER_MODESET feature flag are
 * fixed up at runtime in i915_init() before registration.
 */
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume,

	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	/* PRIME buffer sharing (dma-buf export/import). */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	/* Dumb-buffer interface for unaccelerated scanout buffers. */
	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
963
/* PCI driver glue: probe/remove plus the PM callbacks defined above. */
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};
971
/*
 * Module entry point: decide whether KMS (DRIVER_MODESET) is enabled
 * from kernel config and the i915.modeset parameter, then register
 * the PCI driver with the DRM core.
 */
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915.modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915.modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	/* vga= boot option forces text mode; only honor it when the
	 * user did not explicitly request modeset (i915.modeset == -1).
	 */
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		/* KMS-only vblank timestamping is unavailable in UMS mode. */
		driver.get_vblank_timestamp = NULL;
#ifndef CONFIG_DRM_I915_UMS
		/* Silently fail loading to not upset userspace. */
		return 0;
#endif
	}

	return drm_pci_init(&driver, &i915_pci_driver);
}
1007
/*
 * Module exit point.  Must mirror i915_init(): when the UMS-less build
 * silently skipped registration, there is nothing to unregister.
 */
static void __exit i915_exit(void)
{
#ifndef CONFIG_DRM_I915_UMS
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */
#endif

	drm_pci_exit(&driver, &i915_pci_driver);
}
1017
/* Module registration and metadata. */
module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");