1 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
33 #include <uapi/drm/i915_drm.h>
36 #include "intel_bios.h"
37 #include "intel_ringbuffer.h"
38 #include <linux/io-mapping.h>
39 #include <linux/i2c.h>
40 #include <linux/i2c-algo-bit.h>
41 #include <drm/intel-gtt.h>
42 #include <linux/backlight.h>
43 #include <linux/intel-iommu.h>
44 #include <linux/kref.h>
45 #include <linux/pm_qos.h>
47 /* General customization: */
50 #define DRIVER_AUTHOR "Tungsten Graphics, Inc."
52 #define DRIVER_NAME "i915"
53 #define DRIVER_DESC "Intel Graphics"
54 #define DRIVER_DATE "20080730"
62 #define pipe_name(p) ((p) + 'A')
70 #define transcoder_name(t) ((t) + 'A')
77 #define plane_name(p) ((p) + 'A')
79 #define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')
89 #define port_name(p) ((p) + 'A')
91 enum intel_display_power_domain {
95 POWER_DOMAIN_PIPE_A_PANEL_FITTER,
96 POWER_DOMAIN_PIPE_B_PANEL_FITTER,
97 POWER_DOMAIN_PIPE_C_PANEL_FITTER,
98 POWER_DOMAIN_TRANSCODER_A,
99 POWER_DOMAIN_TRANSCODER_B,
100 POWER_DOMAIN_TRANSCODER_C,
101 POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
105 #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
106 #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
107 ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
108 #define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
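/* Illustrative note (not from the original header): these helpers rely on the
 * enum values above being laid out contiguously per pipe/transcoder, so e.g.
 * POWER_DOMAIN_TRANSCODER(TRANSCODER_B) evaluates to POWER_DOMAIN_TRANSCODER_B.
 */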
112 HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
113 HPD_TV = HPD_NONE, /* TV is known to be unreliable */
123 #define I915_GEM_GPU_DOMAINS \
124 (I915_GEM_DOMAIN_RENDER | \
125 I915_GEM_DOMAIN_SAMPLER | \
126 I915_GEM_DOMAIN_COMMAND | \
127 I915_GEM_DOMAIN_INSTRUCTION | \
128 I915_GEM_DOMAIN_VERTEX)
130 #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
132 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
133 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
134 if ((intel_encoder)->base.crtc == (__crtc))
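/* Illustrative usage sketch (assumes a struct drm_device *dev and a
 * struct drm_crtc *crtc are in scope; not part of the original header):
 *
 *	int pipe;
 *	struct intel_encoder *encoder;
 *
 *	for_each_pipe(pipe)
 *		DRM_DEBUG_KMS("found pipe %c\n", pipe_name(pipe));
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder)
 *		DRM_DEBUG_KMS("encoder attached to this crtc\n");
 */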
136 struct drm_i915_private;
139 DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
140 /* real shared dpll ids must be >= 0 */
144 #define I915_NUM_PLLS 2
146 struct intel_dpll_hw_state {
153 struct intel_shared_dpll {
154 int refcount; /* count of number of CRTCs sharing this PLL */
155 int active; /* count of number of active CRTCs (i.e. DPMS on) */
156 bool on; /* is the PLL actually active? Disabled during modeset */
158 /* should match the index in the dev_priv->shared_dplls array */
159 enum intel_dpll_id id;
160 struct intel_dpll_hw_state hw_state;
161 void (*mode_set)(struct drm_i915_private *dev_priv,
162 struct intel_shared_dpll *pll);
163 void (*enable)(struct drm_i915_private *dev_priv,
164 struct intel_shared_dpll *pll);
165 void (*disable)(struct drm_i915_private *dev_priv,
166 struct intel_shared_dpll *pll);
167 bool (*get_hw_state)(struct drm_i915_private *dev_priv,
168 struct intel_shared_dpll *pll,
169 struct intel_dpll_hw_state *hw_state);
172 /* Used by DP and FDI links */
173 struct intel_link_m_n {
181 void intel_link_compute_m_n(int bpp, int nlanes,
182 int pixel_clock, int link_clock,
183 struct intel_link_m_n *m_n);
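/* Sketch of the underlying arithmetic (an assumption based on how DP/FDI M/N
 * ratios are conventionally defined, not a quote from this header):
 *
 *	data M/N = (pixel_clock * bpp) / (link_clock * nlanes * 8)
 *	link M/N =  pixel_clock / link_clock
 *
 * e.g. a 148500 kHz pixel clock at 24 bpp over 4 lanes with a 270000 kHz link
 * clock yields a data ratio of 3564000 : 8640000.
 */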
185 struct intel_ddi_plls {
191 /* Interface history:
194 * 1.2: Add Power Management
195 * 1.3: Add vblank support
196 * 1.4: Fix cmdbuffer path, add heap destroy
197 * 1.5: Add vblank pipe configuration
198 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
199 * - Support vertical blank on secondary display pipe */
201 #define DRIVER_MAJOR 1
202 #define DRIVER_MINOR 6
203 #define DRIVER_PATCHLEVEL 0
205 #define WATCH_LISTS 0
208 #define I915_GEM_PHYS_CURSOR_0 1
209 #define I915_GEM_PHYS_CURSOR_1 2
210 #define I915_GEM_PHYS_OVERLAY_REGS 3
211 #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
213 struct drm_i915_gem_phys_object {
215 struct page **page_list;
216 drm_dma_handle_t *handle;
217 struct drm_i915_gem_object *cur_obj;
220 struct opregion_header;
221 struct opregion_acpi;
222 struct opregion_swsci;
223 struct opregion_asle;
225 struct intel_opregion {
226 struct opregion_header __iomem *header;
227 struct opregion_acpi __iomem *acpi;
228 struct opregion_swsci __iomem *swsci;
229 u32 swsci_gbda_sub_functions;
230 u32 swsci_sbcb_sub_functions;
231 struct opregion_asle __iomem *asle;
233 u32 __iomem *lid_state;
235 #define OPREGION_SIZE (8*1024)
237 struct intel_overlay;
238 struct intel_overlay_error_state;
240 struct drm_i915_master_private {
241 drm_local_map_t *sarea;
242 struct _drm_i915_sarea *sarea_priv;
244 #define I915_FENCE_REG_NONE -1
245 #define I915_MAX_NUM_FENCES 32
246 /* 32 fences + sign bit for FENCE_REG_NONE */
247 #define I915_MAX_NUM_FENCE_BITS 6
249 struct drm_i915_fence_reg {
250 struct list_head lru_list;
251 struct drm_i915_gem_object *obj;
255 struct sdvo_device_mapping {
264 struct intel_display_error_state;
266 struct drm_i915_error_state {
274 bool waiting[I915_NUM_RINGS];
275 u32 pipestat[I915_MAX_PIPES];
276 u32 tail[I915_NUM_RINGS];
277 u32 head[I915_NUM_RINGS];
278 u32 ctl[I915_NUM_RINGS];
279 u32 ipeir[I915_NUM_RINGS];
280 u32 ipehr[I915_NUM_RINGS];
281 u32 instdone[I915_NUM_RINGS];
282 u32 acthd[I915_NUM_RINGS];
283 u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
284 u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
285 u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
286 /* our own tracking of ring head and tail */
287 u32 cpu_ring_head[I915_NUM_RINGS];
288 u32 cpu_ring_tail[I915_NUM_RINGS];
289 u32 error; /* gen6+ */
290 u32 err_int; /* gen7 */
291 u32 instpm[I915_NUM_RINGS];
292 u32 instps[I915_NUM_RINGS];
293 u32 extra_instdone[I915_NUM_INSTDONE_REG];
294 u32 seqno[I915_NUM_RINGS];
296 u32 fault_reg[I915_NUM_RINGS];
298 u32 faddr[I915_NUM_RINGS];
299 u64 fence[I915_MAX_NUM_FENCES];
301 struct drm_i915_error_ring {
302 struct drm_i915_error_object {
306 } *ringbuffer, *batchbuffer, *ctx;
307 struct drm_i915_error_request {
313 } ring[I915_NUM_RINGS];
314 struct drm_i915_error_buffer {
321 s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
328 } **active_bo, **pinned_bo;
329 u32 *active_bo_count, *pinned_bo_count;
330 struct intel_overlay_error_state *overlay;
331 struct intel_display_error_state *display;
332 int hangcheck_score[I915_NUM_RINGS];
333 enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
336 struct intel_crtc_config;
341 struct drm_i915_display_funcs {
342 bool (*fbc_enabled)(struct drm_device *dev);
343 void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
344 void (*disable_fbc)(struct drm_device *dev);
345 int (*get_display_clock_speed)(struct drm_device *dev);
346 int (*get_fifo_size)(struct drm_device *dev, int plane);
348 * find_dpll() - Find the best values for the PLL
349 * @limit: limits for the PLL
350 * @crtc: current CRTC
351 * @target: target frequency in kHz
352 * @refclk: reference clock frequency in kHz
353 * @match_clock: if provided, @best_clock P divider must
354 * match the P divider from @match_clock
355 * used for LVDS downclocking
356 * @best_clock: best PLL values found
358 * Returns true on success, false on failure.
360 bool (*find_dpll)(const struct intel_limit *limit,
361 struct drm_crtc *crtc,
362 int target, int refclk,
363 struct dpll *match_clock,
364 struct dpll *best_clock);
365 void (*update_wm)(struct drm_crtc *crtc);
366 void (*update_sprite_wm)(struct drm_plane *plane,
367 struct drm_crtc *crtc,
368 uint32_t sprite_width, int pixel_size,
369 bool enable, bool scaled);
370 void (*modeset_global_resources)(struct drm_device *dev);
371 /* Returns the active state of the crtc, and if the crtc is active,
372 * fills out the pipe-config with the hw state. */
373 bool (*get_pipe_config)(struct intel_crtc *,
374 struct intel_crtc_config *);
375 int (*crtc_mode_set)(struct drm_crtc *crtc,
377 struct drm_framebuffer *old_fb);
378 void (*crtc_enable)(struct drm_crtc *crtc);
379 void (*crtc_disable)(struct drm_crtc *crtc);
380 void (*off)(struct drm_crtc *crtc);
381 void (*write_eld)(struct drm_connector *connector,
382 struct drm_crtc *crtc);
383 void (*fdi_link_train)(struct drm_crtc *crtc);
384 void (*init_clock_gating)(struct drm_device *dev);
385 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
386 struct drm_framebuffer *fb,
387 struct drm_i915_gem_object *obj,
389 int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
391 void (*hpd_irq_setup)(struct drm_device *dev);
392 /* clock updates for mode set */
394 /* render clock increase/decrease */
395 /* display clock increase/decrease */
396 /* pll clock increase/decrease */
399 struct intel_uncore_funcs {
400 void (*force_wake_get)(struct drm_i915_private *dev_priv);
401 void (*force_wake_put)(struct drm_i915_private *dev_priv);
404 struct intel_uncore {
405 spinlock_t lock; /* lock is also taken in irq contexts. */
407 struct intel_uncore_funcs funcs;
410 unsigned forcewake_count;
412 struct delayed_work force_wake_work;
415 #define DEV_INFO_FOR_EACH_FLAG(func, sep) \
416 func(is_mobile) sep \
419 func(is_i945gm) sep \
421 func(need_gfx_hws) sep \
423 func(is_pineview) sep \
424 func(is_broadwater) sep \
425 func(is_crestline) sep \
426 func(is_ivybridge) sep \
427 func(is_valleyview) sep \
428 func(is_haswell) sep \
429 func(is_preliminary) sep \
430 func(has_force_wake) sep \
432 func(has_pipe_cxsr) sep \
433 func(has_hotplug) sep \
434 func(cursor_needs_physical) sep \
435 func(has_overlay) sep \
436 func(overlay_needs_physical) sep \
437 func(supports_tv) sep \
438 func(has_bsd_ring) sep \
439 func(has_blt_ring) sep \
440 func(has_vebox_ring) sep \
445 #define DEFINE_FLAG(name) u8 name:1
446 #define SEP_SEMICOLON ;
448 struct intel_device_info {
449 u32 display_mmio_offset;
452 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
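/* Illustrative expansion (not from the original header): with DEFINE_FLAG and
 * SEP_SEMICOLON, the X-macro above expands into a run of one-bit fields:
 *
 *	u8 is_mobile:1; u8 is_i945gm:1; ... u8 has_vebox_ring:1;
 *
 * Passing a different func/sep pair reuses the same flag list elsewhere,
 * e.g. to print each flag by name in debug output.
 */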
458 enum i915_cache_level {
460 I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
461 I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
462 caches, e.g. sampler/render caches, and the
463 large Last-Level-Cache. LLC is coherent with
464 the CPU, but L3 is only visible to the GPU. */
465 I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
468 typedef uint32_t gen6_gtt_pte_t;
470 struct i915_address_space {
472 struct drm_device *dev;
473 struct list_head global_link;
474 unsigned long start; /* Start offset always 0 for dri2 */
475 size_t total; /* size addr space maps (ex. 2GB for ggtt) */
483 * List of objects currently involved in rendering.
485 * Includes buffers having the contents of their GPU caches
486 * flushed, not necessarily primitives. last_rendering_seqno
487 * represents when the rendering involved will be completed.
489 * A reference is held on the buffer while on this list.
491 struct list_head active_list;
494 * LRU list of objects which are not in the ringbuffer and
495 * are ready to unbind, but are still in the GTT.
497 * last_rendering_seqno is 0 while an object is in this list.
499 * A reference is not held on the buffer while on this list,
500 * as merely being GTT-bound shouldn't prevent its being
501 * freed, and we'll pull it off the list in the free path.
503 struct list_head inactive_list;
505 /* FIXME: Need a more generic return type */
506 gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
507 enum i915_cache_level level);
508 void (*clear_range)(struct i915_address_space *vm,
509 unsigned int first_entry,
510 unsigned int num_entries);
511 void (*insert_entries)(struct i915_address_space *vm,
513 unsigned int first_entry,
514 enum i915_cache_level cache_level);
515 void (*cleanup)(struct i915_address_space *vm);
518 /* The Graphics Translation Table is the way in which GEN hardware translates a
519 * Graphics Virtual Address into a Physical Address. In addition to the normal
520 * collateral associated with any va->pa translations GEN hardware also has a
521 * portion of the GTT which can be mapped by the CPU and remain both coherent
522 * and correct (in cases like swizzling). That region is referred to as GMADR in the spec.
526 struct i915_address_space base;
527 size_t stolen_size; /* Total size of stolen memory */
529 unsigned long mappable_end; /* End offset that we can CPU map */
530 struct io_mapping *mappable; /* Mapping to our CPU mappable region */
531 phys_addr_t mappable_base; /* PA of our GMADR */
533 /** "Graphics Stolen Memory" holds the global PTEs */
541 int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
542 size_t *stolen, phys_addr_t *mappable_base,
543 unsigned long *mappable_end);
545 #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
547 struct i915_hw_ppgtt {
548 struct i915_address_space base;
549 unsigned num_pd_entries;
550 struct page **pt_pages;
552 dma_addr_t *pt_dma_addr;
554 int (*enable)(struct drm_device *dev);
558 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
559 * VMA's presence cannot be guaranteed before binding, or after unbinding the
560 * object into/from the address space.
562 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
563 * will always be <= an object's lifetime. So object refcounting should cover us.
566 struct drm_mm_node node;
567 struct drm_i915_gem_object *obj;
568 struct i915_address_space *vm;
570 /** This object's place on the active/inactive lists */
571 struct list_head mm_list;
573 struct list_head vma_link; /* Link in the object's VMA list */
575 /** This vma's place in the batchbuffer or on the eviction list */
576 struct list_head exec_list;
579 * Used for performing relocations during execbuffer insertion.
581 struct hlist_node exec_node;
582 unsigned long exec_handle;
583 struct drm_i915_gem_exec_object2 *exec_entry;
587 struct i915_ctx_hang_stats {
588 /* This context had batch pending when hang was declared */
589 unsigned batch_pending;
591 /* This context had batch active when hang was declared */
592 unsigned batch_active;
594 /* Time when this context was last blamed for a GPU reset */
595 unsigned long guilty_ts;
597 /* This context is banned from submitting more work */
601 /* This must match up with the value previously used for execbuf2.rsvd1. */
602 #define DEFAULT_CONTEXT_ID 0
603 struct i915_hw_context {
608 struct drm_i915_file_private *file_priv;
609 struct intel_ring_buffer *ring;
610 struct drm_i915_gem_object *obj;
611 struct i915_ctx_hang_stats hang_stats;
613 struct list_head link;
622 struct drm_mm_node *compressed_fb;
623 struct drm_mm_node *compressed_llb;
625 struct intel_fbc_work {
626 struct delayed_work work;
627 struct drm_crtc *crtc;
628 struct drm_framebuffer *fb;
633 FBC_OK, /* FBC is enabled */
634 FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
635 FBC_NO_OUTPUT, /* no outputs enabled to compress */
636 FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
637 FBC_UNSUPPORTED_MODE, /* interlaced or doublescanned mode */
638 FBC_MODE_TOO_LARGE, /* mode too large for compression */
639 FBC_BAD_PLANE, /* fbc not supported on plane */
640 FBC_NOT_TILED, /* buffer not tiled */
641 FBC_MULTIPLE_PIPES, /* more than one pipe active */
643 FBC_CHIP_DEFAULT, /* disabled by default on this chip */
648 PSR_NO_SOURCE, /* Not supported on platform */
649 PSR_NO_SINK, /* Not supported by panel */
652 PSR_PWR_WELL_ENABLED,
656 PSR_INTERLACED_ENABLED,
661 PCH_NONE = 0, /* No PCH present */
662 PCH_IBX, /* Ibexpeak PCH */
663 PCH_CPT, /* Cougarpoint PCH */
664 PCH_LPT, /* Lynxpoint PCH */
668 enum intel_sbi_destination {
673 #define QUIRK_PIPEA_FORCE (1<<0)
674 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
675 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
676 #define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
679 struct intel_fbc_work;
682 struct i2c_adapter adapter;
686 struct i2c_algo_bit_data bit_algo;
687 struct drm_i915_private *dev_priv;
690 struct i915_suspend_saved_registers {
711 u32 saveTRANS_HTOTAL_A;
712 u32 saveTRANS_HBLANK_A;
713 u32 saveTRANS_HSYNC_A;
714 u32 saveTRANS_VTOTAL_A;
715 u32 saveTRANS_VBLANK_A;
716 u32 saveTRANS_VSYNC_A;
724 u32 savePFIT_PGM_RATIOS;
725 u32 saveBLC_HIST_CTL;
727 u32 saveBLC_PWM_CTL2;
728 u32 saveBLC_CPU_PWM_CTL;
729 u32 saveBLC_CPU_PWM_CTL2;
742 u32 saveTRANS_HTOTAL_B;
743 u32 saveTRANS_HBLANK_B;
744 u32 saveTRANS_HSYNC_B;
745 u32 saveTRANS_VTOTAL_B;
746 u32 saveTRANS_VBLANK_B;
747 u32 saveTRANS_VSYNC_B;
761 u32 savePP_ON_DELAYS;
762 u32 savePP_OFF_DELAYS;
770 u32 savePFIT_CONTROL;
771 u32 save_palette_a[256];
772 u32 save_palette_b[256];
773 u32 saveDPFC_CB_BASE;
774 u32 saveFBC_CFB_BASE;
777 u32 saveFBC_CONTROL2;
787 u32 saveCACHE_MODE_0;
788 u32 saveMI_ARB_STATE;
799 uint64_t saveFENCE[I915_MAX_NUM_FENCES];
810 u32 savePIPEA_GMCH_DATA_M;
811 u32 savePIPEB_GMCH_DATA_M;
812 u32 savePIPEA_GMCH_DATA_N;
813 u32 savePIPEB_GMCH_DATA_N;
814 u32 savePIPEA_DP_LINK_M;
815 u32 savePIPEB_DP_LINK_M;
816 u32 savePIPEA_DP_LINK_N;
817 u32 savePIPEB_DP_LINK_N;
828 u32 savePCH_DREF_CONTROL;
829 u32 saveDISP_ARB_CTL;
830 u32 savePIPEA_DATA_M1;
831 u32 savePIPEA_DATA_N1;
832 u32 savePIPEA_LINK_M1;
833 u32 savePIPEA_LINK_N1;
834 u32 savePIPEB_DATA_M1;
835 u32 savePIPEB_DATA_N1;
836 u32 savePIPEB_LINK_M1;
837 u32 savePIPEB_LINK_N1;
838 u32 saveMCHBAR_RENDER_STANDBY;
839 u32 savePCH_PORT_HOTPLUG;
842 struct intel_gen6_power_mgmt {
843 /* work and pm_iir are protected by dev_priv->irq_lock */
844 struct work_struct work;
847 /* On vlv we need to manually drop to Vmin with a delayed work. */
848 struct delayed_work vlv_work;
850 /* The below variables and all the RPS hw state are protected by
851 * dev->struct_mutex. */
858 struct delayed_work delayed_resume_work;
861 * Protects RPS/RC6 register access and PCU communication.
862 * Must be taken after struct_mutex if nested.
864 struct mutex hw_lock;
867 /* defined in intel_pm.c */
868 extern spinlock_t mchdev_lock;
870 struct intel_ilk_power_mgmt {
878 unsigned long last_time1;
879 unsigned long chipset_power;
881 struct timespec last_time2;
882 unsigned long gfx_power;
888 struct drm_i915_gem_object *pwrctx;
889 struct drm_i915_gem_object *renderctx;
892 /* Power well structure for haswell */
893 struct i915_power_well {
894 struct drm_device *device;
896 /* power well enable/disable usage count */
901 struct i915_dri1_state {
902 unsigned allow_batchbuffer : 1;
903 u32 __iomem *gfx_hws_cpu_addr;
914 struct i915_ums_state {
916 * Flag if the X Server, and thus DRM, is not currently in
917 * control of the device.
919 * This is set between LeaveVT and EnterVT. It needs to be
920 * replaced with a semaphore. It also needs to be
921 * transitioned away from for kernel modesetting.
926 #define MAX_L3_SLICES 2
927 struct intel_l3_parity {
928 u32 *remap_info[MAX_L3_SLICES];
929 struct work_struct error_work;
934 /** Memory allocator for GTT stolen memory */
935 struct drm_mm stolen;
936 /** List of all objects in gtt_space. Used to restore gtt
937 * mappings on resume */
938 struct list_head bound_list;
940 * List of objects which are not bound to the GTT (thus
941 * are idle and not used by the GPU) but still have
942 * (presumably uncached) pages attached.
944 struct list_head unbound_list;
946 /** Usable portion of the GTT for GEM */
947 unsigned long stolen_base; /* limited to low memory (32-bit) */
949 /** PPGTT used for aliasing the PPGTT with the GTT */
950 struct i915_hw_ppgtt *aliasing_ppgtt;
952 struct shrinker inactive_shrinker;
953 bool shrinker_no_lock_stealing;
955 /** LRU list of objects with fence regs on them. */
956 struct list_head fence_list;
959 * We leave the user IRQ off as much as possible,
960 * but this means that requests will finish and never
961 * be retired once the system goes idle. Set a timer to
962 * fire periodically while the ring is running. When it
963 * fires, go retire requests.
965 struct delayed_work retire_work;
968 * Are we in a non-interruptible section of code like modesetting?
973 /** Bit 6 swizzling required for X tiling */
974 uint32_t bit_6_swizzle_x;
975 /** Bit 6 swizzling required for Y tiling */
976 uint32_t bit_6_swizzle_y;
978 /* storage for physical objects */
979 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
981 /* accounting, useful for userland debugging */
982 spinlock_t object_stat_lock;
983 size_t object_memory;
987 struct drm_i915_error_state_buf {
996 struct i915_error_state_file_priv {
997 struct drm_device *dev;
998 struct drm_i915_error_state *error;
1001 struct i915_gpu_error {
1002 /* For hangcheck timer */
1003 #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
1004 #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
1005 /* Hang gpu twice in this window and your context gets banned */
1006 #define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
1008 struct timer_list hangcheck_timer;
1010 /* For reset and error_state handling. */
1012 /* Protected by the above dev->gpu_error.lock. */
1013 struct drm_i915_error_state *first_error;
1014 struct work_struct work;
1017 * State variable and reset counter controlling the reset flow
1019 * Upper bits are for the reset counter. This counter is used by the
1020 * wait_seqno code to notice, race-free, that a reset event happened and
1021 * that it needs to restart the entire ioctl (since most likely the
1022 * seqno it waited for won't ever signal anytime soon).
1024 * This is important for lock-free wait paths, where no contended lock
1025 * naturally enforces the correct ordering between the bail-out of the
1026 * waiter and the gpu reset work code.
1028 * Lowest bit controls the reset state machine: Set means a reset is in
1029 * progress. This state will (presuming we don't have any bugs) decay
1030 * into either unset (successful reset) or the special WEDGED value (hw
1031 * terminally sour). All waiters on the reset_queue will be woken when that happens.
1034 atomic_t reset_counter;
1037 * Special values/flags for reset_counter
1039 * Note that the code relies on
1040 * I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG being true.
1043 #define I915_RESET_IN_PROGRESS_FLAG 1
1044 #define I915_WEDGED 0xffffffff
1047 * Waitqueue to signal when the reset has completed. Used by clients
1048 * that wait for dev_priv->mm.wedged to settle.
1050 wait_queue_head_t reset_queue;
1052 /* For gpu hang simulation. */
1053 unsigned int stop_rings;
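/* Illustrative sketch of the lock-free wait protocol described above (the
 * variable names are hypothetical; not part of the original header): sample
 * the counter before sleeping and compare it afterwards; any change means a
 * reset fired and the ioctl must be restarted.
 *
 *	unsigned reset_counter = atomic_read(&error->reset_counter);
 *	...wait for the seqno...
 *	if (reset_counter != atomic_read(&error->reset_counter))
 *		return -EAGAIN;
 */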
1056 enum modeset_restore {
1057 MODESET_ON_LID_OPEN,
1062 struct ddi_vbt_port_info {
1063 uint8_t hdmi_level_shift;
1065 uint8_t supports_dvi:1;
1066 uint8_t supports_hdmi:1;
1067 uint8_t supports_dp:1;
1070 struct intel_vbt_data {
1071 struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
1072 struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
1075 unsigned int int_tv_support:1;
1076 unsigned int lvds_dither:1;
1077 unsigned int lvds_vbt:1;
1078 unsigned int int_crt_support:1;
1079 unsigned int lvds_use_ssc:1;
1080 unsigned int display_clock_mode:1;
1081 unsigned int fdi_rx_polarity_inverted:1;
1083 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
1088 int edp_preemphasis;
1090 bool edp_initialized;
1093 struct edp_power_seq edp_pps;
1103 union child_device_config *child_dev;
1105 struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
1108 enum intel_ddb_partitioning {
1110 INTEL_DDB_PART_5_6, /* IVB+ */
1113 struct intel_wm_level {
1122 * This struct tracks the state needed for the Package C8+ feature.
1124 * Package states C8 and deeper are really deep PC states that can only be
1125 * reached when all the devices on the system allow it, so even if the graphics
1126 * device allows PC8+, it doesn't mean the system will actually get to these states.
1129 * Our driver only allows PC8+ when all the outputs are disabled, the power well
1130 * is disabled and the GPU is idle. When these conditions are met, we manually
1131 * do the other conditions: disable the interrupts, clocks and switch LCPLL refclk to Fclk.
1134 * When we really reach PC8 or deeper states (not just when we allow it) we lose
1135 * the state of some registers, so when we come back from PC8+ we need to
1136 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
1137 * need to take care of the registers kept by RC6.
1139 * The interrupt disabling is part of the requirements. We can only leave the
1140 * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
1141 * can lock the machine.
1143 * Ideally every piece of our code that needs PC8+ disabled would call
1144 * hsw_disable_package_c8, which would increment disable_count and prevent the
1145 * system from reaching PC8+. But we don't have a symmetric way to do this for
1146 * everything, so we have the requirements_met and gpu_idle variables. When we
1147 * switch requirements_met or gpu_idle to true we decrease disable_count, and
1148 * increase it in the opposite case. The requirements_met variable is true when
1149 * all the CRTCs, encoders and the power well are disabled. The gpu_idle
1150 * variable is true when the GPU is idle.
1152 * In addition to everything, we only actually enable PC8+ if disable_count
1153 * stays at zero for at least some seconds. This is implemented with the
1154 * enable_work variable. We do this so we don't enable/disable PC8 dozens of
1155 * consecutive times when all screens are disabled and some background app
1156 * queries the state of our connectors, or we have some application constantly
1157 * waking up to use the GPU. Only after the enable_work function actually
1158 * enables PC8+ does the "enable" variable become true, which means that it can
1159 * be false even if disable_count is 0.
1161 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
1162 * goes back to false exactly before we reenable the IRQs. We use this variable
1163 * to check if someone is trying to enable/disable IRQs while they're supposed
1164 * to be disabled. This shouldn't happen and we'll print some error messages in
1165 * case it happens, but if it actually happens we'll also update the variables
1166 * inside struct regsave so when we restore the IRQs they will contain the
1167 * latest expected values.
1169 * For more, read "Display Sequences for Package C8" in our documentation.
1171 struct i915_package_c8 {
1172 bool requirements_met;
1175 /* Only true after the delayed work task actually enables it. */
1179 struct delayed_work enable_work;
1186 uint32_t gen6_pmimr;
1190 typedef struct drm_i915_private {
1191 struct drm_device *dev;
1192 struct kmem_cache *slab;
1194 const struct intel_device_info *info;
1196 int relative_constants_mode;
1200 struct intel_uncore uncore;
1202 struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
1205 /** gmbus_mutex protects against concurrent usage of the single hw gmbus
1206 * controller on different i2c buses. */
1207 struct mutex gmbus_mutex;
1210 * Base address of the gmbus and gpio block.
1212 uint32_t gpio_mmio_base;
1214 wait_queue_head_t gmbus_wait_queue;
1216 struct pci_dev *bridge_dev;
1217 struct intel_ring_buffer ring[I915_NUM_RINGS];
1218 uint32_t last_seqno, next_seqno;
1220 drm_dma_handle_t *status_page_dmah;
1221 struct resource mch_res;
1223 atomic_t irq_received;
1225 /* protects the irq masks */
1226 spinlock_t irq_lock;
1228 /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
1229 struct pm_qos_request pm_qos;
1231 /* DPIO indirect register protection */
1232 struct mutex dpio_lock;
1234 /** Cached value of IMR to avoid reads in updating the bitfield */
1239 struct work_struct hotplug_work;
1240 bool enable_hotplug_processing;
1242 unsigned long hpd_last_jiffies;
1247 HPD_MARK_DISABLED = 2
1249 } hpd_stats[HPD_NUM_PINS];
1251 struct timer_list hotplug_reenable_timer;
1255 struct i915_fbc fbc;
1256 struct intel_opregion opregion;
1257 struct intel_vbt_data vbt;
1260 struct intel_overlay *overlay;
1261 unsigned int sprite_scaling_enabled;
1267 spinlock_t lock; /* bl registers and the above bl fields */
1268 struct backlight_device *device;
1272 bool no_aux_handshake;
1274 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
1275 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
1276 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1278 unsigned int fsb_freq, mem_freq, is_ddr3;
1281 * wq - Driver workqueue for GEM.
1283 * NOTE: Work items scheduled here are not allowed to grab any modeset
1284 * locks, for otherwise the flushing done in the pageflip code will
1285 * result in deadlocks.
1287 struct workqueue_struct *wq;
1289 /* Display functions */
1290 struct drm_i915_display_funcs display;
1292 /* PCH chipset type */
1293 enum intel_pch pch_type;
1294 unsigned short pch_id;
1296 unsigned long quirks;
1298 enum modeset_restore modeset_restore;
1299 struct mutex modeset_restore_lock;
1301 struct list_head vm_list; /* Global list of all address spaces */
1302 struct i915_gtt gtt; /* VMA representing the global address space */
1304 struct i915_gem_mm mm;
1306 /* Kernel Modesetting */
1308 struct sdvo_device_mapping sdvo_mappings[2];
1310 struct drm_crtc *plane_to_crtc_mapping[3];
1311 struct drm_crtc *pipe_to_crtc_mapping[3];
1312 wait_queue_head_t pending_flip_queue;
1314 int num_shared_dpll;
1315 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1316 struct intel_ddi_plls ddi_plls;
1318 /* Reclocking support */
1319 bool render_reclock_avail;
1320 bool lvds_downclock_avail;
1321 /* indicates the reduced downclock for LVDS */
1325 bool mchbar_need_disable;
1327 struct intel_l3_parity l3_parity;
1329 /* Cannot be determined by PCIID. You must always read a register. */
1332 /* gen6+ rps state */
1333 struct intel_gen6_power_mgmt rps;
1335 /* ilk-only ips/rps state. Everything in here is protected by the global
1336 * mchdev_lock in intel_pm.c */
1337 struct intel_ilk_power_mgmt ips;
1339 /* Haswell power well */
1340 struct i915_power_well power_well;
1342 enum no_psr_reason no_psr_reason;
1344 struct i915_gpu_error gpu_error;
1346 struct drm_i915_gem_object *vlv_pctx;
1348 /* fbdev registered on this device */
1349 struct intel_fbdev *fbdev;
1352 * The console may be contended at resume, but we don't
1353 * want resume to block on it.
1355 struct work_struct console_resume_work;
1357 struct drm_property *broadcast_rgb_property;
1358 struct drm_property *force_audio_property;
1360 bool hw_contexts_disabled;
1361 uint32_t hw_context_size;
1362 struct list_head context_list;
1366 struct i915_suspend_saved_registers regfile;
1370 * Raw watermark latency values:
1371 * in 0.1us units for WM0,
1372 * in 0.5us units for WM1+.
1375 uint16_t pri_latency[5];
1377 uint16_t spr_latency[5];
1379 uint16_t cur_latency[5];
1382 struct i915_package_c8 pc8;
1384 /* Old dri1 support infrastructure, beware the dragons ya fools entering here. */
1386 struct i915_dri1_state dri1;
1387 /* Old ums support infrastructure, same warning applies. */
1388 struct i915_ums_state ums;
1389 } drm_i915_private_t;
1391 static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
1393 return dev->dev_private;
1396 /* Iterate over initialised rings */
1397 #define for_each_ring(ring__, dev_priv__, i__) \
1398 for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
1399 if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
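/* Illustrative usage sketch (assumes dev_priv is in scope; not part of the
 * original header):
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		i915_gem_retire_requests_ring(ring);
 */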
1401 enum hdmi_force_audio {
1402 HDMI_AUDIO_OFF_DVI = -2, /* no aux data for HDMI-DVI converter */
1403 HDMI_AUDIO_OFF, /* force turn off HDMI audio */
1404 HDMI_AUDIO_AUTO, /* trust EDID */
1405 HDMI_AUDIO_ON, /* force turn on HDMI audio */
1408 #define I915_GTT_OFFSET_NONE ((u32)-1)
1410 struct drm_i915_gem_object_ops {
1411 /* Interface between the GEM object and its backing storage.
1412 * get_pages() is called once prior to the use of the associated set
1413 * of pages (e.g. before binding them into the GTT), and put_pages() is
1414 * called after we no longer need them. As we expect there to be an
1415 * associated cost with migrating pages between the backing storage
1416 * and making them available for the GPU (e.g. clflush), we may hold
1417 * onto the pages after they are no longer referenced by the GPU
1418 * in case they may be used again shortly (for example migrating the
1419 * pages to a different memory domain within the GTT). put_pages()
1420 * will therefore most likely be called when the object itself is
1421 * being released or under memory pressure (where we attempt to
1422 * reap pages for the shrinker).
1424 int (*get_pages)(struct drm_i915_gem_object *);
1425 void (*put_pages)(struct drm_i915_gem_object *);
1428 struct drm_i915_gem_object {
1429 struct drm_gem_object base;
1431 const struct drm_i915_gem_object_ops *ops;
1433 /** List of VMAs backed by this object */
1434 struct list_head vma_list;
1436 /** Stolen memory for this object, instead of being backed by shmem. */
1437 struct drm_mm_node *stolen;
1438 struct list_head global_list;
1440 struct list_head ring_list;
1441 /** Used in execbuf to temporarily hold a ref */
1442 struct list_head obj_exec_link;
1445 * This is set if the object is on the active lists (has pending
1446 * rendering and so a non-zero seqno), and is not set if it is on
1447 * the inactive (ready to be unbound) list.
1449 unsigned int active:1;
1452 * This is set if the object has been written to since it was last bound to the GTT.
1455 unsigned int dirty:1;
1458 * Fence register bits (if any) for this object. Will be set
1459 * as needed when mapped into the GTT.
1460 * Protected by dev->struct_mutex.
1462 signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
1465 * Advice: are the backing pages purgeable?
1467 unsigned int madv:2;
1470 * Current tiling mode for the object.
1472 unsigned int tiling_mode:2;
1474 * Whether the tiling parameters for the currently associated fence
1475 * register have changed. Note that for the purposes of tracking
1476 * tiling changes we also treat the unfenced register, the register
1477 * slot that the object occupies whilst it executes a fenced
1478 * command (such as BLT on gen2/3), as a "fence".
1480 unsigned int fence_dirty:1;
1482 /** How many users have pinned this object in GTT space. The following
1483 * users can each hold at most one reference: pwrite/pread, pin_ioctl
1484 * (via user_pin_count), execbuffer (objects are not allowed multiple
1485 * times for the same batchbuffer), and the framebuffer code. When
1486 * switching/pageflipping, the framebuffer code has at most two buffers
1489 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
1490 * bits with absolutely no headroom. So use 4 bits. */
1491 unsigned int pin_count:4;
1492 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
1495 * Is the object at the current location in the gtt mappable and
1496 * fenceable? Used to avoid costly recalculations.
1498 unsigned int map_and_fenceable:1;
1501 * Whether the current gtt mapping needs to be mappable (and isn't just
1502 * mappable by accident). Track pin and fault separate for a more
1503 * accurate mappable working set.
1505 unsigned int fault_mappable:1;
1506 unsigned int pin_mappable:1;
1507 unsigned int pin_display:1;
1510 * Is the GPU currently using a fence to access this buffer?
1512 unsigned int pending_fenced_gpu_access:1;
1513 unsigned int fenced_gpu_access:1;
1515 unsigned int cache_level:3;
1517 unsigned int has_aliasing_ppgtt_mapping:1;
1518 unsigned int has_global_gtt_mapping:1;
1519 unsigned int has_dma_mapping:1;
1521 struct sg_table *pages;
1522 int pages_pin_count;
1524 /* prime dma-buf support */
1525 void *dma_buf_vmapping;
1528 struct intel_ring_buffer *ring;
1530 /** Breadcrumb of last rendering to the buffer. */
1531 uint32_t last_read_seqno;
1532 uint32_t last_write_seqno;
1533 /** Breadcrumb of last fenced GPU access to the buffer. */
1534 uint32_t last_fenced_seqno;
1536 /** Current tiling stride for the object, if it's tiled. */
1539 /** Record of address bit 17 of each page at last unbind. */
1540 unsigned long *bit_17;
1542 /** User space pin count and filp owning the pin */
1543 uint32_t user_pin_count;
1544 struct drm_file *pin_filp;
1546 /** for phy allocated objects */
1547 struct drm_i915_gem_phys_object *phys_obj;
1549 #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
1551 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
1554 * Request queue structure.
1556 * The request queue allows us to note sequence numbers that have been emitted
1557 * and may be associated with active buffers to be retired.
1559 * By keeping this list, we can avoid having to do questionable
1560 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
1561 * an emission time with seqnos for tracking how far ahead of the GPU we are.
1563 struct drm_i915_gem_request {
1564 /** On which ring this request was generated */
1565 struct intel_ring_buffer *ring;
1567 /** GEM sequence number associated with this request. */
1570 /** Position in the ringbuffer of the start of the request */
1573 /** Position in the ringbuffer of the end of the request */
1576 /** Context related to this request */
1577 struct i915_hw_context *ctx;
1579 /** Batch buffer related to this request if any */
1580 struct drm_i915_gem_object *batch_obj;
1582 /** Time at which this request was emitted, in jiffies. */
1583 unsigned long emitted_jiffies;
1585 /** global list entry for this request */
1586 struct list_head list;
1588 struct drm_i915_file_private *file_priv;
1589 /** file_priv list entry for this request */
1590 struct list_head client_list;
1593 struct drm_i915_file_private {
1596 struct list_head request_list;
1598 struct idr context_idr;
1600 struct i915_ctx_hang_stats hang_stats;
1603 #define INTEL_INFO(dev) (to_i915(dev)->info)
1605 #define IS_I830(dev) ((dev)->pci_device == 0x3577)
1606 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
1607 #define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1608 #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1609 #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1610 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1611 #define IS_I945G(dev) ((dev)->pci_device == 0x2772)
1612 #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1613 #define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1614 #define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
1615 #define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
1616 #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1617 #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
1618 #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
1619 #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1620 #define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1621 #define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1622 #define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
1623 #define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
1624 (dev)->pci_device == 0x0152 || \
1625 (dev)->pci_device == 0x015a)
1626 #define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
1627 (dev)->pci_device == 0x0106 || \
1628 (dev)->pci_device == 0x010A)
1629 #define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
1630 #define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
1631 #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1632 #define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
1633 ((dev)->pci_device & 0xFF00) == 0x0C00)
1634 #define IS_ULT(dev) (IS_HASWELL(dev) && \
1635 ((dev)->pci_device & 0xFF00) == 0x0A00)
1636 #define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
1637 ((dev)->pci_device & 0x00F0) == 0x0020)
1638 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
1641 * The genX designation typically refers to the render engine, so render
1642 * capability related checks should use IS_GEN, while display and other checks
1643 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular chips, etc.).
1646 #define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
1647 #define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
1648 #define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
1649 #define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
1650 #define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
1651 #define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
1653 #define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
1654 #define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
1655 #define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
1656 #define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
1657 #define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
1658 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1660 #define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
1661 #define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
1663 #define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
1664 #define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
1666 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
1667 #define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
1669 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1670 * rows, which changed the alignment requirements and fence programming.
1672 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || IS_I915GM(dev)))
1674 #define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
1675 #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
1676 #define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
1677 #define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
1678 #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1680 #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
1681 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1682 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1684 #define HAS_IPS(dev) (IS_ULT(dev))
1686 #define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
1687 #define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
1688 #define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
1689 #define HAS_PSR(dev) (IS_HASWELL(dev))
1691 #define INTEL_PCH_DEVICE_ID_MASK 0xff00
1692 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
1693 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
1694 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
1695 #define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
1696 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
1698 #define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
1699 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
1700 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1701 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1702 #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
1703 #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
1705 #define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
1707 /* DPF == dynamic parity feature */
1708 #define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
1709 #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))
1711 #define GT_FREQUENCY_MULTIPLIER 50
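/* Illustrative note (an assumption, not from the original header): RPS
 * frequency values are programmed in hardware units, so a hw value of 18
 * corresponds to 18 * GT_FREQUENCY_MULTIPLIER = 900 MHz. */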
1713 #include "i915_trace.h"
1716 * RC6 is a special power stage which allows the GPU to enter a very
1717 * low-voltage mode when idle, using down to 0V while at this stage. This
1718 * stage is entered automatically when the GPU is idle if RC6 support is
1719 * enabled, and the GPU wakes up automatically as soon as a new workload arises.
1721 * There are different RC6 modes available in Intel GPUs, which differ in the
1722 * latency required to enter and leave RC6 and in the voltage consumed by the
1723 * GPU in different states.
1725 * The combination of the following flags defines which states the GPU is
1726 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
1727 * and RC6pp is the deepest RC6. Their support by hardware varies according to the
1728 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
1729 * which brings the most power savings; deeper states save more power, but
1730 * require higher latency to switch to and wake up.
1732 #define INTEL_RC6_ENABLE (1<<0)
1733 #define INTEL_RC6p_ENABLE (1<<1)
1734 #define INTEL_RC6pp_ENABLE (1<<2)
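/* e.g. (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE) permits RC6 and deep RC6 but
 * keeps the GPU out of RC6pp. */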
1736 extern const struct drm_ioctl_desc i915_ioctls[];
1737 extern int i915_max_ioctl;
1738 extern unsigned int i915_fbpercrtc __always_unused;
1739 extern int i915_panel_ignore_lid __read_mostly;
1740 extern unsigned int i915_powersave __read_mostly;
1741 extern int i915_semaphores __read_mostly;
1742 extern unsigned int i915_lvds_downclock __read_mostly;
1743 extern int i915_lvds_channel_mode __read_mostly;
1744 extern int i915_panel_use_ssc __read_mostly;
1745 extern int i915_vbt_sdvo_panel_type __read_mostly;
1746 extern int i915_enable_rc6 __read_mostly;
1747 extern int i915_enable_fbc __read_mostly;
1748 extern bool i915_enable_hangcheck __read_mostly;
1749 extern int i915_enable_ppgtt __read_mostly;
1750 extern int i915_enable_psr __read_mostly;
1751 extern unsigned int i915_preliminary_hw_support __read_mostly;
1752 extern int i915_disable_power_well __read_mostly;
1753 extern int i915_enable_ips __read_mostly;
1754 extern bool i915_fastboot __read_mostly;
1755 extern int i915_enable_pc8 __read_mostly;
1756 extern int i915_pc8_timeout __read_mostly;
1757 extern bool i915_prefault_disable __read_mostly;
1759 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
1760 extern int i915_resume(struct drm_device *dev);
1761 extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
1762 extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1765 void i915_update_dri1_breadcrumb(struct drm_device *dev);
1766 extern void i915_kernel_lost_context(struct drm_device * dev);
1767 extern int i915_driver_load(struct drm_device *, unsigned long flags);
1768 extern int i915_driver_unload(struct drm_device *);
1769 extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
1770 extern void i915_driver_lastclose(struct drm_device * dev);
1771 extern void i915_driver_preclose(struct drm_device *dev,
1772 struct drm_file *file_priv);
1773 extern void i915_driver_postclose(struct drm_device *dev,
1774 struct drm_file *file_priv);
1775 extern int i915_driver_device_is_agp(struct drm_device * dev);
1776 #ifdef CONFIG_COMPAT
1777 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1780 extern int i915_emit_box(struct drm_device *dev,
1781 struct drm_clip_rect *box,
1783 extern int intel_gpu_reset(struct drm_device *dev);
1784 extern int i915_reset(struct drm_device *dev);
1785 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
1786 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
1787 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
1788 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
1790 extern void intel_console_resume(struct work_struct *work);
1793 void i915_queue_hangcheck(struct drm_device *dev);
1794 void i915_handle_error(struct drm_device *dev, bool wedged);
1796 extern void intel_irq_init(struct drm_device *dev);
1797 extern void intel_pm_init(struct drm_device *dev);
1798 extern void intel_hpd_init(struct drm_device *dev);
1801 extern void intel_uncore_sanitize(struct drm_device *dev);
1802 extern void intel_uncore_early_sanitize(struct drm_device *dev);
1803 extern void intel_uncore_init(struct drm_device *dev);
1804 extern void intel_uncore_clear_errors(struct drm_device *dev);
1805 extern void intel_uncore_check_errors(struct drm_device *dev);
1806 extern void intel_uncore_fini(struct drm_device *dev);
1809 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1812 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
1815 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
1816 struct drm_file *file_priv);
1817 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
1818 struct drm_file *file_priv);
1819 int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1820 struct drm_file *file_priv);
1821 int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1822 struct drm_file *file_priv);
1823 int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1824 struct drm_file *file_priv);
1825 int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1826 struct drm_file *file_priv);
1827 int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1828 struct drm_file *file_priv);
1829 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1830 struct drm_file *file_priv);
1831 int i915_gem_execbuffer(struct drm_device *dev, void *data,
1832 struct drm_file *file_priv);
1833 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
1834 struct drm_file *file_priv);
1835 int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
1836 struct drm_file *file_priv);
1837 int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
1838 struct drm_file *file_priv);
1839 int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
1840 struct drm_file *file_priv);
1841 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
1842 struct drm_file *file);
1843 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
1844 struct drm_file *file);
1845 int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
1846 struct drm_file *file_priv);
1847 int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
1848 struct drm_file *file_priv);
1849 int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
1850 struct drm_file *file_priv);
1851 int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
1852 struct drm_file *file_priv);
1853 int i915_gem_set_tiling(struct drm_device *dev, void *data,
1854 struct drm_file *file_priv);
1855 int i915_gem_get_tiling(struct drm_device *dev, void *data,
1856 struct drm_file *file_priv);
1857 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
1858 struct drm_file *file_priv);
1859 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
1860 struct drm_file *file_priv);
1861 void i915_gem_load(struct drm_device *dev);
1862 void *i915_gem_object_alloc(struct drm_device *dev);
1863 void i915_gem_object_free(struct drm_i915_gem_object *obj);
1864 int i915_gem_init_object(struct drm_gem_object *obj);
1865 void i915_gem_object_init(struct drm_i915_gem_object *obj,
1866 const struct drm_i915_gem_object_ops *ops);
1867 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1869 void i915_gem_free_object(struct drm_gem_object *obj);
1870 void i915_gem_vma_destroy(struct i915_vma *vma);
1872 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
1873 struct i915_address_space *vm,
1875 bool map_and_fenceable,
1877 void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
1878 int __must_check i915_vma_unbind(struct i915_vma *vma);
1879 int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
1880 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
1881 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
1882 void i915_gem_lastclose(struct drm_device *dev);
1884 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
1885 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
1887 struct sg_page_iter sg_iter;
1889 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
1890 return sg_page_iter_page(&sg_iter);
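	/* Note (added commentary): for_each_sg_page() starts iterating at
	 * page offset n, so the loop body returns the n'th backing page on
	 * the first pass. */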
1894 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
1896 BUG_ON(obj->pages == NULL);
1897 obj->pages_pin_count++;
1899 static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
1901 BUG_ON(obj->pages_pin_count == 0);
1902 obj->pages_pin_count--;
1905 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
1906 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
1907 struct intel_ring_buffer *to);
1908 void i915_vma_move_to_active(struct i915_vma *vma,
1909 struct intel_ring_buffer *ring);
1910 int i915_gem_dumb_create(struct drm_file *file_priv,
1911 struct drm_device *dev,
1912 struct drm_mode_create_dumb *args);
1913 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
1914 uint32_t handle, uint64_t *offset);
1916 * Returns true if seq1 is later than seq2.
1919 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1921 return (int32_t)(seq1 - seq2) >= 0;
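	/* Note (added commentary): the signed subtraction keeps the comparison
	 * correct across u32 wraparound, e.g. seq1 = 2, seq2 = 0xfffffffe gives
	 * (int32_t)(seq1 - seq2) = 4 >= 0, so seq1 is treated as later. */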
1924 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
1925 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
1926 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
1927 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
1930 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
1932 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1933 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1934 dev_priv->fence_regs[obj->fence_reg].pin_count++;
1941 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
1943 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1944 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1945 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
1946 dev_priv->fence_regs[obj->fence_reg].pin_count--;
1950 void i915_gem_retire_requests(struct drm_device *dev);
1951 void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
1952 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
1953 bool interruptible);
1954 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
1956 return unlikely(atomic_read(&error->reset_counter)
1957 & I915_RESET_IN_PROGRESS_FLAG);
1960 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
1962 return atomic_read(&error->reset_counter) == I915_WEDGED;
1965 void i915_gem_reset(struct drm_device *dev);
1966 bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
1967 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
1968 int __must_check i915_gem_init(struct drm_device *dev);
1969 int __must_check i915_gem_init_hw(struct drm_device *dev);
1970 int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
1971 void i915_gem_init_swizzling(struct drm_device *dev);
1972 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1973 int __must_check i915_gpu_idle(struct drm_device *dev);
1974 int __must_check i915_gem_idle(struct drm_device *dev);
1975 int __i915_add_request(struct intel_ring_buffer *ring,
1976 struct drm_file *file,
1977 struct drm_i915_gem_object *batch_obj,
1979 #define i915_add_request(ring, seqno) \
1980 __i915_add_request(ring, NULL, NULL, seqno)
1981 int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
1983 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
1985 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
1988 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
1990 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
1992 struct intel_ring_buffer *pipelined);
1993 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
1994 int i915_gem_attach_phys_object(struct drm_device *dev,
1995 struct drm_i915_gem_object *obj,
1998 void i915_gem_detach_phys_object(struct drm_device *dev,
1999 struct drm_i915_gem_object *obj);
2000 void i915_gem_free_all_phys_object(struct drm_device *dev);
2001 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
2004 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
2006 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
2007 int tiling_mode, bool fenced);
2009 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
2010 enum i915_cache_level cache_level);
2012 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
2013 struct dma_buf *dma_buf);
2015 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
2016 struct drm_gem_object *gem_obj, int flags);
2018 void i915_gem_restore_fences(struct drm_device *dev);
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm);
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);

struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
	struct i915_address_space *ggtt =
		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
	return vm == ggtt;
}

static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_size(obj, obj_to_ggtt(obj));
}

static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
		      bool map_and_fenceable,
		      bool nonblocking)
{
	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
				   map_and_fenceable, nonblocking);
}
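/*
 * Illustrative sequence (an assumed usage, with a placeholder alignment):
 * pin an object into the global GTT and read back where it landed.
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 *	if (ret)
 *		return ret;
 *	offset = i915_gem_obj_ggtt_offset(obj);
 */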
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
	kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}
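/*
 * Context lifetime follows the usual kref pattern: take a reference for
 * as long as a pointer is kept, drop it when done, and the final
 * kref_put() invokes i915_gem_context_free(). Illustrative only:
 *
 *	i915_gem_context_reference(ctx);
 *	... use ctx ...
 *	i915_gem_context_unreference(ctx);
 */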
struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct drm_device *dev,
				struct drm_file *file,
				u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj);

void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
			       unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}
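/*
 * Illustrative pairing (an assumed usage): on pre-gen6 parts, CPU writes
 * that the GPU will read should be followed by a chipset flush.
 *
 *	memcpy(vaddr, data, len);
 *	i915_gem_chipset_flush(dev);
 */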
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  bool mappable,
					  bool nonblock);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
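/*
 * Illustrative pairing (an assumed usage): when an object's pages can
 * move, the bit-17 swizzle state is saved before the pages are released
 * and reapplied once they come back.
 *
 *	i915_gem_object_save_bit_17_swizzle(obj);
 *	... pages released and later reacquired ...
 *	i915_gem_object_do_bit_17_swizzle(obj);
 */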
/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
		struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);
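/*
 * Illustrative lookup (an assumed usage): validate the port before
 * asking for its adapter, then talk plain i2c.
 *
 *	if (intel_gmbus_is_port_valid(port)) {
 *		struct i2c_adapter *adapter =
 *			intel_gmbus_get_adapter(dev_priv, port);
 *		... i2c_transfer(adapter, msgs, num) ...
 *	}
 */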
/* intel_opregion.c */
struct intel_encoder;
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
					 pci_power_t state);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
	return 0;
}
#endif /* CONFIG_ACPI */
/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);
/* On the SNB platform the forcewake bit must be set before reading ring
 * registers, to keep the GT core from powering down and returning stale
 * values.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
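/*
 * Illustrative bracketing (an assumed usage): hold forcewake across the
 * reads that need it.
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	val = I915_READ(SOME_RING_REG);	// SOME_RING_REG is a placeholder
 *	gen6_gt_force_wake_put(dev_priv);
 */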
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);

int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
#define __i915_read(x) \
	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read

#define __i915_write(x) \
	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write
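/*
 * For reference, the token pasting above means that __i915_read(32)
 * declares:
 *
 *	u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg,
 *			bool trace);
 */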
#define I915_READ8(reg)		i915_read8(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	i915_read16(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	i915_read16(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	i915_write16(dev_priv, (reg), (val), false)

#define I915_READ(reg)		i915_read32(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)		i915_read32(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	i915_write32(dev_priv, (reg), (val), false)

#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val), true)
#define I915_READ64(reg)	i915_read64(dev_priv, (reg), true)

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
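/*
 * Typical read-modify-write (illustrative; SOME_REG and SOME_BIT are
 * placeholders, and a dev_priv pointer must be in scope for the macros):
 *
 *	u32 tmp = I915_READ(SOME_REG);
 *	I915_WRITE(SOME_REG, tmp | SOME_BIT);
 *	POSTING_READ(SOME_REG);	// untraced read back to flush the write
 */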
2362 /* "Broadcast RGB" property */
2363 #define INTEL_BROADCAST_RGB_AUTO 0
2364 #define INTEL_BROADCAST_RGB_FULL 1
2365 #define INTEL_BROADCAST_RGB_LIMITED 2
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (HAS_PCH_SPLIT(dev))
		return CPU_VGACNTRL;
	else if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else
		return VGACNTRL;
}
static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}
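/*
 * Illustrative use (args is a hypothetical ioctl struct with a u64
 * user-pointer field): carrying the pointer as a u64 keeps the ioctl ABI
 * identical for 32-bit and 64-bit userspace.
 *
 *	if (copy_from_user(buf, to_user_ptr(args->data_ptr), size))
 *		return -EFAULT;
 */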
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
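/*
 * The "+ 1" in both helpers rounds the timeout up so the caller sleeps
 * at least as long as requested, even when the wait starts just before a
 * tick boundary. Illustrative use with a hypothetical completion:
 *
 *	ret = wait_for_completion_interruptible_timeout(&done,
 *					msecs_to_jiffies_timeout(10));
 */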