drm/i915: add new helpers for accessing stepping info
/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <asm/hypervisor.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/dma-resv.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>
#include <linux/xarray.h>

#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>

#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"

#include "display/intel_bios.h"
#include "display/intel_display.h"
#include "display/intel_display_power.h"
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsb.h"
#include "display/intel_frontbuffer.h"
#include "display/intel_global_state.h"
#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"

#include "gem/i915_gem_context_types.h"
#include "gem/i915_gem_shrinker.h"
#include "gem/i915_gem_stolen.h"

#include "gt/intel_engine.h"
#include "gt/intel_gt_types.h"
#include "gt/intel_region_lmem.h"
#include "gt/intel_workarounds.h"
#include "gt/uc/intel_uc.h"

#include "intel_device_info.h"
#include "intel_memory_region.h"
#include "intel_pch.h"
#include "intel_runtime_pm.h"
#include "intel_step.h"
#include "intel_uncore.h"
#include "intel_wakeref.h"
#include "intel_wopcm.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
#include "i915_perf_types.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "gt/intel_timeline.h"
#include "i915_vma.h"
#include "i915_irq.h"


/* General customization:
 */

#define DRIVER_NAME             "i915"
#define DRIVER_DESC             "Intel Graphics"
#define DRIVER_DATE             "20201103"
#define DRIVER_TIMESTAMP        1604406085

struct drm_i915_gem_object;

enum hpd_pin {
        HPD_NONE = 0,
        HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
        HPD_CRT,
        HPD_SDVO_B,
        HPD_SDVO_C,
        HPD_PORT_A,
        HPD_PORT_B,
        HPD_PORT_C,
        HPD_PORT_D,
        HPD_PORT_E,
        HPD_PORT_TC1,
        HPD_PORT_TC2,
        HPD_PORT_TC3,
        HPD_PORT_TC4,
        HPD_PORT_TC5,
        HPD_PORT_TC6,

        HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
        for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)

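/*
 * Usage sketch (illustrative only, not part of this header): iterate
 * over all hotplug pins, e.g. to reset the per-pin storm statistics
 * kept in struct i915_hotplug below. 'dev_priv' is an assumed pointer
 * to the driver private structure.
 *
 *      enum hpd_pin pin;
 *
 *      for_each_hpd_pin(pin) {
 *              dev_priv->hotplug.stats[pin].count = 0;
 *              dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
 *      }
 */
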
/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50

struct i915_hotplug {
        struct delayed_work hotplug_work;

        const u32 *hpd, *pch_hpd;

        struct {
                unsigned long last_jiffies;
                int count;
                enum {
                        HPD_ENABLED = 0,
                        HPD_DISABLED = 1,
                        HPD_MARK_DISABLED = 2
                } state;
        } stats[HPD_NUM_PINS];
        u32 event_bits;
        u32 retry_bits;
        struct delayed_work reenable_work;

        u32 long_port_mask;
        u32 short_port_mask;
        struct work_struct dig_port_work;

        struct work_struct poll_init_work;
        bool poll_enabled;

        unsigned int hpd_storm_threshold;
        /* Whether or not to count short HPD IRQs in HPD storms */
        u8 hpd_short_storm_enabled;

        /*
         * If we get an HPD IRQ from DP and an HPD IRQ from non-DP, the
         * non-DP handler could block the workqueue while waiting for a
         * mode config mutex that userspace has already taken, while
         * userspace in turn waits on the DP work to run, which is queued
         * behind the non-DP one. A dedicated workqueue for DP breaks
         * that cycle.
         */
        struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
        (I915_GEM_DOMAIN_RENDER | \
         I915_GEM_DOMAIN_SAMPLER | \
         I915_GEM_DOMAIN_COMMAND | \
         I915_GEM_DOMAIN_INSTRUCTION | \
         I915_GEM_DOMAIN_VERTEX)

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
        struct drm_i915_private *dev_priv;

        union {
                struct drm_file *file;
                struct rcu_head rcu;
        };

        struct xarray context_xa;
        struct xarray vm_xa;

        unsigned int bsd_engine;

/*
 * Every context ban increments the per-client ban score. Hangs in short
 * succession also increment the score. If the ban threshold is reached,
 * the client is considered banned and submitting more work will fail.
 * This is a stopgap measure to limit a badly behaving client's access
 * to the gpu. Note that unbannable contexts never increment the client
 * ban score.
 */
#define I915_CLIENT_SCORE_HANG_FAST     1
#define   I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
#define I915_CLIENT_SCORE_CONTEXT_BAN   3
#define I915_CLIENT_SCORE_BANNED        9
        /** ban_score: Accumulated score of all ctx bans and fast hangs. */
        atomic_t ban_score;
        unsigned long hang_timestamp;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR            1
#define DRIVER_MINOR            6
#define DRIVER_PATCHLEVEL       0

struct intel_overlay;
struct intel_overlay_error_state;

struct sdvo_device_mapping {
        u8 initialized;
        u8 dvo_port;
        u8 slave_addr;
        u8 dvo_wiring;
        u8 i2c_pin;
        u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
struct intel_cdclk_config;
struct intel_cdclk_state;
struct intel_cdclk_vals;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
        void (*get_cdclk)(struct drm_i915_private *dev_priv,
                          struct intel_cdclk_config *cdclk_config);
        void (*set_cdclk)(struct drm_i915_private *dev_priv,
                          const struct intel_cdclk_config *cdclk_config,
                          enum pipe pipe);
        int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
        int (*get_fifo_size)(struct drm_i915_private *dev_priv,
                             enum i9xx_plane_id i9xx_plane);
        int (*compute_pipe_wm)(struct intel_crtc_state *crtc_state);
        int (*compute_intermediate_wm)(struct intel_crtc_state *crtc_state);
        void (*initial_watermarks)(struct intel_atomic_state *state,
                                   struct intel_crtc *crtc);
        void (*atomic_update_watermarks)(struct intel_atomic_state *state,
                                         struct intel_crtc *crtc);
        void (*optimize_watermarks)(struct intel_atomic_state *state,
                                    struct intel_crtc *crtc);
        int (*compute_global_watermarks)(struct intel_atomic_state *state);
        void (*update_wm)(struct intel_crtc *crtc);
        int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
        u8 (*calc_voltage_level)(int cdclk);
        /* Returns the active state of the crtc, and if the crtc is active,
         * fills out the pipe-config with the hw state. */
        bool (*get_pipe_config)(struct intel_crtc *,
                                struct intel_crtc_state *);
        void (*get_initial_plane_config)(struct intel_crtc *,
                                         struct intel_initial_plane_config *);
        int (*crtc_compute_clock)(struct intel_crtc *crtc,
                                  struct intel_crtc_state *crtc_state);
        void (*crtc_enable)(struct intel_atomic_state *state,
                            struct intel_crtc *crtc);
        void (*crtc_disable)(struct intel_atomic_state *state,
                             struct intel_crtc *crtc);
        void (*commit_modeset_enables)(struct intel_atomic_state *state);
        void (*commit_modeset_disables)(struct intel_atomic_state *state);
        void (*audio_codec_enable)(struct intel_encoder *encoder,
                                   const struct intel_crtc_state *crtc_state,
                                   const struct drm_connector_state *conn_state);
        void (*audio_codec_disable)(struct intel_encoder *encoder,
                                    const struct intel_crtc_state *old_crtc_state,
                                    const struct drm_connector_state *old_conn_state);
        void (*fdi_link_train)(struct intel_crtc *crtc,
                               const struct intel_crtc_state *crtc_state);
        void (*init_clock_gating)(struct drm_i915_private *dev_priv);
        void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
        /* clock updates for mode set */
        /* cursor updates */
        /* render clock increase/decrease */
        /* display clock increase/decrease */
        /* pll clock increase/decrease */

        int (*color_check)(struct intel_crtc_state *crtc_state);
        /*
         * Program double buffered color management registers during
         * vblank evasion. The registers should then latch during the
         * next vblank start, alongside any other double buffered registers
         * involved with the same commit.
         */
        void (*color_commit)(const struct intel_crtc_state *crtc_state);
        /*
         * Load LUTs (and other single buffered color management
         * registers). Will (hopefully) be called during the vblank
         * following the latching of any double buffered registers
         * involved with the same commit.
         */
        void (*load_luts)(const struct intel_crtc_state *crtc_state);
        void (*read_luts)(struct intel_crtc_state *crtc_state);
};

struct intel_csr {
        struct work_struct work;
        const char *fw_path;
        u32 required_version;
        u32 max_fw_size; /* bytes */
        u32 *dmc_payload;
        u32 dmc_fw_size; /* dwords */
        u32 version;
        u32 mmio_count;
        i915_reg_t mmioaddr[20];
        u32 mmiodata[20];
        u32 dc_state;
        u32 target_dc_state;
        u32 allowed_dc_mask;
        intel_wakeref_t wakeref;
};

enum i915_cache_level {
        I915_CACHE_NONE = 0,
        I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
        I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
                              caches, e.g. sampler/render caches, and the
                              large Last-Level-Cache. LLC is coherent with
                              the CPU, but L3 is only visible to the GPU. */
        I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

struct intel_fbc {
        /* This is always the inner lock when overlapping with struct_mutex and
         * it's the outer lock when overlapping with stolen_lock. */
        struct mutex lock;
        unsigned threshold;
        unsigned int possible_framebuffer_bits;
        unsigned int busy_bits;
        struct intel_crtc *crtc;

        struct drm_mm_node compressed_fb;
        struct drm_mm_node *compressed_llb;

        bool false_color;

        bool active;
        bool activated;
        bool flip_pending;

        bool underrun_detected;
        struct work_struct underrun_work;

        /*
         * Due to the atomic rules we can't access some structures without the
         * appropriate locking, so we cache information here in order to avoid
         * these problems.
         */
        struct intel_fbc_state_cache {
                struct {
                        unsigned int mode_flags;
                        u32 hsw_bdw_pixel_rate;
                } crtc;

                struct {
                        unsigned int rotation;
                        int src_w;
                        int src_h;
                        bool visible;
                        /*
                         * Display surface base address adjustment for
                         * pageflips. Note that on gen4+ this only adjusts up
                         * to a tile, offsets within a tile are handled in
                         * the hw itself (with the TILEOFF register).
                         */
                        int adjusted_x;
                        int adjusted_y;

                        u16 pixel_blend_mode;
                } plane;

                struct {
                        const struct drm_format_info *format;
                        unsigned int stride;
                        u64 modifier;
                } fb;

                unsigned int fence_y_offset;
                u16 gen9_wa_cfb_stride;
                u16 interval;
                s8 fence_id;
                bool psr2_active;
        } state_cache;

        /*
         * This structure contains everything that's relevant to program the
         * hardware registers. When we want to figure out if we need to disable
         * and re-enable FBC for a new configuration we just check if there's
         * something different in the struct. The genx_fbc_activate functions
         * are supposed to read from it in order to program the registers.
         */
        struct intel_fbc_reg_params {
                struct {
                        enum pipe pipe;
                        enum i9xx_plane_id i9xx_plane;
                } crtc;

                struct {
                        const struct drm_format_info *format;
                        unsigned int stride;
                        u64 modifier;
                } fb;

                int cfb_size;
                unsigned int fence_y_offset;
                u16 gen9_wa_cfb_stride;
                u16 interval;
                s8 fence_id;
                bool plane_visible;
        } params;

        const char *no_fbc_reason;
};

/*
 * HIGH_RR is the highest eDP panel refresh rate read from EDID;
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for the same resolution.
 */
enum drrs_refresh_rate_type {
        DRRS_HIGH_RR,
        DRRS_LOW_RR,
        DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
        DRRS_NOT_SUPPORTED = 0,
        STATIC_DRRS_SUPPORT = 1,
        SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
        struct mutex mutex;
        struct delayed_work work;
        struct intel_dp *dp;
        unsigned busy_frontbuffer_bits;
        enum drrs_refresh_rate_type refresh_rate_type;
        enum drrs_support_type type;
};

#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
        struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
        u32 force_bit;
        u32 reg0;
        i915_reg_t gpio_reg;
        struct i2c_algo_bit_data bit_algo;
        struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
        u32 saveDSPARB;
        u32 saveSWF0[16];
        u32 saveSWF1[16];
        u32 saveSWF3[3];
        u16 saveGCDGMBUS;
};

struct vlv_s0ix_state;

#define MAX_L3_SLICES 2
struct intel_l3_parity {
        u32 *remap_info[MAX_L3_SLICES];
        struct work_struct error_work;
        int which_slice;
};

struct i915_gem_mm {
        /** Memory allocator for GTT stolen memory */
        struct drm_mm stolen;
        /** Protects the usage of the GTT stolen memory allocator. This is
         * always the inner lock when overlapping with struct_mutex. */
        struct mutex stolen_lock;

        /* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
        spinlock_t obj_lock;

        /**
         * List of objects which are purgeable.
         */
        struct list_head purge_list;

        /**
         * List of objects which have allocated pages and are shrinkable.
         */
        struct list_head shrink_list;

        /**
         * List of objects which are pending destruction.
         */
        struct llist_head free_list;
        struct work_struct free_work;
        /**
         * Count of objects pending destruction. Used to skip needlessly
         * waiting on an RCU barrier if no objects are waiting to be freed.
         */
        atomic_t free_count;

        /**
         * tmpfs instance used for shmem backed objects
         */
        struct vfsmount *gemfs;

        struct intel_memory_region *regions[INTEL_REGION_UNKNOWN];

        struct notifier_block oom_notifier;
        struct notifier_block vmap_notifier;
        struct shrinker shrinker;

        /**
         * Workqueue to fault in userptr pages, flushed by the execbuf
         * when required but otherwise left to userspace to try again
         * on EAGAIN.
         */
        struct workqueue_struct *userptr_wq;

        /* shrinker accounting, also useful for userland debugging */
        u64 shrink_memory;
        u32 shrink_count;
};

#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */

unsigned long i915_fence_context_timeout(const struct drm_i915_private *i915,
                                         u64 context);

static inline unsigned long
i915_fence_timeout(const struct drm_i915_private *i915)
{
        return i915_fence_context_timeout(i915, U64_MAX);
}

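/*
 * Usage sketch (illustrative only): i915_fence_timeout() returns the
 * default fence wait budget by querying the context-specific timeout
 * with the sentinel U64_MAX. A hypothetical wait site could pass it
 * straight to the dma-fence API:
 *
 *      long ret = dma_fence_wait_timeout(fence, true,
 *                                        i915_fence_timeout(i915));
 */
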
/* Number of SAGV/QGV points; BSpec defines this precisely */
#define I915_NUM_QGV_POINTS 8

struct ddi_vbt_port_info {
        /* Non-NULL if port present. */
        struct intel_bios_encoder_data *devdata;

        int max_tmds_clock;

        /* This is an index in the HDMI/DVI DDI buffer translation table. */
        u8 hdmi_level_shift;
        u8 hdmi_level_shift_set:1;

        u8 alternate_aux_channel;
        u8 alternate_ddc_pin;

        int dp_max_link_rate;           /* 0 for not limited by VBT */
};

enum psr_lines_to_wait {
        PSR_0_LINES_TO_WAIT = 0,
        PSR_1_LINE_TO_WAIT,
        PSR_4_LINES_TO_WAIT,
        PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
        /* bdb version */
        u16 version;

        struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
        struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

        /* Feature bits */
        unsigned int int_tv_support:1;
        unsigned int lvds_dither:1;
        unsigned int int_crt_support:1;
        unsigned int lvds_use_ssc:1;
        unsigned int int_lvds_support:1;
        unsigned int display_clock_mode:1;
        unsigned int fdi_rx_polarity_inverted:1;
        unsigned int panel_type:4;
        int lvds_ssc_freq;
        unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
        enum drm_panel_orientation orientation;

        enum drrs_support_type drrs_type;

        struct {
                int rate;
                int lanes;
                int preemphasis;
                int vswing;
                bool low_vswing;
                bool initialized;
                int bpp;
                struct edp_power_seq pps;
                bool hobl;
        } edp;

        struct {
                bool enable;
                bool full_link;
                bool require_aux_wakeup;
                int idle_frames;
                enum psr_lines_to_wait lines_to_wait;
                int tp1_wakeup_time_us;
                int tp2_tp3_wakeup_time_us;
                int psr2_tp2_tp3_wakeup_time_us;
        } psr;

        struct {
                u16 pwm_freq_hz;
                bool present;
                bool active_low_pwm;
                u8 min_brightness;      /* min_brightness/255 of max */
                u8 controller;          /* brightness controller number */
                enum intel_backlight_type type;
        } backlight;

        /* MIPI DSI */
        struct {
                u16 panel_id;
                struct mipi_config *config;
                struct mipi_pps_data *pps;
                u16 bl_ports;
                u16 cabc_ports;
                u8 seq_version;
                u32 size;
                u8 *data;
                const u8 *sequence[MIPI_SEQ_MAX];
                u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
                enum drm_panel_orientation orientation;
        } dsi;

        int crt_ddc_pin;

        struct list_head display_devices;

        struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
        struct sdvo_device_mapping sdvo_mappings[2];
};

enum intel_ddb_partitioning {
        INTEL_DDB_PART_1_2,
        INTEL_DDB_PART_5_6, /* IVB+ */
};

struct ilk_wm_values {
        u32 wm_pipe[3];
        u32 wm_lp[3];
        u32 wm_lp_spr[3];
        bool enable_fbc_wm;
        enum intel_ddb_partitioning partitioning;
};

struct g4x_pipe_wm {
        u16 plane[I915_MAX_PLANES];
        u16 fbc;
};

struct g4x_sr_wm {
        u16 plane;
        u16 cursor;
        u16 fbc;
};

struct vlv_wm_ddl_values {
        u8 plane[I915_MAX_PLANES];
};

struct vlv_wm_values {
        struct g4x_pipe_wm pipe[3];
        struct g4x_sr_wm sr;
        struct vlv_wm_ddl_values ddl[3];
        u8 level;
        bool cxsr;
};

struct g4x_wm_values {
        struct g4x_pipe_wm pipe[2];
        struct g4x_sr_wm sr;
        struct g4x_sr_wm hpll;
        bool cxsr;
        bool hpll_en;
        bool fbc_en;
};

struct skl_ddb_entry {
        u16 start, end; /* in number of blocks, 'end' is exclusive */
};

static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
        return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
                                       const struct skl_ddb_entry *e2)
{
        return e1->start == e2->start && e1->end == e2->end;
}

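/*
 * Worked example (illustrative only): entries are half-open [start, end),
 * so an allocation of blocks 0..511 is { .start = 0, .end = 512 } with
 * skl_ddb_entry_size() == 512, and two entries with identical bounds
 * compare equal via skl_ddb_entry_equal().
 */
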
struct i915_frontbuffer_tracking {
        spinlock_t lock;

        /*
         * Tracking bits for delayed frontbuffer flushing due to gpu activity or
         * scheduled flips.
         */
        unsigned busy_bits;
        unsigned flip_bits;
};

struct i915_virtual_gpu {
        struct mutex lock; /* serialises sending of g2v_notify command pkts */
        bool active;
        u32 caps;
};

struct intel_cdclk_config {
        unsigned int cdclk, vco, ref, bypass;
        u8 voltage_level;
};

struct i915_selftest_stash {
        atomic_t counter;
};

struct drm_i915_private {
        struct drm_device drm;

        /* FIXME: Device release actions should all be moved to drmm_ */
        bool do_release;

        /* i915 device parameters */
        struct i915_params params;

        const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
        struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
        struct intel_driver_caps caps;

        /**
         * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
         * end of stolen which we can optionally use to create GEM objects
         * backed by stolen memory. Note that stolen_usable_size tells us
         * exactly how much of this we are actually allowed to use, given that
         * some portion of it is in fact reserved for use by hardware functions.
         */
        struct resource dsm;
        /**
         * Reserved portion of Data Stolen Memory
         */
        struct resource dsm_reserved;

        /*
         * Stolen memory is segmented in hardware with different portions
         * offlimits to certain functions.
         *
         * The drm_mm is initialised to the total accessible range, as found
         * from the PCI config. On Broadwell+, this is further restricted to
         * avoid the first page! The upper end of stolen memory is reserved for
         * hardware functions and similarly removed from the accessible range.
         */
        resource_size_t stolen_usable_size;     /* Total size minus reserved ranges */

        struct intel_uncore uncore;
        struct intel_uncore_mmio_debug mmio_debug;

        struct i915_virtual_gpu vgpu;

        struct intel_gvt *gvt;

        struct intel_wopcm wopcm;

        struct intel_csr csr;

        struct intel_gmbus gmbus[GMBUS_NUM_PINS];

        /** gmbus_mutex protects against concurrent usage of the single hw gmbus
         * controller on different i2c buses. */
        struct mutex gmbus_mutex;

        /**
         * Base address of where the gmbus and gpio blocks are located (either
         * on PCH or on SoC for platforms without PCH).
         */
        u32 gpio_mmio_base;

        u32 hsw_psr_mmio_adjust;

        /* MMIO base address for MIPI regs */
        u32 mipi_mmio_base;

        u32 pps_mmio_base;

        wait_queue_head_t gmbus_wait_queue;

        struct pci_dev *bridge_dev;

        struct rb_root uabi_engines;

        struct resource mch_res;

        /* protects the irq masks */
        spinlock_t irq_lock;

        bool display_irqs_enabled;

        /* Sideband mailbox protection */
        struct mutex sb_lock;
        struct pm_qos_request sb_qos;

        /** Cached value of IMR to avoid reads in updating the bitfield */
        union {
                u32 irq_mask;
                u32 de_irq_mask[I915_MAX_PIPES];
        };
        u32 pipestat_irq_mask[I915_MAX_PIPES];

        struct i915_hotplug hotplug;
        struct intel_fbc fbc;
        struct i915_drrs drrs;
        struct intel_opregion opregion;
        struct intel_vbt_data vbt;

        bool preserve_bios_swizzle;

        /* overlay */
        struct intel_overlay *overlay;

        /* backlight registers and fields in struct intel_panel */
        struct mutex backlight_lock;

        /* protects panel power sequencer state */
        struct mutex pps_mutex;

        unsigned int fsb_freq, mem_freq, is_ddr3;
        unsigned int skl_preferred_vco_freq;
        unsigned int max_cdclk_freq;

        unsigned int max_dotclk_freq;
        unsigned int hpll_freq;
        unsigned int fdi_pll_freq;
        unsigned int czclk_freq;

        struct {
                /* The current hardware cdclk configuration */
                struct intel_cdclk_config hw;

                /* cdclk, divider, and ratio table from bspec */
                const struct intel_cdclk_vals *table;

                struct intel_global_obj obj;
        } cdclk;

        struct {
                /* The current hardware dbuf configuration */
                u8 enabled_slices;

                struct intel_global_obj obj;
        } dbuf;

        /**
         * wq - Driver workqueue for GEM.
         *
         * NOTE: Work items scheduled here are not allowed to grab any modeset
         * locks, because otherwise the flushing done in the pageflip code will
         * result in deadlocks.
         */
        struct workqueue_struct *wq;

        /* ordered wq for modesets */
        struct workqueue_struct *modeset_wq;
        /* unbound hipri wq for page flips/plane updates */
        struct workqueue_struct *flip_wq;

        /* Display functions */
        struct drm_i915_display_funcs display;

        /* PCH chipset type */
        enum intel_pch pch_type;
        unsigned short pch_id;

        unsigned long quirks;

        struct drm_atomic_state *modeset_restore_state;
        struct drm_modeset_acquire_ctx reset_ctx;

        struct i915_ggtt ggtt; /* VM representing the global address space */

        struct i915_gem_mm mm;
        DECLARE_HASHTABLE(mm_structs, 7);
        spinlock_t mm_lock;

        /* Kernel Modesetting */

        struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
        struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];

        /**
         * dpll and cdclk state is protected by connection_mutex.
         * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
         * Must be global rather than per dpll, because on some platforms plls
         * share registers.
         */
        struct {
                struct mutex lock;

                int num_shared_dpll;
                struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
                const struct intel_dpll_mgr *mgr;

                struct {
                        int nssc;
                        int ssc;
                } ref_clks;
        } dpll;

        struct list_head global_obj_list;

        /*
         * For reading active_pipes, holding any crtc lock is sufficient;
         * for writing, all of them must be held.
         */
        u8 active_pipes;

        struct i915_wa_list gt_wa_list;

        struct i915_frontbuffer_tracking fb_tracking;

        struct intel_atomic_helper {
                struct llist_head free_list;
                struct work_struct free_work;
        } atomic_helper;

        bool mchbar_need_disable;

        struct intel_l3_parity l3_parity;

        /*
         * HTI (aka HDPORT) state read during initial hw readout.  Most
         * platforms don't have HTI, so this will just stay 0.  Those that do
         * will use this later to figure out which PLLs and PHYs are unavailable
         * for driver usage.
         */
        u32 hti_state;

        /*
         * edram size in MB.
         * Cannot be determined by PCIID. You must always read a register.
         */
        u32 edram_size_mb;

        struct i915_power_domains power_domains;

        struct i915_gpu_error gpu_error;

        struct drm_i915_gem_object *vlv_pctx;

        /* list of fbdevs registered on this device */
        struct intel_fbdev *fbdev;
        struct work_struct fbdev_suspend_work;

        struct drm_property *broadcast_rgb_property;
        struct drm_property *force_audio_property;

        /* hda/i915 audio component */
        struct i915_audio_component *audio_component;
        bool audio_component_registered;
        /**
         * av_mutex - mutex for audio/video sync
         */
        struct mutex av_mutex;
        int audio_power_refcount;
        u32 audio_freq_cntrl;

        u32 fdi_rx_config;

        /* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
        u32 chv_phy_control;
        /*
         * Shadows for CHV DPLL_MD regs to keep the state
         * checker somewhat working in the presence of hardware
         * crappiness (can't read out DPLL_MD for pipes B & C).
         */
        u32 chv_dpll_md[I915_MAX_PIPES];
        u32 bxt_phy_grc;

        u32 suspend_count;
        bool power_domains_suspended;
        struct i915_suspend_saved_registers regfile;
        struct vlv_s0ix_state *vlv_s0ix_state;

        enum {
                I915_SAGV_UNKNOWN = 0,
                I915_SAGV_DISABLED,
                I915_SAGV_ENABLED,
                I915_SAGV_NOT_CONTROLLED
        } sagv_status;

        u32 sagv_block_time_us;

        struct {
                /*
                 * Raw watermark latency values:
                 * in 0.1us units for WM0,
                 * in 0.5us units for WM1+.
                 */
                /* primary */
                u16 pri_latency[5];
                /* sprite */
                u16 spr_latency[5];
                /* cursor */
                u16 cur_latency[5];
                /*
                 * Raw watermark memory latency values
                 * for SKL for all 8 levels
                 * in 1us units.
                 */
                u16 skl_latency[8];

                /* current hardware state */
                union {
                        struct ilk_wm_values hw;
                        struct vlv_wm_values vlv;
                        struct g4x_wm_values g4x;
                };

                u8 max_level;

                /*
                 * Should be held around atomic WM register writing; also
                 * protects intel_crtc->wm.active and
                 * crtc_state->wm.need_postvbl_update.
                 */
                struct mutex wm_mutex;
        } wm;

        struct dram_info {
                bool wm_lv_0_adjust_needed;
                u8 num_channels;
                bool symmetric_memory;
                enum intel_dram_type {
                        INTEL_DRAM_UNKNOWN,
                        INTEL_DRAM_DDR3,
                        INTEL_DRAM_DDR4,
                        INTEL_DRAM_LPDDR3,
                        INTEL_DRAM_LPDDR4,
                        INTEL_DRAM_DDR5,
                        INTEL_DRAM_LPDDR5,
                } type;
                u8 num_qgv_points;
        } dram_info;

        struct intel_bw_info {
                /* for each QGV point */
                unsigned int deratedbw[I915_NUM_QGV_POINTS];
                u8 num_qgv_points;
                u8 num_planes;
        } max_bw[6];

        struct intel_global_obj bw_obj;

        struct intel_runtime_pm runtime_pm;

        struct i915_perf perf;

        /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
        struct intel_gt gt;

        struct {
                struct i915_gem_contexts {
                        spinlock_t lock; /* locks list */
                        struct list_head list;
                } contexts;

                /*
                 * We replace the local file with a global mapping as the
                 * backing storage for the mmap is on the device and not
                 * on the struct file, and we do not want to prolong the
                 * lifetime of the local fd. To minimise the number of
                 * anonymous inodes we create, we use a global singleton to
                 * share the global mapping.
                 */
                struct file *mmap_singleton;
        } gem;

        u8 framestart_delay;

        u8 pch_ssc_use;

        /* For i915gm/i945gm vblank irq workaround */
        u8 vblank_enabled;

        /* perform PHY state sanity checks? */
        bool chv_phy_assert[2];

        bool ipc_enabled;

        /* Used to save the pipe-to-encoder mapping for audio */
        struct intel_encoder *av_enc_map[I915_MAX_PIPES];

        /* necessary resource sharing with HDMI LPE audio driver. */
        struct {
                struct platform_device *platdev;
                int     irq;
        } lpe_audio;

        struct i915_pmu pmu;

        struct i915_hdcp_comp_master *hdcp_master;
        bool hdcp_comp_added;

        /* Mutex to protect the above hdcp component related values. */
        struct mutex hdcp_comp_mutex;

        I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)

        /*
         * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
         * will be rejected. Instead look for a better place.
         */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
        return container_of(dev, struct drm_i915_private, drm);
}

static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
        return dev_get_drvdata(kdev);
}

static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
        return pci_get_drvdata(pdev);
}

/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
        for ((id__) = 0; \
             (id__) < I915_NUM_ENGINES; \
             (id__)++) \
                for_each_if ((engine__) = (dev_priv__)->engine[(id__)])

/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
        for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
             (tmp__) ? \
             ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
             0;)

#define rb_to_uabi_engine(rb) \
        rb_entry_safe(rb, struct intel_engine_cs, uabi_node)

#define for_each_uabi_engine(engine__, i915__) \
        for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\
             (engine__); \
             (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))

#define for_each_uabi_class_engine(engine__, class__, i915__) \
        for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
             (engine__) && (engine__)->uabi_class == (class__); \
             (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))

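/*
 * Usage sketch (illustrative only): walk every engine exposed to
 * userspace, e.g. to count them. 'i915' is an assumed pointer to the
 * driver private structure.
 *
 *      struct intel_engine_cs *engine;
 *      unsigned int count = 0;
 *
 *      for_each_uabi_engine(engine, i915)
 *              count++;
 */
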
#define I915_GTT_OFFSET_NONE ((u32)-1)

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
        BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
        BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
        BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
})
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
        BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
        GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
                INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))

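/*
 * Worked example (illustrative only): with 8 bits per pipe, the first
 * plane (plane_id 0) of the second pipe (pipe 1) maps to bit 8, the
 * overlay of the first pipe is the top bit of its per-pipe byte (bit 7),
 * and INTEL_FRONTBUFFER_ALL_MASK(0) covers bits 7:0.
 */
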
#define INTEL_INFO(dev_priv)    (&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv)  (&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv)   (&(dev_priv)->caps)

#define INTEL_GEN(dev_priv)     (INTEL_INFO(dev_priv)->gen)
#define INTEL_DEVID(dev_priv)   (RUNTIME_INFO(dev_priv)->device_id)

#define DISPLAY_VER(i915)       (INTEL_INFO(i915)->display.version)
#define IS_DISPLAY_RANGE(i915, from, until) \
        (DISPLAY_VER(i915) >= (from) && DISPLAY_VER(i915) <= (until))
#define IS_DISPLAY_VER(i915, v) (DISPLAY_VER(i915) == (v))

#define REVID_FOREVER           0xff
#define INTEL_REVID(dev_priv)   (to_pci_dev((dev_priv)->drm.dev)->revision)

#define INTEL_GEN_MASK(s, e) ( \
        BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
        BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
        GENMASK((e) - 1, (s) - 1))

/* Returns true if Gen is in inclusive range [Start, End] */
#define IS_GEN_RANGE(dev_priv, s, e) \
        (!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))

#define IS_GEN(dev_priv, n) \
        (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
         INTEL_INFO(dev_priv)->gen == (n))

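/*
 * Worked example (illustrative only): INTEL_GEN_MASK(9, 11) expands to
 * GENMASK(10, 8) == 0x700, i.e. gen N is stored at BIT(N - 1) in
 * gen_mask, so IS_GEN_RANGE(dev_priv, 9, 11) is a single AND against
 * that constant, while IS_GEN(dev_priv, 9) compares the gen field
 * directly.
 */
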
#define HAS_DSB(dev_priv)       (INTEL_INFO(dev_priv)->display.has_dsb)

/*
 * Return true if revision is in range [since,until] inclusive.
 *
 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
 */
#define IS_REVID(p, since, until) \
        (INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))

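/*
 * Usage sketch (illustrative only): bound a workaround to early Skylake
 * revisions using the SKL_REVID_* values defined further down:
 *
 *      if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_C0))
 *              apply_early_skl_workaround(dev_priv); // hypothetical helper
 */
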
#define INTEL_DISPLAY_STEP(__i915) (RUNTIME_INFO(__i915)->step.disp_stepping)
#define INTEL_GT_STEP(__i915) (RUNTIME_INFO(__i915)->step.gt_stepping)

#define IS_DISPLAY_STEP(__i915, since, until) \
        (drm_WARN_ON(&(__i915)->drm, INTEL_DISPLAY_STEP(__i915) == STEP_NONE), \
         INTEL_DISPLAY_STEP(__i915) >= (since) && INTEL_DISPLAY_STEP(__i915) <= (until))

#define IS_GT_STEP(__i915, since, until) \
        (drm_WARN_ON(&(__i915)->drm, INTEL_GT_STEP(__i915) == STEP_NONE), \
         INTEL_GT_STEP(__i915) >= (since) && INTEL_GT_STEP(__i915) <= (until))

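/*
 * Usage sketch (illustrative only): these helpers read the stepping
 * cached in RUNTIME_INFO() and drm_WARN_ON() if it was never set
 * (STEP_NONE). A GT workaround limited to steppings A0..B0 might be
 * guarded as below; IS_SOME_PLATFORM() stands in for a real platform
 * check such as IS_ALDERLAKE_S(), and STEP_A0/STEP_B0 are assumed to
 * come from intel_step.h.
 *
 *      if (IS_SOME_PLATFORM(i915) && IS_GT_STEP(i915, STEP_A0, STEP_B0))
 *              apply_gt_workaround(i915); // hypothetical helper
 */
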
static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
                      enum intel_platform p)
{
        const unsigned int pbits =
                BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

        /* Expand the platform_mask array if this fails. */
        BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
                     pbits * ARRAY_SIZE(info->platform_mask));

        return p / pbits;
}

static __always_inline unsigned int
__platform_mask_bit(const struct intel_runtime_info *info,
                    enum intel_platform p)
{
        const unsigned int pbits =
                BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

        return p % pbits + INTEL_SUBPLATFORM_BITS;
}

static inline u32
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
        const unsigned int pi = __platform_mask_index(info, p);

        return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1);
}

static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
        const struct intel_runtime_info *info = RUNTIME_INFO(i915);
        const unsigned int pi = __platform_mask_index(info, p);
        const unsigned int pb = __platform_mask_bit(info, p);

        BUILD_BUG_ON(!__builtin_constant_p(p));

        return info->platform_mask[pi] & BIT(pb);
}

static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
               enum intel_platform p, unsigned int s)
{
        const struct intel_runtime_info *info = RUNTIME_INFO(i915);
        const unsigned int pi = __platform_mask_index(info, p);
        const unsigned int pb = __platform_mask_bit(info, p);
        const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
        const u32 mask = info->platform_mask[pi];

        BUILD_BUG_ON(!__builtin_constant_p(p));
        BUILD_BUG_ON(!__builtin_constant_p(s));
        BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);

        /* Shift and test on the MSB position so sign flag can be used. */
        return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}

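/*
 * Worked example (illustrative only): each platform_mask word keeps its
 * low INTEL_SUBPLATFORM_BITS for subplatform flags of the stored
 * platform, and platform p occupies bit (p % pbits) +
 * INTEL_SUBPLATFORM_BITS of word p / pbits. E.g. assuming 3 subplatform
 * bits and 32-bit words, pbits == 29, so platform 0 sits at bit 3 of
 * word 0. IS_SUBPLATFORM() then requires both the platform bit and the
 * requested subplatform bit to be set in the same word.
 */
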
1345 #define IS_MOBILE(dev_priv)     (INTEL_INFO(dev_priv)->is_mobile)
1346 #define IS_DGFX(dev_priv)   (INTEL_INFO(dev_priv)->is_dgfx)
1347
1348 #define IS_I830(dev_priv)       IS_PLATFORM(dev_priv, INTEL_I830)
1349 #define IS_I845G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I845G)
1350 #define IS_I85X(dev_priv)       IS_PLATFORM(dev_priv, INTEL_I85X)
1351 #define IS_I865G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I865G)
1352 #define IS_I915G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I915G)
1353 #define IS_I915GM(dev_priv)     IS_PLATFORM(dev_priv, INTEL_I915GM)
1354 #define IS_I945G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I945G)
1355 #define IS_I945GM(dev_priv)     IS_PLATFORM(dev_priv, INTEL_I945GM)
1356 #define IS_I965G(dev_priv)      IS_PLATFORM(dev_priv, INTEL_I965G)
1357 #define IS_I965GM(dev_priv)     IS_PLATFORM(dev_priv, INTEL_I965GM)
1358 #define IS_G45(dev_priv)        IS_PLATFORM(dev_priv, INTEL_G45)
1359 #define IS_GM45(dev_priv)       IS_PLATFORM(dev_priv, INTEL_GM45)
1360 #define IS_G4X(dev_priv)        (IS_G45(dev_priv) || IS_GM45(dev_priv))
1361 #define IS_PINEVIEW(dev_priv)   IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
1362 #define IS_G33(dev_priv)        IS_PLATFORM(dev_priv, INTEL_G33)
1363 #define IS_IRONLAKE(dev_priv)   IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
1364 #define IS_IRONLAKE_M(dev_priv) \
1365         (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
1366 #define IS_SANDYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_SANDYBRIDGE)
1367 #define IS_IVYBRIDGE(dev_priv)  IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
1368 #define IS_IVB_GT1(dev_priv)    (IS_IVYBRIDGE(dev_priv) && \
1369                                  INTEL_INFO(dev_priv)->gt == 1)
1370 #define IS_VALLEYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
1371 #define IS_CHERRYVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
1372 #define IS_HASWELL(dev_priv)    IS_PLATFORM(dev_priv, INTEL_HASWELL)
1373 #define IS_BROADWELL(dev_priv)  IS_PLATFORM(dev_priv, INTEL_BROADWELL)
1374 #define IS_SKYLAKE(dev_priv)    IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
1375 #define IS_BROXTON(dev_priv)    IS_PLATFORM(dev_priv, INTEL_BROXTON)
1376 #define IS_KABYLAKE(dev_priv)   IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
1377 #define IS_GEMINILAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
1378 #define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
1379 #define IS_COMETLAKE(dev_priv)  IS_PLATFORM(dev_priv, INTEL_COMETLAKE)
1380 #define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
1381 #define IS_ICELAKE(dev_priv)    IS_PLATFORM(dev_priv, INTEL_ICELAKE)
1382 #define IS_JSL_EHL(dev_priv)    (IS_PLATFORM(dev_priv, INTEL_JASPERLAKE) || \
1383                                 IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE))
1384 #define IS_TIGERLAKE(dev_priv)  IS_PLATFORM(dev_priv, INTEL_TIGERLAKE)
1385 #define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ROCKETLAKE)
1386 #define IS_DG1(dev_priv)        IS_PLATFORM(dev_priv, INTEL_DG1)
1387 #define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, INTEL_ALDERLAKE_S)
1388 #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
1389                                     (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
1390 #define IS_BDW_ULT(dev_priv) \
1391         IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
1392 #define IS_BDW_ULX(dev_priv) \
1393         IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
1394 #define IS_BDW_GT3(dev_priv)    (IS_BROADWELL(dev_priv) && \
1395                                  INTEL_INFO(dev_priv)->gt == 3)
1396 #define IS_HSW_ULT(dev_priv) \
1397         IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
1398 #define IS_HSW_GT3(dev_priv)    (IS_HASWELL(dev_priv) && \
1399                                  INTEL_INFO(dev_priv)->gt == 3)
1400 #define IS_HSW_GT1(dev_priv)    (IS_HASWELL(dev_priv) && \
1401                                  INTEL_INFO(dev_priv)->gt == 1)
1402 /* ULX machines are also considered ULT. */
1403 #define IS_HSW_ULX(dev_priv) \
1404         IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
1405 #define IS_SKL_ULT(dev_priv) \
1406         IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
1407 #define IS_SKL_ULX(dev_priv) \
1408         IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
1409 #define IS_KBL_ULT(dev_priv) \
1410         IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
1411 #define IS_KBL_ULX(dev_priv) \
1412         IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
1413 #define IS_SKL_GT2(dev_priv)    (IS_SKYLAKE(dev_priv) && \
1414                                  INTEL_INFO(dev_priv)->gt == 2)
1415 #define IS_SKL_GT3(dev_priv)    (IS_SKYLAKE(dev_priv) && \
1416                                  INTEL_INFO(dev_priv)->gt == 3)
1417 #define IS_SKL_GT4(dev_priv)    (IS_SKYLAKE(dev_priv) && \
1418                                  INTEL_INFO(dev_priv)->gt == 4)
1419 #define IS_KBL_GT2(dev_priv)    (IS_KABYLAKE(dev_priv) && \
1420                                  INTEL_INFO(dev_priv)->gt == 2)
1421 #define IS_KBL_GT3(dev_priv)    (IS_KABYLAKE(dev_priv) && \
1422                                  INTEL_INFO(dev_priv)->gt == 3)
1423 #define IS_CFL_ULT(dev_priv) \
1424         IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
1425 #define IS_CFL_ULX(dev_priv) \
1426         IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULX)
1427 #define IS_CFL_GT2(dev_priv)    (IS_COFFEELAKE(dev_priv) && \
1428                                  INTEL_INFO(dev_priv)->gt == 2)
1429 #define IS_CFL_GT3(dev_priv)    (IS_COFFEELAKE(dev_priv) && \
1430                                  INTEL_INFO(dev_priv)->gt == 3)
1431
1432 #define IS_CML_ULT(dev_priv) \
1433         IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULT)
1434 #define IS_CML_ULX(dev_priv) \
1435         IS_SUBPLATFORM(dev_priv, INTEL_COMETLAKE, INTEL_SUBPLATFORM_ULX)
1436 #define IS_CML_GT2(dev_priv)    (IS_COMETLAKE(dev_priv) && \
1437                                  INTEL_INFO(dev_priv)->gt == 2)
1438
1439 #define IS_CNL_WITH_PORT_F(dev_priv) \
1440         IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
1441 #define IS_ICL_WITH_PORT_F(dev_priv) \
1442         IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
1443
1444 #define IS_TGL_U(dev_priv) \
1445         IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULT)
1446
1447 #define IS_TGL_Y(dev_priv) \
1448         IS_SUBPLATFORM(dev_priv, INTEL_TIGERLAKE, INTEL_SUBPLATFORM_ULX)
1449
1450 #define SKL_REVID_A0            0x0
1451 #define SKL_REVID_B0            0x1
1452 #define SKL_REVID_C0            0x2
1453 #define SKL_REVID_D0            0x3
1454 #define SKL_REVID_E0            0x4
1455 #define SKL_REVID_F0            0x5
1456 #define SKL_REVID_G0            0x6
1457 #define SKL_REVID_H0            0x7
1458
1459 #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
1460
1461 #define BXT_REVID_A0            0x0
1462 #define BXT_REVID_A1            0x1
1463 #define BXT_REVID_B0            0x3
1464 #define BXT_REVID_B_LAST        0x8
1465 #define BXT_REVID_C0            0x9
1466
1467 #define IS_BXT_REVID(dev_priv, since, until) \
1468         (IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))
1469
1470 enum {
1471         KBL_REVID_A0,
1472         KBL_REVID_B0,
1473         KBL_REVID_B1,
1474         KBL_REVID_C0,
1475         KBL_REVID_D0,
1476         KBL_REVID_D1,
1477         KBL_REVID_E0,
1478         KBL_REVID_F0,
1479         KBL_REVID_G0,
1480 };
1481
1482 #define IS_KBL_GT_REVID(dev_priv, since, until) \
1483         (IS_KABYLAKE(dev_priv) && \
1484          kbl_revids[INTEL_REVID(dev_priv)].gt_stepping >= since && \
1485          kbl_revids[INTEL_REVID(dev_priv)].gt_stepping <= until)
1486 #define IS_KBL_DISP_REVID(dev_priv, since, until) \
1487         (IS_KABYLAKE(dev_priv) && \
1488          kbl_revids[INTEL_REVID(dev_priv)].disp_stepping >= since && \
1489          kbl_revids[INTEL_REVID(dev_priv)].disp_stepping <= until)
1490
1491 #define GLK_REVID_A0            0x0
1492 #define GLK_REVID_A1            0x1
1493 #define GLK_REVID_A2            0x2
1494 #define GLK_REVID_B0            0x3
1495
1496 #define IS_GLK_REVID(dev_priv, since, until) \
1497         (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
1498
1499 #define CNL_REVID_A0            0x0
1500 #define CNL_REVID_B0            0x1
1501 #define CNL_REVID_C0            0x2
1502
1503 #define IS_CNL_REVID(p, since, until) \
1504         (IS_CANNONLAKE(p) && IS_REVID(p, since, until))
1505
1506 #define ICL_REVID_A0            0x0
1507 #define ICL_REVID_A2            0x1
1508 #define ICL_REVID_B0            0x3
1509 #define ICL_REVID_B2            0x4
1510 #define ICL_REVID_C0            0x5
1511
1512 #define IS_ICL_REVID(p, since, until) \
1513         (IS_ICELAKE(p) && IS_REVID(p, since, until))
1514
1515 #define EHL_REVID_A0            0x0
1516 #define EHL_REVID_B0            0x1
1517
1518 #define IS_JSL_EHL_REVID(p, since, until) \
1519         (IS_JSL_EHL(p) && IS_REVID(p, since, until))
1520
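/*
 * Note: despite the tgl_ prefix, this helper also serves platforms that
 * reuse the TGL-style stepping tables (see the ADL-S branch below).
 */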
1521 static inline const struct i915_rev_steppings *
1522 tgl_stepping_get(struct drm_i915_private *dev_priv)
1523 {
1524         u8 revid = INTEL_REVID(dev_priv);
1525         u8 size;
1526         const struct i915_rev_steppings *revid_step_tbl;
1527
1528         if (IS_ALDERLAKE_S(dev_priv)) {
1529                 revid_step_tbl = adls_revid_step_tbl;
1530                 size = ARRAY_SIZE(adls_revid_step_tbl);
1531         } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
1532                 revid_step_tbl = tgl_uy_revid_step_tbl;
1533                 size = ARRAY_SIZE(tgl_uy_revid_step_tbl);
1534         } else {
1535                 revid_step_tbl = tgl_revid_step_tbl;
1536                 size = ARRAY_SIZE(tgl_revid_step_tbl);
1537         }
1538
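        /*
         * Clamp revids beyond the table to its last entry so that future,
         * unknown revisions inherit the newest known stepping info.
         */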
1539         revid = min_t(u8, revid, size - 1);
1540
1541         return &revid_step_tbl[revid];
1542 }
1543
1544 #define IS_TGL_DISP_STEPPING(p, since, until) \
1545         (IS_TIGERLAKE(p) && \
1546          tgl_stepping_get(p)->disp_stepping >= (since) && \
1547          tgl_stepping_get(p)->disp_stepping <= (until))
1548
1549 #define IS_TGL_UY_GT_STEPPING(p, since, until) \
1550         ((IS_TGL_U(p) || IS_TGL_Y(p)) && \
1551          tgl_stepping_get(p)->gt_stepping >= (since) && \
1552          tgl_stepping_get(p)->gt_stepping <= (until))
1553
1554 #define IS_TGL_GT_STEPPING(p, since, until) \
1555         (IS_TIGERLAKE(p) && \
1556          !(IS_TGL_U(p) || IS_TGL_Y(p)) && \
1557          tgl_stepping_get(p)->gt_stepping >= (since) && \
1558          tgl_stepping_get(p)->gt_stepping <= (until))
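
/*
 * Illustrative sketch only: GT and display steppings are decoded
 * independently from the same revid, so one device may need a GT
 * workaround but not a display one. The stepping bound values come
 * from intel_step.h; this hypothetical wrapper passes them through:
 */
static inline bool
tgl_example_needs_wa(struct drm_i915_private *i915, u8 gt_until, u8 disp_until)
{
        return IS_TGL_GT_STEPPING(i915, 0, gt_until) ||
               IS_TGL_DISP_STEPPING(i915, 0, disp_until);
}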
1559
1560 #define RKL_REVID_A0            0x0
1561 #define RKL_REVID_B0            0x1
1562 #define RKL_REVID_C0            0x4
1563
1564 #define IS_RKL_REVID(p, since, until) \
1565         (IS_ROCKETLAKE(p) && IS_REVID(p, since, until))
1566
1567 #define DG1_REVID_A0            0x0
1568 #define DG1_REVID_B0            0x1
1569
1570 #define IS_DG1_REVID(p, since, until) \
1571         (IS_DG1(p) && IS_REVID(p, since, until))
1572
1573 #define IS_ADLS_DISP_STEPPING(p, since, until) \
1574         (IS_ALDERLAKE_S(p) && \
1575          tgl_stepping_get(p)->disp_stepping >= (since) && \
1576          tgl_stepping_get(p)->disp_stepping <= (until))
1577
1578 #define IS_ADLS_GT_STEPPING(p, since, until) \
1579         (IS_ALDERLAKE_S(p) && \
1580          tgl_stepping_get(p)->gt_stepping >= (since) && \
1581          tgl_stepping_get(p)->gt_stepping <= (until))
1582
1583 #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
1584 #define IS_GEN9_LP(dev_priv)    (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
1585 #define IS_GEN9_BC(dev_priv)    (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
1586
1587 #define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id))
1588 #define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id)
1589
1590 #define ENGINE_INSTANCES_MASK(gt, first, count) ({              \
1591         unsigned int first__ = (first);                                 \
1592         unsigned int count__ = (count);                                 \
1593         ((gt)->info.engine_mask &                                               \
1594          GENMASK(first__ + count__ - 1, first__)) >> first__;           \
1595 })
1596 #define VDBOX_MASK(gt) \
1597         ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS)
1598 #define VEBOX_MASK(gt) \
1599         ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS)
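
/*
 * Illustrative sketch only: the masks above are shifted down to be
 * zero-based, so counting the enabled video decode engines is a
 * simple popcount:
 */
static inline unsigned int example_num_vdboxes(struct intel_gt *gt)
{
        return hweight32(VDBOX_MASK(gt));
}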
1600
1601 /*
1602  * The Gen7 cmdparser copies the scanned buffer to the ggtt for execution.
1603  * All later gens can run the final buffer from the ppgtt.
1604  */
1605 #define CMDPARSER_USES_GGTT(dev_priv) IS_GEN(dev_priv, 7)
1606
1607 #define HAS_LLC(dev_priv)       (INTEL_INFO(dev_priv)->has_llc)
1608 #define HAS_SNOOP(dev_priv)     (INTEL_INFO(dev_priv)->has_snoop)
1609 #define HAS_EDRAM(dev_priv)     ((dev_priv)->edram_size_mb)
1610 #define HAS_SECURE_BATCHES(dev_priv) (INTEL_GEN(dev_priv) < 6)
1611 #define HAS_WT(dev_priv)        HAS_EDRAM(dev_priv)
1612
1613 #define HWS_NEEDS_PHYSICAL(dev_priv)    (INTEL_INFO(dev_priv)->hws_needs_physical)
1614
1615 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
1616                 (INTEL_INFO(dev_priv)->has_logical_ring_contexts)
1617 #define HAS_LOGICAL_RING_ELSQ(dev_priv) \
1618                 (INTEL_INFO(dev_priv)->has_logical_ring_elsq)
1619
1620 #define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq)
1621
1622 #define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
1623
1624 #define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
1625 #define HAS_PPGTT(dev_priv) \
1626         (INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
1627 #define HAS_FULL_PPGTT(dev_priv) \
1628         (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
1629
1630 #define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
1631         GEM_BUG_ON((sizes) == 0); \
1632         ((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
1633 })
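
/*
 * Illustrative sketch only: e.g. ask whether the device can back an
 * object purely with 64K pages (I915_GTT_PAGE_SIZE_64K comes from the
 * GTT headers included above):
 */
static inline bool example_supports_64k_pages(struct drm_i915_private *i915)
{
        return HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_64K);
}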
1634
1635 #define HAS_OVERLAY(dev_priv)            (INTEL_INFO(dev_priv)->display.has_overlay)
1636 #define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
1637                 (INTEL_INFO(dev_priv)->display.overlay_needs_physical)
1638
1639 /* Early gen2 devices have a totally busted CS TLB and require pinned batches. */
1640 #define HAS_BROKEN_CS_TLB(dev_priv)     (IS_I830(dev_priv) || IS_I845G(dev_priv))
1641
1642 #define NEEDS_RC6_CTX_CORRUPTION_WA(dev_priv)   \
1643         (IS_BROADWELL(dev_priv) || IS_GEN(dev_priv, 9))
1644
1645 /* WaRsDisableCoarsePowerGating:skl,cnl */
1646 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv)                    \
1647         (IS_CANNONLAKE(dev_priv) ||                                     \
1648          IS_SKL_GT3(dev_priv) ||                                        \
1649          IS_SKL_GT4(dev_priv))
1650
1651 #define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
1652 #define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
1653                                         IS_GEMINILAKE(dev_priv) || \
1654                                         IS_KABYLAKE(dev_priv))
1655
1656 /* With the 945 and later, Y tiling was adjusted to 32 rows of 128 bytes
1657  * each, which changed the alignment requirements and fence programming.
1658  */
1659 #define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
1660                                          !(IS_I915G(dev_priv) || \
1661                                          IS_I915GM(dev_priv)))
1662 #define SUPPORTS_TV(dev_priv)           (INTEL_INFO(dev_priv)->display.supports_tv)
1663 #define I915_HAS_HOTPLUG(dev_priv)      (INTEL_INFO(dev_priv)->display.has_hotplug)
1664
1665 #define HAS_FW_BLC(dev_priv)    (INTEL_GEN(dev_priv) > 2)
1666 #define HAS_FBC(dev_priv)       (INTEL_INFO(dev_priv)->display.has_fbc)
1667 #define HAS_CUR_FBC(dev_priv)   (!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)
1668
1669 #define HAS_IPS(dev_priv)       (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
1670
1671 #define HAS_DP_MST(dev_priv)    (INTEL_INFO(dev_priv)->display.has_dp_mst)
1672
1673 #define HAS_DDI(dev_priv)                (INTEL_INFO(dev_priv)->display.has_ddi)
1674 #define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->display.has_fpga_dbg)
1675 #define HAS_PSR(dev_priv)                (INTEL_INFO(dev_priv)->display.has_psr)
1676 #define HAS_PSR_HW_TRACKING(dev_priv) \
1677         (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking)
1678 #define HAS_PSR2_SEL_FETCH(dev_priv)     (INTEL_GEN(dev_priv) >= 12)
1679 #define HAS_TRANSCODER(dev_priv, trans)  ((INTEL_INFO(dev_priv)->cpu_transcoder_mask & BIT(trans)) != 0)
1680
1681 #define HAS_RC6(dev_priv)                (INTEL_INFO(dev_priv)->has_rc6)
1682 #define HAS_RC6p(dev_priv)               (INTEL_INFO(dev_priv)->has_rc6p)
1683 #define HAS_RC6pp(dev_priv)              (false) /* HW was never validated */
1684
1685 #define HAS_RPS(dev_priv)       (INTEL_INFO(dev_priv)->has_rps)
1686
1687 #define HAS_CSR(dev_priv)       (INTEL_INFO(dev_priv)->display.has_csr)
1688
1689 #define HAS_MSO(i915)           (INTEL_GEN(i915) >= 12)
1690
1691 #define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
1692 #define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)
1693
1694 #define HAS_IPC(dev_priv)                (INTEL_INFO(dev_priv)->display.has_ipc)
1695
1696 #define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i))
1697 #define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM)
1698
1699 #define HAS_GT_UC(dev_priv)     (INTEL_INFO(dev_priv)->has_gt_uc)
1700
1701 #define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
1702
1703 #define HAS_GLOBAL_MOCS_REGISTERS(dev_priv)     (INTEL_INFO(dev_priv)->has_global_mocs)
1704
1706 #define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
1707
1708 #define HAS_LSPCON(dev_priv) (IS_GEN_RANGE(dev_priv, 9, 10))
1709
1710 /* DPF == dynamic parity feature */
1711 #define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
1712 #define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
1713                                  2 : HAS_L3_DPF(dev_priv))
1714
1715 #define GT_FREQUENCY_MULTIPLIER 50
1716 #define GEN9_FREQ_SCALER 3
1717
1718 #define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask))
1719
1720 #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0)
1721
1722 #define HAS_VRR(i915)   (INTEL_GEN(i915) >= 12)
1723
1724 /* Only valid when HAS_DISPLAY() is true */
1725 #define INTEL_DISPLAY_ENABLED(dev_priv) \
1726         (drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
1727
1728 static inline bool run_as_guest(void)
1729 {
1730         return !hypervisor_is_type(X86_HYPER_NATIVE);
1731 }
1732
1733 #define HAS_D12_PLANE_MINIMIZATION(dev_priv) (IS_ROCKETLAKE(dev_priv) || \
1734                                               IS_ALDERLAKE_S(dev_priv))
1735
1736 static inline bool intel_vtd_active(void)
1737 {
1738 #ifdef CONFIG_INTEL_IOMMU
1739         if (intel_iommu_gfx_mapped)
1740                 return true;
1741 #endif
1742
1743         /* Running as a guest, we assume the host is enforcing VT-d */
1744         return run_as_guest();
1745 }
1746
1747 static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
1748 {
1749         return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
1750 }
1751
1752 static inline bool
1753 intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
1754 {
1755         return IS_BROXTON(dev_priv) && intel_vtd_active();
1756 }
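
/*
 * Illustrative sketch only: callers typically use these predicates to
 * decide on extra guard padding around GGTT bindings, along the lines
 * of (padding size hypothetical):
 */
static inline u64 example_scanout_vtd_guard(struct drm_i915_private *i915)
{
        /* Pad scanout with one 4K scratch page when the VT-d w/a applies. */
        return intel_scanout_needs_vtd_wa(i915) ? 4096 : 0;
}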
1757
1758 /* i915_drv.c */
1759 extern const struct dev_pm_ops i915_pm_ops;
1760
1761 int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
1762 void i915_driver_remove(struct drm_i915_private *i915);
1763 void i915_driver_shutdown(struct drm_i915_private *i915);
1764
1765 int i915_resume_switcheroo(struct drm_i915_private *i915);
1766 int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
1767
1768 int i915_getparam_ioctl(struct drm_device *dev, void *data,
1769                         struct drm_file *file_priv);
1770
1771 /* i915_gem.c */
1772 int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
1773 void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
1774 void i915_gem_init_early(struct drm_i915_private *dev_priv);
1775 void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
1776
1777 struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915);
1778
1779 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
1780 {
1781         /*
1782          * A single pass should suffice to release all the freed objects (along
1783          * most call paths), but be a little more paranoid: freeing the
1784          * objects does take a small amount of time, during which the RCU
1785          * callbacks could have added new objects onto the freed list and
1786          * armed the work again.
1787          */
1788         while (atomic_read(&i915->mm.free_count)) {
1789                 flush_work(&i915->mm.free_work);
1790                 rcu_barrier();
1791         }
1792 }
1793
1794 static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
1795 {
1796         /*
1797          * Similar to the objects above (see i915_gem_drain_freed_objects()),
1798          * in general we have workers that are armed by RCU and then rearm
1799          * themselves in their callbacks. To be paranoid, we need to
1800          * drain the workqueue a second time after waiting for the RCU
1801          * grace period so that we catch work queued via RCU from the first
1802          * pass. As neither drain_workqueue() nor flush_workqueue() report
1803          * a result, we assume that no more than 3 passes are required to
1804          * catch all _recursive_ RCU delayed work.
1805          */
1807         int pass = 3;
1808         do {
1809                 flush_workqueue(i915->wq);
1810                 rcu_barrier();
1811                 i915_gem_drain_freed_objects(i915);
1812         } while (--pass);
1813         drain_workqueue(i915->wq);
1814 }
1815
1816 struct i915_vma * __must_check
1817 i915_gem_object_ggtt_pin_ww(struct drm_i915_gem_object *obj,
1818                             struct i915_gem_ww_ctx *ww,
1819                             const struct i915_ggtt_view *view,
1820                             u64 size, u64 alignment, u64 flags);
1821
1822 static inline struct i915_vma * __must_check
1823 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
1824                          const struct i915_ggtt_view *view,
1825                          u64 size, u64 alignment, u64 flags)
1826 {
1827         return i915_gem_object_ggtt_pin_ww(obj, NULL, view, size, alignment, flags);
1828 }
1829
1830 int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
1831                            unsigned long flags);
1832 #define I915_GEM_OBJECT_UNBIND_ACTIVE BIT(0)
1833 #define I915_GEM_OBJECT_UNBIND_BARRIER BIT(1)
1834 #define I915_GEM_OBJECT_UNBIND_TEST BIT(2)
1835
1836 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
1837
1838 int i915_gem_dumb_create(struct drm_file *file_priv,
1839                          struct drm_device *dev,
1840                          struct drm_mode_create_dumb *args);
1841
1842 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
1843
1844 static inline u32 i915_reset_count(struct i915_gpu_error *error)
1845 {
1846         return atomic_read(&error->reset_count);
1847 }
1848
1849 static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
1850                                           const struct intel_engine_cs *engine)
1851 {
1852         return atomic_read(&error->reset_engine_count[engine->uabi_class]);
1853 }
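
/*
 * Illustrative sketch only: the counters are monotonic, so a caller
 * samples them before submitting work and compares afterwards to
 * detect an intervening reset:
 */
static inline bool example_reset_since(struct i915_gpu_error *error,
                                       u32 count_before)
{
        return i915_reset_count(error) != count_before;
}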
1854
1855 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
1856 void i915_gem_driver_register(struct drm_i915_private *i915);
1857 void i915_gem_driver_unregister(struct drm_i915_private *i915);
1858 void i915_gem_driver_remove(struct drm_i915_private *dev_priv);
1859 void i915_gem_driver_release(struct drm_i915_private *dev_priv);
1860 void i915_gem_suspend(struct drm_i915_private *dev_priv);
1861 void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
1862 void i915_gem_resume(struct drm_i915_private *dev_priv);
1863
1864 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
1865
1866 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
1867                                     enum i915_cache_level cache_level);
1868
1869 struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
1870                                 struct dma_buf *dma_buf);
1871
1872 struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags);
1873
1874 static inline struct i915_gem_context *
1875 __i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
1876 {
1877         return xa_load(&file_priv->context_xa, id);
1878 }
1879
1880 static inline struct i915_gem_context *
1881 i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
1882 {
1883         struct i915_gem_context *ctx;
1884
1885         rcu_read_lock();
1886         ctx = __i915_gem_context_lookup_rcu(file_priv, id);
1887         if (ctx && !kref_get_unless_zero(&ctx->ref))
1888                 ctx = NULL;
1889         rcu_read_unlock();
1890
1891         return ctx;
1892 }
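
/*
 * Illustrative usage sketch: the lookup returns NULL or a context with
 * an extra reference taken under RCU, so callers pair it with
 * i915_gem_context_put() (declared in the gem context headers):
 *
 *      ctx = i915_gem_context_lookup(file_priv, id);
 *      if (!ctx)
 *              return -ENOENT;
 *      ...
 *      i915_gem_context_put(ctx);
 */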
1893
1894 /* i915_gem_evict.c */
1895 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
1896                                           u64 min_size, u64 alignment,
1897                                           unsigned long color,
1898                                           u64 start, u64 end,
1899                                           unsigned flags);
1900 int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
1901                                          struct drm_mm_node *node,
1902                                          unsigned int flags);
1903 int i915_gem_evict_vm(struct i915_address_space *vm);
1904
1905 /* i915_gem_internal.c */
1906 struct drm_i915_gem_object *
1907 i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
1908                                 phys_addr_t size);
1909
1910 /* i915_gem_tiling.c */
1911 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
1912 {
1913         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1914
1915         return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
1916                 i915_gem_object_is_tiled(obj);
1917 }
1918
1919 u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
1920                         unsigned int tiling, unsigned int stride);
1921 u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
1922                              unsigned int tiling, unsigned int stride);
1923
1924 const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
1925
1926 /* i915_cmd_parser.c */
1927 int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
1928 void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
1929 void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
1930 int intel_engine_cmd_parser(struct intel_engine_cs *engine,
1931                             struct i915_vma *batch,
1932                             unsigned long batch_offset,
1933                             unsigned long batch_length,
1934                             struct i915_vma *shadow,
1935                             bool trampoline);
1936 #define I915_CMD_PARSER_TRAMPOLINE_SIZE 8
1937
1938 /* intel_device_info.c */
1939 static inline struct intel_device_info *
1940 mkwrite_device_info(struct drm_i915_private *dev_priv)
1941 {
1942         return (struct intel_device_info *)INTEL_INFO(dev_priv);
1943 }
1944
1945 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
1946                         struct drm_file *file);
1947
1948 /* i915_mm.c */
1949 int remap_io_mapping(struct vm_area_struct *vma,
1950                      unsigned long addr, unsigned long pfn, unsigned long size,
1951                      struct io_mapping *iomap);
1952 int remap_io_sg(struct vm_area_struct *vma,
1953                 unsigned long addr, unsigned long size,
1954                 struct scatterlist *sgl, resource_size_t iobase);
1955
1956 static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
1957 {
1958         if (INTEL_GEN(i915) >= 10)
1959                 return CNL_HWS_CSB_WRITE_INDEX;
1960         else
1961                 return I915_HWS_CSB_WRITE_INDEX;
1962 }
1963
1964 static inline enum i915_map_type
1965 i915_coherent_map_type(struct drm_i915_private *i915)
1966 {
1967         return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
1968 }
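
/*
 * Illustrative usage sketch: pick the CPU map type when pinning a
 * kernel mapping of an object, e.g.
 *
 *      vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(i915));
 *
 * (i915_gem_object_pin_map() is declared in the gem object headers.)
 */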
1969
1970 #endif