2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 #ifndef _UAPI_I915_DRM_H_
28 #define _UAPI_I915_DRM_H_
32 #if defined(__cplusplus)
36 /* Please note that modifications to all structs defined here are
37 * subject to backwards-compatibility constraints.
 * DOC: uevents generated by i915 on its device node
43 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
44 * event from the gpu l3 cache. Additional information supplied is ROW,
45 * BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 * track of these events, and if a specific cache-line seems to have a
 * persistent error, remap it with the l3 remapping tool supplied in
48 * intel-gpu-tools. The value supplied with the event is always 1.
50 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
51 * hangcheck. The error detection event is a good indicator of when things
52 * began to go badly. The value supplied with the event is a 1 upon error
 * detection, and a 0 upon reset completion, signifying that no error
 * remains. NOTE: Disabling hangcheck or reset via module parameter will
55 * cause the related events to not be seen.
 * I915_RESET_UEVENT - Event is generated just before an attempt to reset
 * the GPU. The value supplied with the event is always 1. NOTE: Disabling
 * reset via module parameter will cause this event not to be seen.
61 #define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"
62 #define I915_ERROR_UEVENT "ERROR"
63 #define I915_RESET_UEVENT "RESET"
66 * i915_user_extension: Base class for defining a chain of extensions
68 * Many interfaces need to grow over time. In most cases we can simply
69 * extend the struct and have userspace pass in more data. Another option,
70 * as demonstrated by Vulkan's approach to providing extensions for forward
71 * and backward compatibility, is to use a list of optional structs to
72 * provide those extra details.
74 * The key advantage to using an extension chain is that it allows us to
75 * redefine the interface more easily than an ever growing struct of
76 * increasing complexity, and for large parts of that interface to be
77 * entirely optional. The downside is more pointer chasing; chasing across
78 * the __user boundary with pointers encapsulated inside u64.
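 *
 * As an illustrative sketch only (the extension names below are placeholders;
 * each ioctl that accepts extensions defines its own, and the chain relies on
 * the next_extension/name members of the full structure), a two-element chain
 * could be built like so:
 *
 *	struct i915_user_extension tail = {
 *		.next_extension = 0,
 *		.name = SOME_EXTENSION_NAME,
 *	};
 *	struct i915_user_extension head = {
 *		.next_extension = (__u64)(uintptr_t)&tail,
 *		.name = ANOTHER_EXTENSION_NAME,
 *	};
 *
 * and the address of head is then passed to the ioctl as a __u64 value.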
80 struct i915_user_extension {
83 __u32 flags; /* All undefined bits must be zero. */
84 __u32 rsvd[4]; /* Reserved for future use; must be zero. */
88 * MOCS indexes used for GPU surfaces, defining the cacheability of the
89 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
91 enum i915_mocs_table_index {
93 * Not cached anywhere, coherency between CPU and GPU accesses is
98 * Cacheability and coherency controlled by the kernel automatically
99 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
100 * usage of the surface (used for display scanout or not).
104 * Cached in all GPU caches available on the platform.
105 * Coherency between CPU and GPU accesses to the surface is not
106 * guaranteed without extra synchronization.
112 * Different engines serve different roles, and there may be more than one
113 * engine serving each role. enum drm_i915_gem_engine_class provides a
114 * classification of the role of the engine, which may be used when requesting
115 * operations to be performed on a certain subset of engines, or for providing
116 * information about that group.
118 enum drm_i915_gem_engine_class {
119 I915_ENGINE_CLASS_RENDER = 0,
120 I915_ENGINE_CLASS_COPY = 1,
121 I915_ENGINE_CLASS_VIDEO = 2,
122 I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,
124 /* should be kept compact */
126 I915_ENGINE_CLASS_INVALID = -1
130 * There may be more than one engine fulfilling any role within the system.
131 * Each engine of a class is given a unique instance number and therefore
132 * any engine can be specified by its class:instance tuplet. APIs that allow
133 * access to any engine in the system will use struct i915_engine_class_instance
134 * for this identification.
136 struct i915_engine_class_instance {
137 __u16 engine_class; /* see enum drm_i915_gem_engine_class */
138 __u16 engine_instance;
142 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
146 enum drm_i915_pmu_engine_sample {
147 I915_SAMPLE_BUSY = 0,
148 I915_SAMPLE_WAIT = 1,
152 #define I915_PMU_SAMPLE_BITS (4)
153 #define I915_PMU_SAMPLE_MASK (0xf)
154 #define I915_PMU_SAMPLE_INSTANCE_BITS (8)
155 #define I915_PMU_CLASS_SHIFT \
156 (I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
158 #define __I915_PMU_ENGINE(class, instance, sample) \
159 ((class) << I915_PMU_CLASS_SHIFT | \
	 (instance) << I915_PMU_SAMPLE_BITS | \
	 (sample))
163 #define I915_PMU_ENGINE_BUSY(class, instance) \
164 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
166 #define I915_PMU_ENGINE_WAIT(class, instance) \
167 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
169 #define I915_PMU_ENGINE_SEMA(class, instance) \
170 __I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
172 #define __I915_PMU_OTHER(x) (__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x))
174 #define I915_PMU_ACTUAL_FREQUENCY __I915_PMU_OTHER(0)
175 #define I915_PMU_REQUESTED_FREQUENCY __I915_PMU_OTHER(1)
176 #define I915_PMU_INTERRUPTS __I915_PMU_OTHER(2)
177 #define I915_PMU_RC6_RESIDENCY __I915_PMU_OTHER(3)
179 #define I915_PMU_LAST I915_PMU_RC6_RESIDENCY
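
/*
 * Illustrative sketch (not part of the uAPI): sampling render engine
 * busyness via the i915 PMU with perf_event_open(2). The PMU type number
 * (i915_pmu_type below) is an assumption and must be read at runtime from
 * the i915 entry under sysfs (see the DOC comment above).
 *
 *	struct perf_event_attr attr = { };
 *	__u64 busy_ns;
 *	int fd;
 *
 *	attr.type = i915_pmu_type;
 *	attr.size = sizeof(attr);
 *	attr.config = I915_PMU_ENGINE_BUSY(I915_ENGINE_CLASS_RENDER, 0);
 *	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
 *	read(fd, &busy_ns, sizeof(busy_ns));
 */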
181 /* Each region is a minimum of 16k, and there are at most 255 of them.
183 #define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
184 * of chars for next/prev indices */
185 #define I915_LOG_MIN_TEX_REGION_SIZE 14
187 typedef struct _drm_i915_init {
189 I915_INIT_DMA = 0x01,
190 I915_CLEANUP_DMA = 0x02,
191 I915_RESUME_DMA = 0x03
193 unsigned int mmio_offset;
194 int sarea_priv_offset;
195 unsigned int ring_start;
196 unsigned int ring_end;
197 unsigned int ring_size;
198 unsigned int front_offset;
199 unsigned int back_offset;
200 unsigned int depth_offset;
204 unsigned int pitch_bits;
205 unsigned int back_pitch;
206 unsigned int depth_pitch;
208 unsigned int chipset;
211 typedef struct _drm_i915_sarea {
212 struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
213 int last_upload; /* last time texture was uploaded */
214 int last_enqueue; /* last time a buffer was enqueued */
215 int last_dispatch; /* age of the most recently dispatched buffer */
216 int ctxOwner; /* last context to upload state */
218 int pf_enabled; /* is pageflipping allowed? */
220 int pf_current_page; /* which buffer is being displayed? */
221 int perf_boxes; /* performance boxes to be displayed */
222 int width, height; /* screen size in pixels */
224 drm_handle_t front_handle;
228 drm_handle_t back_handle;
232 drm_handle_t depth_handle;
236 drm_handle_t tex_handle;
239 int log_tex_granularity;
241 int rotation; /* 0, 90, 180 or 270 */
245 int virtualX, virtualY;
247 unsigned int front_tiled;
248 unsigned int back_tiled;
249 unsigned int depth_tiled;
250 unsigned int rotated_tiled;
251 unsigned int rotated2_tiled;
262 /* fill out some space for old userspace triple buffer */
263 drm_handle_t unused_handle;
264 __u32 unused1, unused2, unused3;
266 /* buffer object handles for static buffers. May change
267 * over the lifetime of the client.
269 __u32 front_bo_handle;
270 __u32 back_bo_handle;
271 __u32 unused_bo_handle;
272 __u32 depth_bo_handle;
276 /* due to userspace building against these headers we need some compat here */
277 #define planeA_x pipeA_x
278 #define planeA_y pipeA_y
279 #define planeA_w pipeA_w
280 #define planeA_h pipeA_h
281 #define planeB_x pipeB_x
282 #define planeB_y pipeB_y
283 #define planeB_w pipeB_w
284 #define planeB_h pipeB_h
286 /* Flags for perf_boxes
288 #define I915_BOX_RING_EMPTY 0x1
289 #define I915_BOX_FLIP 0x2
290 #define I915_BOX_WAIT 0x4
291 #define I915_BOX_TEXTURE_LOAD 0x8
292 #define I915_BOX_LOST_CONTEXT 0x10
295 * i915 specific ioctls.
 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) i.e.
 * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offsets
 * against DRM_COMMAND_BASE and should lie in [0x0, 0x60).
301 #define DRM_I915_INIT 0x00
302 #define DRM_I915_FLUSH 0x01
303 #define DRM_I915_FLIP 0x02
304 #define DRM_I915_BATCHBUFFER 0x03
305 #define DRM_I915_IRQ_EMIT 0x04
306 #define DRM_I915_IRQ_WAIT 0x05
307 #define DRM_I915_GETPARAM 0x06
308 #define DRM_I915_SETPARAM 0x07
309 #define DRM_I915_ALLOC 0x08
310 #define DRM_I915_FREE 0x09
311 #define DRM_I915_INIT_HEAP 0x0a
312 #define DRM_I915_CMDBUFFER 0x0b
313 #define DRM_I915_DESTROY_HEAP 0x0c
314 #define DRM_I915_SET_VBLANK_PIPE 0x0d
315 #define DRM_I915_GET_VBLANK_PIPE 0x0e
316 #define DRM_I915_VBLANK_SWAP 0x0f
317 #define DRM_I915_HWS_ADDR 0x11
318 #define DRM_I915_GEM_INIT 0x13
319 #define DRM_I915_GEM_EXECBUFFER 0x14
320 #define DRM_I915_GEM_PIN 0x15
321 #define DRM_I915_GEM_UNPIN 0x16
322 #define DRM_I915_GEM_BUSY 0x17
323 #define DRM_I915_GEM_THROTTLE 0x18
324 #define DRM_I915_GEM_ENTERVT 0x19
325 #define DRM_I915_GEM_LEAVEVT 0x1a
326 #define DRM_I915_GEM_CREATE 0x1b
327 #define DRM_I915_GEM_PREAD 0x1c
328 #define DRM_I915_GEM_PWRITE 0x1d
329 #define DRM_I915_GEM_MMAP 0x1e
330 #define DRM_I915_GEM_SET_DOMAIN 0x1f
331 #define DRM_I915_GEM_SW_FINISH 0x20
332 #define DRM_I915_GEM_SET_TILING 0x21
333 #define DRM_I915_GEM_GET_TILING 0x22
334 #define DRM_I915_GEM_GET_APERTURE 0x23
335 #define DRM_I915_GEM_MMAP_GTT 0x24
336 #define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
337 #define DRM_I915_GEM_MADVISE 0x26
338 #define DRM_I915_OVERLAY_PUT_IMAGE 0x27
339 #define DRM_I915_OVERLAY_ATTRS 0x28
340 #define DRM_I915_GEM_EXECBUFFER2 0x29
341 #define DRM_I915_GEM_EXECBUFFER2_WR DRM_I915_GEM_EXECBUFFER2
342 #define DRM_I915_GET_SPRITE_COLORKEY 0x2a
343 #define DRM_I915_SET_SPRITE_COLORKEY 0x2b
344 #define DRM_I915_GEM_WAIT 0x2c
345 #define DRM_I915_GEM_CONTEXT_CREATE 0x2d
346 #define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
347 #define DRM_I915_GEM_SET_CACHING 0x2f
348 #define DRM_I915_GEM_GET_CACHING 0x30
349 #define DRM_I915_REG_READ 0x31
350 #define DRM_I915_GET_RESET_STATS 0x32
351 #define DRM_I915_GEM_USERPTR 0x33
352 #define DRM_I915_GEM_CONTEXT_GETPARAM 0x34
353 #define DRM_I915_GEM_CONTEXT_SETPARAM 0x35
354 #define DRM_I915_PERF_OPEN 0x36
355 #define DRM_I915_PERF_ADD_CONFIG 0x37
356 #define DRM_I915_PERF_REMOVE_CONFIG 0x38
357 #define DRM_I915_QUERY 0x39
358 /* Must be kept compact -- no holes */
360 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
361 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
362 #define DRM_IOCTL_I915_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
363 #define DRM_IOCTL_I915_BATCHBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
364 #define DRM_IOCTL_I915_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
365 #define DRM_IOCTL_I915_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
366 #define DRM_IOCTL_I915_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
367 #define DRM_IOCTL_I915_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
368 #define DRM_IOCTL_I915_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
369 #define DRM_IOCTL_I915_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
370 #define DRM_IOCTL_I915_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
371 #define DRM_IOCTL_I915_CMDBUFFER DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
372 #define DRM_IOCTL_I915_DESTROY_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
373 #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
374 #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
375 #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
376 #define DRM_IOCTL_I915_HWS_ADDR DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
377 #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
378 #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
379 #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
380 #define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
381 #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
382 #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
383 #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
384 #define DRM_IOCTL_I915_GEM_SET_CACHING DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
385 #define DRM_IOCTL_I915_GEM_GET_CACHING DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
386 #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
387 #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
388 #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
389 #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
390 #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
391 #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
392 #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
393 #define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
394 #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
395 #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
396 #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
397 #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
398 #define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
399 #define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
400 #define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
401 #define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
402 #define DRM_IOCTL_I915_OVERLAY_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
403 #define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
404 #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
405 #define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
406 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
407 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
408 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
409 #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
410 #define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
411 #define DRM_IOCTL_I915_GEM_USERPTR DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
412 #define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
413 #define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
414 #define DRM_IOCTL_I915_PERF_OPEN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
415 #define DRM_IOCTL_I915_PERF_ADD_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
416 #define DRM_IOCTL_I915_PERF_REMOVE_CONFIG DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
417 #define DRM_IOCTL_I915_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
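
/*
 * Illustrative sketch (not part of the uAPI): the ioctls above are issued on
 * the DRM device fd, e.g. creating a 4KiB buffer object and reading back its
 * handle:
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
 *		handle = create.handle;
 */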
419 /* Allow drivers to submit batchbuffers directly to hardware, relying
420 * on the security mechanisms provided by hardware.
422 typedef struct drm_i915_batchbuffer {
423 int start; /* agp offset */
424 int used; /* nr bytes in use */
425 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
426 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects; /* multipass with multiple cliprects? */
428 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
429 } drm_i915_batchbuffer_t;
431 /* As above, but pass a pointer to userspace buffer which can be
432 * validated by the kernel prior to sending to hardware.
434 typedef struct _drm_i915_cmdbuffer {
435 char __user *buf; /* pointer to userspace command buffer */
436 int sz; /* nr bytes in buf */
437 int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
438 int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
	int num_cliprects; /* multipass with multiple cliprects? */
440 struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
441 } drm_i915_cmdbuffer_t;
/* Userspace can request & wait on irqs:
445 typedef struct drm_i915_irq_emit {
447 } drm_i915_irq_emit_t;
449 typedef struct drm_i915_irq_wait {
451 } drm_i915_irq_wait_t;
454 * Different modes of per-process Graphics Translation Table,
455 * see I915_PARAM_HAS_ALIASING_PPGTT
457 #define I915_GEM_PPGTT_NONE 0
458 #define I915_GEM_PPGTT_ALIASING 1
459 #define I915_GEM_PPGTT_FULL 2
461 /* Ioctl to query kernel params:
463 #define I915_PARAM_IRQ_ACTIVE 1
464 #define I915_PARAM_ALLOW_BATCHBUFFER 2
465 #define I915_PARAM_LAST_DISPATCH 3
466 #define I915_PARAM_CHIPSET_ID 4
467 #define I915_PARAM_HAS_GEM 5
468 #define I915_PARAM_NUM_FENCES_AVAIL 6
469 #define I915_PARAM_HAS_OVERLAY 7
470 #define I915_PARAM_HAS_PAGEFLIPPING 8
471 #define I915_PARAM_HAS_EXECBUF2 9
472 #define I915_PARAM_HAS_BSD 10
473 #define I915_PARAM_HAS_BLT 11
474 #define I915_PARAM_HAS_RELAXED_FENCING 12
475 #define I915_PARAM_HAS_COHERENT_RINGS 13
476 #define I915_PARAM_HAS_EXEC_CONSTANTS 14
477 #define I915_PARAM_HAS_RELAXED_DELTA 15
478 #define I915_PARAM_HAS_GEN7_SOL_RESET 16
479 #define I915_PARAM_HAS_LLC 17
480 #define I915_PARAM_HAS_ALIASING_PPGTT 18
481 #define I915_PARAM_HAS_WAIT_TIMEOUT 19
482 #define I915_PARAM_HAS_SEMAPHORES 20
483 #define I915_PARAM_HAS_PRIME_VMAP_FLUSH 21
484 #define I915_PARAM_HAS_VEBOX 22
485 #define I915_PARAM_HAS_SECURE_BATCHES 23
486 #define I915_PARAM_HAS_PINNED_BATCHES 24
487 #define I915_PARAM_HAS_EXEC_NO_RELOC 25
488 #define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
489 #define I915_PARAM_HAS_WT 27
490 #define I915_PARAM_CMD_PARSER_VERSION 28
491 #define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
492 #define I915_PARAM_MMAP_VERSION 30
493 #define I915_PARAM_HAS_BSD2 31
494 #define I915_PARAM_REVISION 32
495 #define I915_PARAM_SUBSLICE_TOTAL 33
496 #define I915_PARAM_EU_TOTAL 34
497 #define I915_PARAM_HAS_GPU_RESET 35
498 #define I915_PARAM_HAS_RESOURCE_STREAMER 36
499 #define I915_PARAM_HAS_EXEC_SOFTPIN 37
500 #define I915_PARAM_HAS_POOLED_EU 38
501 #define I915_PARAM_MIN_EU_IN_POOL 39
502 #define I915_PARAM_MMAP_GTT_VERSION 40
 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user-defined execution
 * priorities and whether the driver will attempt to execute batches in
 * priority order. The param returns a capability bitmask; a nonzero value
 * implies that the scheduler is enabled, with different features present
 * according to the mask.
510 * The initial priority for each batch is supplied by the context and is
511 * controlled via I915_CONTEXT_PARAM_PRIORITY.
513 #define I915_PARAM_HAS_SCHEDULER 41
514 #define I915_SCHEDULER_CAP_ENABLED (1ul << 0)
515 #define I915_SCHEDULER_CAP_PRIORITY (1ul << 1)
516 #define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
517 #define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
519 #define I915_PARAM_HUC_STATUS 42
521 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
522 * synchronisation with implicit fencing on individual objects.
523 * See EXEC_OBJECT_ASYNC.
525 #define I915_PARAM_HAS_EXEC_ASYNC 43
527 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
528 * both being able to pass in a sync_file fd to wait upon before executing,
529 * and being able to return a new sync_file fd that is signaled when the
530 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
532 #define I915_PARAM_HAS_EXEC_FENCE 44
534 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
 * user-specified buffers for post-mortem debugging of GPU hangs. See
536 * EXEC_OBJECT_CAPTURE.
538 #define I915_PARAM_HAS_EXEC_CAPTURE 45
540 #define I915_PARAM_SLICE_MASK 46
542 /* Assuming it's uniform for each slice, this queries the mask of subslices
543 * per-slice for this system.
545 #define I915_PARAM_SUBSLICE_MASK 47
548 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
549 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
551 #define I915_PARAM_HAS_EXEC_BATCH_FIRST 48
553 /* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
554 * drm_i915_gem_exec_fence structures. See I915_EXEC_FENCE_ARRAY.
556 #define I915_PARAM_HAS_EXEC_FENCE_ARRAY 49
559 * Query whether every context (both per-file default and user created) is
560 * isolated (insofar as HW supports). If this parameter is not true, then
561 * freshly created contexts may inherit values from an existing context,
562 * rather than default HW values. If true, it also ensures (insofar as HW
563 * supports) that all state set by this context will not leak to any other
 * As not every engine across every gen supports contexts, the returned
 * value reports the support of context isolation for individual engines by
 * returning a bitmask with each engine class set to true if that class supports
571 #define I915_PARAM_HAS_CONTEXT_ISOLATION 50
573 /* Frequency of the command streamer timestamps given by the *_TIMESTAMP
574 * registers. This used to be fixed per platform but from CNL onwards, this
575 * might vary depending on the parts.
577 #define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
580 * Once upon a time we supposed that writes through the GGTT would be
581 * immediately in physical memory (once flushed out of the CPU path). However,
582 * on a few different processors and chipsets, this is not necessarily the case
583 * as the writes appear to be buffered internally. Thus a read of the backing
584 * storage (physical memory) via a different path (with different physical tags
585 * to the indirect write via the GGTT) will see stale values from before
586 * the GGTT write. Inside the kernel, we can for the most part keep track of
587 * the different read/write domains in use (e.g. set-domain), but the assumption
588 * of coherency is baked into the ABI, hence reporting its true state in this
591 * Reports true when writes via mmap_gtt are immediately visible following an
592 * lfence to flush the WCB.
 * Reports false when writes via mmap_gtt are indeterminately delayed in an
595 * internal buffer and are _not_ immediately visible to third parties accessing
596 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
597 * communications channel when reporting false is strongly disadvised.
599 #define I915_PARAM_MMAP_GTT_COHERENT 52
601 /* Must be kept compact -- no holes and well documented */
603 typedef struct drm_i915_getparam {
606 * WARNING: Using pointers instead of fixed-size u64 means we need to write
607 * compat32 code. Don't repeat this mistake.
610 } drm_i915_getparam_t;
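
/*
 * Illustrative sketch (not part of the uAPI): querying one of the parameters
 * defined above; the param/value members used here follow the full
 * drm_i915_getparam definition (value points at an int receiving the result).
 *
 *	int value = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_HAS_EXEC_ASYNC,
 *		.value = &value,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */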
612 /* Ioctl to set kernel params:
614 #define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
615 #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
616 #define I915_SETPARAM_ALLOW_BATCHBUFFER 3
617 #define I915_SETPARAM_NUM_USED_FENCES 4
618 /* Must be kept compact -- no holes */
620 typedef struct drm_i915_setparam {
623 } drm_i915_setparam_t;
625 /* A memory manager for regions of shared memory:
627 #define I915_MEM_REGION_AGP 1
629 typedef struct drm_i915_mem_alloc {
633 int __user *region_offset; /* offset from start of fb or agp */
634 } drm_i915_mem_alloc_t;
636 typedef struct drm_i915_mem_free {
639 } drm_i915_mem_free_t;
641 typedef struct drm_i915_mem_init_heap {
645 } drm_i915_mem_init_heap_t;
/* Allow memory manager to be torn down and re-initialized (e.g. on
650 typedef struct drm_i915_mem_destroy_heap {
652 } drm_i915_mem_destroy_heap_t;
654 /* Allow X server to configure which pipes to monitor for vblank signals
656 #define DRM_I915_VBLANK_PIPE_A 1
657 #define DRM_I915_VBLANK_PIPE_B 2
659 typedef struct drm_i915_vblank_pipe {
661 } drm_i915_vblank_pipe_t;
663 /* Schedule buffer swap at given vertical blank:
665 typedef struct drm_i915_vblank_swap {
666 drm_drawable_t drawable;
667 enum drm_vblank_seq_type seqtype;
668 unsigned int sequence;
669 } drm_i915_vblank_swap_t;
671 typedef struct drm_i915_hws_addr {
673 } drm_i915_hws_addr_t;
675 struct drm_i915_gem_init {
677 * Beginning offset in the GTT to be managed by the DRM memory
682 * Ending offset in the GTT to be managed by the DRM memory
688 struct drm_i915_gem_create {
690 * Requested size for the object.
692 * The (page-aligned) allocated size for the object will be returned.
696 * Returned handle for the object.
698 * Object handles are nonzero.
704 struct drm_i915_gem_pread {
705 /** Handle for the object being read. */
708 /** Offset into the object to read from */
710 /** Length of data to read */
713 * Pointer to write the data into.
715 * This is a fixed-size type for 32/64 compatibility.
720 struct drm_i915_gem_pwrite {
721 /** Handle for the object being written to. */
724 /** Offset into the object to write to */
726 /** Length of data to write */
729 * Pointer to read the data from.
731 * This is a fixed-size type for 32/64 compatibility.
736 struct drm_i915_gem_mmap {
737 /** Handle for the object being mapped. */
740 /** Offset in the object to map. */
743 * Length of data to map.
745 * The value will be page-aligned.
749 * Returned pointer the data was mapped at.
751 * This is a fixed-size type for 32/64 compatibility.
756 * Flags for extended behaviour.
758 * Added in version 2.
761 #define I915_MMAP_WC 0x1
764 struct drm_i915_gem_mmap_gtt {
765 /** Handle for the object being mapped. */
769 * Fake offset to use for subsequent mmap call
771 * This is a fixed-size type for 32/64 compatibility.
776 struct drm_i915_gem_set_domain {
777 /** Handle for the object */
780 /** New read domains */
783 /** New write domain */
787 struct drm_i915_gem_sw_finish {
788 /** Handle for the object */
792 struct drm_i915_gem_relocation_entry {
794 * Handle of the buffer being pointed to by this relocation entry.
796 * It's appealing to make this be an index into the mm_validate_entry
797 * list to refer to the buffer, but this allows the driver to create
798 * a relocation list for state buffers and not re-write it per
799 * exec using the buffer.
804 * Value to be added to the offset of the target buffer to make up
805 * the relocation entry.
809 /** Offset in the buffer the relocation entry will be written into */
813 * Offset value of the target buffer that the relocation entry was last
816 * If the buffer has the same offset as last time, we can skip syncing
817 * and writing the relocation. This value is written back out by
818 * the execbuffer ioctl when the relocation is written.
820 __u64 presumed_offset;
823 * Target memory domains read by this operation.
828 * Target memory domains written by this operation.
830 * Note that only one domain may be written by the whole
831 * execbuffer operation, so that where there are conflicts,
832 * the application will get -EINVAL back.
838 * Intel memory domains
840 * Most of these just align with the various caches in
841 * the system and are used to flush and invalidate as
842 * objects end up cached in different domains.
845 #define I915_GEM_DOMAIN_CPU 0x00000001
846 /** Render cache, used by 2D and 3D drawing */
847 #define I915_GEM_DOMAIN_RENDER 0x00000002
848 /** Sampler cache, used by texture engine */
849 #define I915_GEM_DOMAIN_SAMPLER 0x00000004
850 /** Command queue, used to load batch buffers */
851 #define I915_GEM_DOMAIN_COMMAND 0x00000008
852 /** Instruction cache, used by shader programs */
853 #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
854 /** Vertex address cache */
855 #define I915_GEM_DOMAIN_VERTEX 0x00000020
856 /** GTT domain - aperture and scanout */
857 #define I915_GEM_DOMAIN_GTT 0x00000040
858 /** WC domain - uncached access */
859 #define I915_GEM_DOMAIN_WC 0x00000080
862 struct drm_i915_gem_exec_object {
864 * User's handle for a buffer to be bound into the GTT for this
869 /** Number of relocations to be performed on this buffer */
870 __u32 relocation_count;
872 * Pointer to array of struct drm_i915_gem_relocation_entry containing
873 * the relocations to be performed in this buffer.
877 /** Required alignment in graphics aperture */
881 * Returned value of the updated offset of the object, for future
882 * presumed_offset writes.
887 struct drm_i915_gem_execbuffer {
889 * List of buffers to be validated with their relocations to be
 * performed on them.
892 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
894 * These buffers must be listed in an order such that all relocations
895 * a buffer is performing refer to buffers that have already appeared
896 * in the validate list.
901 /** Offset in the batchbuffer to start execution from. */
902 __u32 batch_start_offset;
903 /** Bytes used in batchbuffer from batch_start_offset */
908 /** This is a struct drm_clip_rect *cliprects */
912 struct drm_i915_gem_exec_object2 {
914 * User's handle for a buffer to be bound into the GTT for this
919 /** Number of relocations to be performed on this buffer */
920 __u32 relocation_count;
922 * Pointer to array of struct drm_i915_gem_relocation_entry containing
923 * the relocations to be performed in this buffer.
927 /** Required alignment in graphics aperture */
931 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
932 * the user with the GTT offset at which this object will be pinned.
933 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
934 * presumed_offset of the object.
935 * During execbuffer2 the kernel populates it with the value of the
936 * current GTT offset of the object, for future presumed_offset writes.
940 #define EXEC_OBJECT_NEEDS_FENCE (1<<0)
941 #define EXEC_OBJECT_NEEDS_GTT (1<<1)
942 #define EXEC_OBJECT_WRITE (1<<2)
943 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
944 #define EXEC_OBJECT_PINNED (1<<4)
945 #define EXEC_OBJECT_PAD_TO_SIZE (1<<5)
946 /* The kernel implicitly tracks GPU activity on all GEM objects, and
947 * synchronises operations with outstanding rendering. This includes
948 * rendering on other devices if exported via dma-buf. However, sometimes
949 * this tracking is too coarse and the user knows better. For example,
950 * if the object is split into non-overlapping ranges shared between different
951 * clients or engines (i.e. suballocating objects), the implicit tracking
 * by the kernel assumes that each operation affects the whole object rather
953 * than an individual range, causing needless synchronisation between clients.
954 * The kernel will also forgo any CPU cache flushes prior to rendering from
955 * the object as the client is expected to be also handling such domain
958 * The kernel maintains the implicit tracking in order to manage resources
959 * used by the GPU - this flag only disables the synchronisation prior to
960 * rendering with this object in this execbuf.
 * Opting out of implicit synchronisation requires the user to do its own
963 * explicit tracking to avoid rendering corruption. See, for example,
964 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
966 #define EXEC_OBJECT_ASYNC (1<<6)
967 /* Request that the contents of this execobject be copied into the error
968 * state upon a GPU hang involving this batch for post-mortem debugging.
969 * These buffers are recorded in no particular order as "user" in
970 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
971 * if the kernel supports this flag.
973 #define EXEC_OBJECT_CAPTURE (1<<7)
974 /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
975 #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
985 struct drm_i915_gem_exec_fence {
987 * User's handle for a drm_syncobj to wait on or signal.
991 #define I915_EXEC_FENCE_WAIT (1<<0)
992 #define I915_EXEC_FENCE_SIGNAL (1<<1)
993 #define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
997 struct drm_i915_gem_execbuffer2 {
999 * List of gem_exec_object2 structs
1004 /** Offset in the batchbuffer to start execution from. */
1005 __u32 batch_start_offset;
1006 /** Bytes used in batchbuffer from batch_start_offset */
1010 __u32 num_cliprects;
1012 * This is a struct drm_clip_rect *cliprects if I915_EXEC_FENCE_ARRAY
1013 * is not set. If I915_EXEC_FENCE_ARRAY is set, then this is a
1014 * struct drm_i915_gem_exec_fence *fences.
1016 __u64 cliprects_ptr;
1017 #define I915_EXEC_RING_MASK (0x3f)
1018 #define I915_EXEC_DEFAULT (0<<0)
1019 #define I915_EXEC_RENDER (1<<0)
1020 #define I915_EXEC_BSD (2<<0)
1021 #define I915_EXEC_BLT (3<<0)
1022 #define I915_EXEC_VEBOX (4<<0)
1024 /* Used for switching the constants addressing mode on gen4+ RENDER ring.
1025 * Gen6+ only supports relative addressing to dynamic state (default) and
1026 * absolute addressing.
1028 * These flags are ignored for the BSD and BLT rings.
1030 #define I915_EXEC_CONSTANTS_MASK (3<<6)
1031 #define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
1032 #define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
1033 #define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
1035 __u64 rsvd1; /* now used for context info */
1039 /** Resets the SO write offset registers for transform feedback on gen7. */
1040 #define I915_EXEC_GEN7_SOL_RESET (1<<8)
1042 /** Request a privileged ("secure") batch buffer. Note only available for
1043 * DRM_ROOT_ONLY | DRM_MASTER processes.
1045 #define I915_EXEC_SECURE (1<<9)
1047 /** Inform the kernel that the batch is and will always be pinned. This
1048 * negates the requirement for a workaround to be performed to avoid
1049 * an incoherent CS (such as can be found on 830/845). If this flag is
1050 * not passed, the kernel will endeavour to make sure the batch is
1051 * coherent with the CS before execution. If this flag is passed,
1052 * userspace assumes the responsibility for ensuring the same.
1054 #define I915_EXEC_IS_PINNED (1<<10)
1056 /** Provide a hint to the kernel that the command stream and auxiliary
 * state buffers already hold the correct presumed addresses and so the
1058 * relocation process may be skipped if no buffers need to be moved in
1059 * preparation for the execbuffer.
1061 #define I915_EXEC_NO_RELOC (1<<11)
1063 /** Use the reloc.handle as an index into the exec object array rather
1064 * than as the per-file handle.
1066 #define I915_EXEC_HANDLE_LUT (1<<12)
1068 /** Used for switching BSD rings on the platforms with two BSD rings */
1069 #define I915_EXEC_BSD_SHIFT (13)
1070 #define I915_EXEC_BSD_MASK (3 << I915_EXEC_BSD_SHIFT)
1071 /* default ping-pong mode */
1072 #define I915_EXEC_BSD_DEFAULT (0 << I915_EXEC_BSD_SHIFT)
1073 #define I915_EXEC_BSD_RING1 (1 << I915_EXEC_BSD_SHIFT)
1074 #define I915_EXEC_BSD_RING2 (2 << I915_EXEC_BSD_SHIFT)
1076 /** Tell the kernel that the batchbuffer is processed by
1077 * the resource streamer.
1079 #define I915_EXEC_RESOURCE_STREAMER (1<<15)
/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represents
1082 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1085 * Returns -EINVAL if the sync_file fd cannot be found.
1087 #define I915_EXEC_FENCE_IN (1<<16)
1089 /* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
1090 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
 * to the caller, and it should be closed after use. (The fd is a regular
1092 * file descriptor and will be cleaned up on process termination. It holds
1093 * a reference to the request, but nothing else.)
 * The sync_file fd can be combined with other sync_file fds and passed either
1096 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
1097 * will only occur after this request completes), or to other devices.
1099 * Using I915_EXEC_FENCE_OUT requires use of
1100 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
1101 * back to userspace. Failure to do so will cause the out-fence to always
1102 * be reported as zero, and the real fence fd to be leaked.
1104 #define I915_EXEC_FENCE_OUT (1<<17)
1107 * Traditionally the execbuf ioctl has only considered the final element in
1108 * the execobject[] to be the executable batch. Often though, the client
 * will know the batch object prior to construction, and being able to place
1110 * it into the execobject[] array first can simplify the relocation tracking.
1111 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
 * execobject[] as the batch instead (the default is to use the last
1115 #define I915_EXEC_BATCH_FIRST (1<<18)
/* Setting I915_EXEC_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
1118 * define an array of i915_gem_exec_fence structures which specify a set of
1119 * dma fences to wait upon or signal.
1121 #define I915_EXEC_FENCE_ARRAY (1<<19)
1123 #define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_ARRAY<<1))
1125 #define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
1126 #define i915_execbuffer2_set_context_id(eb2, context) \
1127 (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
1128 #define i915_execbuffer2_get_context_id(eb2) \
1129 ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
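
/*
 * Illustrative sketch (not part of the uAPI): submitting two objects, the
 * last of which is the batch by default, on behalf of a context. The
 * buffers_ptr/buffer_count/batch_len members used here follow the full
 * drm_i915_gem_execbuffer2 definition, and obj[].handle is assumed to have
 * been filled in with valid GEM handles beforehand.
 *
 *	struct drm_i915_gem_exec_object2 obj[2] = { };
 *	struct drm_i915_gem_execbuffer2 eb = {
 *		.buffers_ptr = (__u64)(uintptr_t)obj,
 *		.buffer_count = 2,
 *		.batch_len = batch_bytes,
 *		.flags = I915_EXEC_RENDER,
 *	};
 *
 *	i915_execbuffer2_set_context_id(eb, ctx_id);
 *	ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
 */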
1131 struct drm_i915_gem_pin {
1132 /** Handle of the buffer to be pinned. */
1136 /** alignment required within the aperture */
1139 /** Returned GTT offset of the buffer. */
1143 struct drm_i915_gem_unpin {
1144 /** Handle of the buffer to be unpinned. */
1149 struct drm_i915_gem_busy {
1150 /** Handle of the buffer to check for busy */
1153 /** Return busy status
1155 * A return of 0 implies that the object is idle (after
1156 * having flushed any pending activity), and a non-zero return that
1157 * the object is still in-flight on the GPU. (The GPU has not yet
1158 * signaled completion for all pending requests that reference the
1159 * object.) An object is guaranteed to become idle eventually (so
1160 * long as no new GPU commands are executed upon it). Due to the
1161 * asynchronous nature of the hardware, an object reported
1162 * as busy may become idle before the ioctl is completed.
1164 * Furthermore, if the object is busy, which engine is busy is only
1165 * provided as a guide and only indirectly by reporting its class
1166 * (there may be more than one engine in each class). There are race
1167 * conditions which prevent the report of which engines are busy from
1168 * being always accurate. However, the converse is not true. If the
1169 * object is idle, the result of the ioctl, that all engines are idle,
1172 * The returned dword is split into two fields to indicate both
 * the engine classes on which the object is being read, and the
 * engine class on which it is currently being written (if any).
 * The low word (bits 0:15) indicates if the object is being written
1177 * to by any engine (there can only be one, as the GEM implicit
1178 * synchronisation rules force writes to be serialised). Only the
1179 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
1180 * 1 not 0 etc) for the last write is reported.
 * The high word (bits 16:31) is a bitmask of which engine classes
1183 * are currently reading from the object. Multiple engines may be
1184 * reading from the object simultaneously.
1186 * The value of each engine class is the same as specified in the
1187 * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e.
1188 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
1189 * reported as active itself. Some hardware may have parallel
1190 * execution engines, e.g. multiple media engines, which are
1191 * mapped to the same class identifier and so are not separately
1192 * reported for busyness.
1195 * Only the boolean result of this query is reliable; that is whether
1196 * the object is idle or busy. The report of which engines are busy
1197 * should be only used as a heuristic.
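 *
 * As an illustrative sketch (args being the structure passed to the ioctl,
 * with the status word in its busy member), the two fields can be split as:
 *
 *	__u16 write_class_plus_one = args.busy & 0xffff;
 *	__u16 read_class_mask = args.busy >> 16;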
1205 * GPU access is not coherent with cpu caches. Default for machines without an
1208 #define I915_CACHING_NONE 0
1210 * I915_CACHING_CACHED
1212 * GPU access is coherent with cpu caches and furthermore the data is cached in
1213 * last-level caches shared between cpu cores and the gpu GT. Default on
1214 * machines with HAS_LLC.
1216 #define I915_CACHING_CACHED 1
1218 * I915_CACHING_DISPLAY
1220 * Special GPU caching mode which is coherent with the scanout engines.
1221 * Transparently falls back to I915_CACHING_NONE on platforms where no special
1222 * cache mode (like write-through or gfdt flushing) is available. The kernel
1223 * automatically sets this mode when using a buffer as a scanout target.
1224 * Userspace can manually set this mode to avoid a costly stall and clflush in
1225 * the hotpath of drawing the first frame.
1227 #define I915_CACHING_DISPLAY 2
1229 struct drm_i915_gem_caching {
1231 * Handle of the buffer to set/get the caching level of. */
 * Caching level to apply or return value
1237 * bits0-15 are for generic caching control (i.e. the above defined
1238 * values). bits16-31 are reserved for platform-specific variations
1239 * (e.g. l3$ caching on gen7). */
1243 #define I915_TILING_NONE 0
1244 #define I915_TILING_X 1
1245 #define I915_TILING_Y 2
1246 #define I915_TILING_LAST I915_TILING_Y
1248 #define I915_BIT_6_SWIZZLE_NONE 0
1249 #define I915_BIT_6_SWIZZLE_9 1
1250 #define I915_BIT_6_SWIZZLE_9_10 2
1251 #define I915_BIT_6_SWIZZLE_9_11 3
1252 #define I915_BIT_6_SWIZZLE_9_10_11 4
1253 /* Not seen by userland */
1254 #define I915_BIT_6_SWIZZLE_UNKNOWN 5
1255 /* Seen by userland. */
1256 #define I915_BIT_6_SWIZZLE_9_17 6
1257 #define I915_BIT_6_SWIZZLE_9_10_17 7
1259 struct drm_i915_gem_set_tiling {
1260 /** Handle of the buffer to have its tiling state updated */
1264 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1267 * This value is to be set on request, and will be updated by the
1268 * kernel on successful return with the actual chosen tiling layout.
1270 * The tiling mode may be demoted to I915_TILING_NONE when the system
1271 * has bit 6 swizzling that can't be managed correctly by GEM.
1273 * Buffer contents become undefined when changing tiling_mode.
1278 * Stride in bytes for the object when in I915_TILING_X or
1284 * Returned address bit 6 swizzling required for CPU access through
1290 struct drm_i915_gem_get_tiling {
1291 /** Handle of the buffer to get tiling state for. */
1295 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1301 * Returned address bit 6 swizzling required for CPU access through
1307 * Returned address bit 6 swizzling required for CPU access through
1308 * mmap mapping whilst bound.
1310 __u32 phys_swizzle_mode;
1313 struct drm_i915_gem_get_aperture {
1314 /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
1318 * Available space in the aperture used by i915_gem_execbuffer, in
1321 __u64 aper_available_size;
1324 struct drm_i915_get_pipe_from_crtc_id {
1325 /** ID of CRTC being requested **/
1328 /** pipe of requested CRTC **/
1332 #define I915_MADV_WILLNEED 0
1333 #define I915_MADV_DONTNEED 1
1334 #define __I915_MADV_PURGED 2 /* internal state */
1336 struct drm_i915_gem_madvise {
1337 /** Handle of the buffer to change the backing store advice */
1340 /* Advice: either the buffer will be needed again in the near future,
 * or won't be and could be discarded under memory pressure.
1345 /** Whether the backing store still exists. */
1350 #define I915_OVERLAY_TYPE_MASK 0xff
1351 #define I915_OVERLAY_YUV_PLANAR 0x01
1352 #define I915_OVERLAY_YUV_PACKED 0x02
1353 #define I915_OVERLAY_RGB 0x03
1355 #define I915_OVERLAY_DEPTH_MASK 0xff00
1356 #define I915_OVERLAY_RGB24 0x1000
1357 #define I915_OVERLAY_RGB16 0x2000
1358 #define I915_OVERLAY_RGB15 0x3000
1359 #define I915_OVERLAY_YUV422 0x0100
1360 #define I915_OVERLAY_YUV411 0x0200
1361 #define I915_OVERLAY_YUV420 0x0300
1362 #define I915_OVERLAY_YUV410 0x0400
1364 #define I915_OVERLAY_SWAP_MASK 0xff0000
1365 #define I915_OVERLAY_NO_SWAP 0x000000
1366 #define I915_OVERLAY_UV_SWAP 0x010000
1367 #define I915_OVERLAY_Y_SWAP 0x020000
1368 #define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
1370 #define I915_OVERLAY_FLAGS_MASK 0xff000000
1371 #define I915_OVERLAY_ENABLE 0x01000000
1373 struct drm_intel_overlay_put_image {
1374 /* various flags and src format description */
1376 /* source picture description */
1378 /* stride values and offsets are in bytes, buffer relative */
1379 __u16 stride_Y; /* stride for packed formats */
	__u32 offset_Y; /* offset for packed formats */
1387 /* to compensate the scaling factors for partially covered surfaces */
1388 __u16 src_scan_width;
1389 __u16 src_scan_height;
1390 /* output crtc description */
1399 #define I915_OVERLAY_UPDATE_ATTRS (1<<0)
1400 #define I915_OVERLAY_UPDATE_GAMMA (1<<1)
1401 #define I915_OVERLAY_DISABLE_DEST_COLORKEY (1<<2)
1402 struct drm_intel_overlay_attrs {
1417 * Intel sprite handling
1419 * Color keying works with a min/mask/max tuple. Both source and destination
1420 * color keying is allowed.
1423 * Sprite pixels within the min & max values, masked against the color channels
1424 * specified in the mask field, will be transparent. All other pixels will
1425 * be displayed on top of the primary plane. For RGB surfaces, only the min
1426 * and mask fields will be used; ranged compares are not allowed.
1428 * Destination keying:
1429 * Primary plane pixels that match the min value, masked against the color
1430 * channels specified in the mask field, will be replaced by corresponding
1431 * pixels from the sprite plane.
1433 * Note that source & destination keying are exclusive; only one can be
1434 * active on a given plane.
1437 #define I915_SET_COLORKEY_NONE (1<<0) /* Deprecated. Instead set
1438 * flags==0 to disable colorkeying.
1440 #define I915_SET_COLORKEY_DESTINATION (1<<1)
1441 #define I915_SET_COLORKEY_SOURCE (1<<2)
1442 struct drm_intel_sprite_colorkey {
1450 struct drm_i915_gem_wait {
1451 /** Handle of BO we shall wait on */
	/** Number of nanoseconds to wait. Returns the time remaining. */
1458 struct drm_i915_gem_context_create {
	__u32 ctx_id; /* output: id of new context */
1463 struct drm_i915_gem_context_create_ext {
	__u32 ctx_id; /* output: id of new context */
1466 #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0)
1467 #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
1468 (-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1))
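
/*
 * Illustrative sketch (not part of the uAPI): creating a context and later
 * destroying it; the ctx_id member of the destroy struct follows the full
 * definition further below.
 *
 *	struct drm_i915_gem_context_create create = { };
 *	struct drm_i915_gem_context_destroy destroy = { };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
 *	destroy.ctx_id = create.ctx_id;
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY, &destroy);
 */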
1472 struct drm_i915_gem_context_param {
1476 #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1
1477 #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2
1478 #define I915_CONTEXT_PARAM_GTT_SIZE 0x3
1479 #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4
1480 #define I915_CONTEXT_PARAM_BANNABLE 0x5
1481 #define I915_CONTEXT_PARAM_PRIORITY 0x6
1482 #define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */
1483 #define I915_CONTEXT_DEFAULT_PRIORITY 0
1484 #define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */
1486 * When using the following param, value should be a pointer to
1487 * drm_i915_gem_context_param_sseu.
1489 #define I915_CONTEXT_PARAM_SSEU 0x7
 * Not all clients may want to attempt automatic recovery of a context after
1493 * a hang (for example, some clients may only submit very small incremental
1494 * batches relying on known logical state of previous batches which will never
1495 * recover correctly and each attempt will hang), and so would prefer that
1496 * the context is forever banned instead.
1498 * If set to false (0), after a reset, subsequent (and in flight) rendering
1499 * from this context is discarded, and the client will need to create a new
1500 * context to use instead.
1502 * If set to true (1), the kernel will automatically attempt to recover the
1503 * context by skipping the hanging batch and executing the next batch starting
1504 * from the default context state (discarding the incomplete logical context
1505 * state lost due to the reset).
1507 * On creation, all new contexts are marked as recoverable.
1509 #define I915_CONTEXT_PARAM_RECOVERABLE 0x8
1510 /* Must be kept compact -- no holes and well documented */
1516 * Context SSEU programming
 * It may be necessary for either functional or performance reasons to configure
1519 * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
1522 * This is done by configuring SSEU configuration using the below
1523 * @struct drm_i915_gem_context_param_sseu for every supported engine which
1524 * userspace intends to use.
1526 * Not all GPUs or engines support this functionality in which case an error
1527 * code -ENODEV will be returned.
 * Also, flexibility of possible SSEU configuration permutations varies between
 * GPU generations and software-imposed limitations. Requesting an unsupported
 * SSEU combination will return an error code of -EINVAL.
1533 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
1534 * favour of a single global setting.
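 *
 * As an illustrative sketch (the mask and EU values are placeholders, and the
 * ctx_id/param/size/value plumbing follows the full drm_i915_gem_context_param
 * definition), restricting an existing context to a single slice on its render
 * engine could look like:
 *
 *	struct drm_i915_gem_context_param_sseu sseu = {
 *		.engine = { I915_ENGINE_CLASS_RENDER, 0 },
 *		.slice_mask = 0x1,
 *		.subslice_mask = subslice_mask,
 *		.min_eus_per_subslice = min_eus,
 *		.max_eus_per_subslice = max_eus,
 *	};
 *	struct drm_i915_gem_context_param arg = {
 *		.ctx_id = ctx_id,
 *		.param = I915_CONTEXT_PARAM_SSEU,
 *		.size = sizeof(sseu),
 *		.value = (__u64)(uintptr_t)&sseu,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);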
1536 struct drm_i915_gem_context_param_sseu {
1538 * Engine class & instance to be configured or queried.
1540 struct i915_engine_class_instance engine;
1543 * Unused for now. Must be cleared to zero.
1548 * Mask of slices to enable for the context. Valid values are a subset
1549 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
1554 * Mask of subslices to enable for the context. Valid values are a
1555 * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
1557 __u64 subslice_mask;
1560 * Minimum/Maximum number of EUs to enable per subslice for the
 * context. min_eus_per_subslice must be less than or equal to
1562 * max_eus_per_subslice.
1564 __u16 min_eus_per_subslice;
1565 __u16 max_eus_per_subslice;
1568 * Unused for now. Must be cleared to zero.
1573 struct drm_i915_gem_context_create_ext_setparam {
1574 #define I915_CONTEXT_CREATE_EXT_SETPARAM 0
1575 struct i915_user_extension base;
1576 struct drm_i915_gem_context_param param;
1579 struct drm_i915_gem_context_destroy {
1585 * DRM_I915_GEM_VM_CREATE -
1587 * Create a new virtual memory address space (ppGTT) for use within a context
1588 * on the same file. Extensions can be provided to configure exactly how the
 * address space is set up upon creation.
1591 * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
1592 * returned in the outparam @id.
1594 * No flags are defined, with all bits reserved and must be zero.
 * An extension chain may be provided, starting with @extensions, and terminated
1597 * by the @next_extension being 0. Currently, no extensions are defined.
1599 * DRM_I915_GEM_VM_DESTROY -
1601 * Destroys a previously created VM id, specified in @id.
1603 * No extensions or flags are allowed currently, and so must be zero.
1605 struct drm_i915_gem_vm_control {
1611 struct drm_i915_reg_read {
1614 * For 64bit wide registers where the upper 32bits don't immediately
1615 * follow the lower 32bits, the offset of the lower 32bits must
1619 #define I915_REG_READ_8B_WA (1ul << 0)
1621 __u64 val; /* Return value */
1626 * Render engine timestamp - 0x2358 + 64bit - gen7+
1627 * - Note this register returns an invalid value if using the default
 * single instruction 8byte read; to work around that, pass the
 * flag I915_REG_READ_8B_WA in the offset field.
1633 struct drm_i915_reset_stats {
1637 /* All resets since boot/module reload, for all contexts */
1640 /* Number of batches lost when active in GPU, for this context */
1643 /* Number of batches lost pending for execution, for this context */
1644 __u32 batch_pending;
1649 struct drm_i915_gem_userptr {
1653 #define I915_USERPTR_READ_ONLY 0x1
1654 #define I915_USERPTR_UNSYNCHRONIZED 0x80000000
1656 * Returned handle for the object.
1658 * Object handles are nonzero.
1663 enum drm_i915_oa_format {
1664 I915_OA_FORMAT_A13 = 1, /* HSW only */
1665 I915_OA_FORMAT_A29, /* HSW only */
1666 I915_OA_FORMAT_A13_B8_C8, /* HSW only */
1667 I915_OA_FORMAT_B4_C8, /* HSW only */
1668 I915_OA_FORMAT_A45_B8_C8, /* HSW only */
1669 I915_OA_FORMAT_B4_C8_A16, /* HSW only */
1670 I915_OA_FORMAT_C4_B8, /* HSW+ */
1674 I915_OA_FORMAT_A12_B8_C8,
1675 I915_OA_FORMAT_A32u40_A4u32_B8_C8,
1677 I915_OA_FORMAT_MAX /* non-ABI */
1680 enum drm_i915_perf_property_id {
1682 * Open the stream for a specific context handle (as used with
1683 * execbuffer2). A stream opened for a specific context this way
1684 * won't typically require root privileges.
1686 DRM_I915_PERF_PROP_CTX_HANDLE = 1,
1689 * A value of 1 requests the inclusion of raw OA unit reports as
1690 * part of stream samples.
1692 DRM_I915_PERF_PROP_SAMPLE_OA,
 * The value specifies which set of OA unit metrics should be
 * configured, defining the contents of any OA unit reports.
1698 DRM_I915_PERF_PROP_OA_METRICS_SET,
1701 * The value specifies the size and layout of OA unit reports.
1703 DRM_I915_PERF_PROP_OA_FORMAT,
1706 * Specifying this property implicitly requests periodic OA unit
1707 * sampling and (at least on Haswell) the sampling frequency is derived
1708 * from this exponent as follows:
1710 * 80ns * 2^(period_exponent + 1)
1712 DRM_I915_PERF_PROP_OA_EXPONENT,
1714 DRM_I915_PERF_PROP_MAX /* non-ABI */
1717 struct drm_i915_perf_open_param {
1719 #define I915_PERF_FLAG_FD_CLOEXEC (1<<0)
1720 #define I915_PERF_FLAG_FD_NONBLOCK (1<<1)
1721 #define I915_PERF_FLAG_DISABLED (1<<2)
1723 /** The number of u64 (id, value) pairs */
1724 __u32 num_properties;
1727 * Pointer to array of u64 (id, value) pairs configuring the stream
1730 __u64 properties_ptr;
1734 * Enable data capture for a stream that was either opened in a disabled state
1735 * via I915_PERF_FLAG_DISABLED or was later disabled via
1736 * I915_PERF_IOCTL_DISABLE.
1738 * It is intended to be cheaper to disable and enable a stream than it may be
1739 * to close and re-open a stream with the same configuration.
1741 * It's undefined whether any pending data for the stream will be lost.
1743 #define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
1746 * Disable data capture for a stream.
 * It is an error to try to read a stream that is disabled.
1750 #define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
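
/*
 * Illustrative sketch (not part of the uAPI): opening an OA stream. The
 * metrics_set_id value is a placeholder that would normally come from sysfs,
 * and the flags member used below follows the full drm_i915_perf_open_param
 * definition.
 *
 *	__u64 props[] = {
 *		DRM_I915_PERF_PROP_SAMPLE_OA, 1,
 *		DRM_I915_PERF_PROP_OA_METRICS_SET, metrics_set_id,
 *		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
 *		DRM_I915_PERF_PROP_OA_EXPONENT, 16,
 *	};
 *	struct drm_i915_perf_open_param param = {
 *		.flags = I915_PERF_FLAG_FD_CLOEXEC,
 *		.num_properties = sizeof(props) / (2 * sizeof(__u64)),
 *		.properties_ptr = (__u64)(uintptr_t)props,
 *	};
 *
 *	stream_fd = ioctl(fd, DRM_IOCTL_I915_PERF_OPEN, &param);
 */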
1753 * Common to all i915 perf records
1755 struct drm_i915_perf_record_header {
1761 enum drm_i915_perf_record_type {
 * Samples are the workhorse record type whose contents are extensible
1765 * and defined when opening an i915 perf stream based on the given
1768 * Boolean properties following the naming convention
1769 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
1772 * The order of these sample properties given by userspace has no
 * effect on the ordering of data within a sample. The order is
1777 * struct drm_i915_perf_record_header header;
1779 * { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
1782 DRM_I915_PERF_RECORD_SAMPLE = 1,
1785 * Indicates that one or more OA reports were not written by the
1786 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
1787 * command collides with periodic sampling - which would be more likely
1788 * at higher sampling frequencies.
1790 DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
1793 * An error occurred that resulted in all pending OA reports being lost.
1795 DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
1797 DRM_I915_PERF_RECORD_MAX /* non-ABI */
1801 * Structure to upload perf dynamic configuration into the kernel.
1803 struct drm_i915_perf_oa_config {
1804 /** String formatted like "%08x-%04x-%04x-%04x-%012x" */
1808 __u32 n_boolean_regs;
1812 * These fields are pointers to tuples of u32 values (register address,
 * value). For example, the expected length of the buffer pointed to by
1814 * mux_regs_ptr is (2 * sizeof(u32) * n_mux_regs).
1817 __u64 boolean_regs_ptr;
1818 __u64 flex_regs_ptr;
1821 struct drm_i915_query_item {
1823 #define DRM_I915_QUERY_TOPOLOGY_INFO 1
1824 /* Must be kept compact -- no holes and well documented */
1827 * When set to zero by userspace, this is filled with the size of the
1828 * data to be written at the data_ptr pointer. The kernel sets this
1829 * value to a negative value to signal an error on a particular query
1835 * Unused for now. Must be cleared to zero.
1840 * Data will be written at the location pointed by data_ptr when the
1841 * value of length matches the length of the data to be written by the
1847 struct drm_i915_query {
1851 * Unused for now. Must be cleared to zero.
1856 * This points to an array of num_items drm_i915_query_item structures.
1862 * Data written by the kernel with query DRM_I915_QUERY_TOPOLOGY_INFO :
 * data: contains the 3 pieces of information:
1866 * - the slice mask with one bit per slice telling whether a slice is
1867 * available. The availability of slice X can be queried with the following
1870 * (data[X / 8] >> (X % 8)) & 1
1872 * - the subslice mask for each slice with one bit per subslice telling
1873 * whether a subslice is available. The availability of subslice Y in slice
 * X can be queried with the following formula:
1876 * (data[subslice_offset +
1877 * X * subslice_stride +
1878 * Y / 8] >> (Y % 8)) & 1
1880 * - the EU mask for each subslice in each slice with one bit per EU telling
1881 * whether an EU is available. The availability of EU Z in subslice Y in
 * slice X can be queried with the following formula:
1885 * (X * max_subslices + Y) * eu_stride +
1886 * Z / 8] >> (Z % 8)) & 1
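 *
 * As an illustrative sketch (info pointing at a successfully queried
 * struct drm_i915_query_topology_info, with the data/eu_offset members
 * following the full definition below), the EU check above maps to:
 *
 *	static inline int eu_available(const struct drm_i915_query_topology_info *info,
 *				       int s, int ss, int eu)
 *	{
 *		return (info->data[info->eu_offset +
 *				   (s * info->max_subslices + ss) * info->eu_stride +
 *				   eu / 8] >> (eu % 8)) & 1;
 *	}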
1888 struct drm_i915_query_topology_info {
1890 * Unused for now. Must be cleared to zero.
1895 __u16 max_subslices;
1896 __u16 max_eus_per_subslice;
1899 * Offset in data[] at which the subslice masks are stored.
1901 __u16 subslice_offset;
1904 * Stride at which each of the subslice masks for each slice are
1907 __u16 subslice_stride;
1910 * Offset in data[] at which the EU masks are stored.
1915 * Stride at which each of the EU masks for each subslice are stored.
1922 #if defined(__cplusplus)
1926 #endif /* _UAPI_I915_DRM_H_ */