/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef KFD_IOCTL_H_INCLUDED
#define KFD_IOCTL_H_INCLUDED

#include <drm/drm.h>
#include <linux/ioctl.h>

/*
 * - 1.1 - initial version
 * - 1.3 - Add SMI events support
 * - 1.4 - Indicate new SRAM EDC bit in device properties
 * - 1.5 - Add SVM API
 * - 1.6 - Query clear flags in SVM get_attr API
 */
#define KFD_IOCTL_MAJOR_VERSION 1
#define KFD_IOCTL_MINOR_VERSION 6
struct kfd_ioctl_get_version_args {
        __u32 major_version;    /* from KFD */
        __u32 minor_version;    /* from KFD */
};
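/*
 * Example (illustrative sketch, not part of the ABI): querying the
 * interface version before relying on newer features. Assumes
 * <fcntl.h>, <sys/ioctl.h>, <stdint.h> and this header are included;
 * the /dev/kfd device node is the standard KFD entry point.
 *
 *      int fd = open("/dev/kfd", O_RDWR | O_CLOEXEC);
 *      struct kfd_ioctl_get_version_args args = {0};
 *
 *      if (fd >= 0 && ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) == 0 &&
 *          args.minor_version >= 6)
 *              ; // SVM get_attr clear-flags semantics are available
 */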
/* For kfd_ioctl_create_queue_args.queue_type. */
#define KFD_IOC_QUEUE_TYPE_COMPUTE              0x0
#define KFD_IOC_QUEUE_TYPE_SDMA                 0x1
#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL          0x2
#define KFD_IOC_QUEUE_TYPE_SDMA_XGMI            0x3

#define KFD_MAX_QUEUE_PERCENTAGE        100
#define KFD_MAX_QUEUE_PRIORITY          15
struct kfd_ioctl_create_queue_args {
        __u64 ring_base_address;        /* to KFD */
        __u64 write_pointer_address;    /* from KFD */
        __u64 read_pointer_address;     /* from KFD */
        __u64 doorbell_offset;          /* from KFD */

        __u32 ring_size;                /* to KFD */
        __u32 gpu_id;                   /* to KFD */
        __u32 queue_type;               /* to KFD */
        __u32 queue_percentage;         /* to KFD */
        __u32 queue_priority;           /* to KFD */
        __u32 queue_id;                 /* from KFD */

        __u64 eop_buffer_address;       /* to KFD */
        __u64 eop_buffer_size;          /* to KFD */
        __u64 ctx_save_restore_address; /* to KFD */
        __u32 ctx_save_restore_size;    /* to KFD */
        __u32 ctl_stack_size;           /* to KFD */
};
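/*
 * Example (illustrative sketch): creating an AQL compute queue.
 * kfd_fd, gpu_id, ring_buf and ring_size_bytes are hypothetical; the
 * ring buffer must already be GPU-accessible, and a real AQL queue
 * typically also needs EOP and context save/restore buffers.
 *
 *      struct kfd_ioctl_create_queue_args args = {0};
 *
 *      args.gpu_id = gpu_id;
 *      args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
 *      args.ring_base_address = (__u64)(uintptr_t)ring_buf;
 *      args.ring_size = ring_size_bytes;
 *      args.queue_percentage = KFD_MAX_QUEUE_PERCENTAGE;
 *      args.queue_priority = 7;
 *
 *      if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_QUEUE, &args) == 0)
 *              ; // args.queue_id and args.doorbell_offset are now valid
 */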
struct kfd_ioctl_destroy_queue_args {
        __u32 queue_id;         /* to KFD */
        __u32 pad;
};
struct kfd_ioctl_update_queue_args {
        __u64 ring_base_address;        /* to KFD */

        __u32 queue_id;                 /* to KFD */
        __u32 ring_size;                /* to KFD */
        __u32 queue_percentage;         /* to KFD */
        __u32 queue_priority;           /* to KFD */
};
struct kfd_ioctl_set_cu_mask_args {
        __u32 queue_id;         /* to KFD */
        __u32 num_cu_mask;      /* to KFD */
        __u64 cu_mask_ptr;      /* to KFD */
};
struct kfd_ioctl_get_queue_wave_state_args {
        __u64 ctl_stack_address;        /* to KFD */
        __u32 ctl_stack_used_size;      /* from KFD */
        __u32 save_area_used_size;      /* from KFD */
        __u32 queue_id;                 /* to KFD */
        __u32 pad;
};
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1

struct kfd_ioctl_set_memory_policy_args {
        __u64 alternate_aperture_base;  /* to KFD */
        __u64 alternate_aperture_size;  /* to KFD */

        __u32 gpu_id;                   /* to KFD */
        __u32 default_policy;           /* to KFD */
        __u32 alternate_policy;         /* to KFD */
        __u32 pad;
};
/*
 * All counters are monotonic. They are used for profiling of compute jobs.
 * The profiling is done by userspace.
 *
 * In case of GPU reset, the counters should not be affected.
 */
struct kfd_ioctl_get_clock_counters_args {
        __u64 gpu_clock_counter;        /* from KFD */
        __u64 cpu_clock_counter;        /* from KFD */
        __u64 system_clock_counter;     /* from KFD */
        __u64 system_clock_freq;        /* from KFD */

        __u32 gpu_id;                   /* to KFD */
        __u32 pad;
};
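/*
 * Example (illustrative sketch): converting the returned counters to a
 * timestamp. system_clock_counter divided by system_clock_freq yields
 * seconds; kfd_fd and gpu_id are hypothetical.
 *
 *      struct kfd_ioctl_get_clock_counters_args args = {0};
 *
 *      args.gpu_id = gpu_id;
 *      if (ioctl(kfd_fd, AMDKFD_IOC_GET_CLOCK_COUNTERS, &args) == 0) {
 *              double t = (double)args.system_clock_counter /
 *                         (double)args.system_clock_freq;
 *              // t is the system timestamp in seconds, usable to
 *              // correlate gpu_clock_counter with CPU-side profiling
 *      }
 */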
struct kfd_process_device_apertures {
        __u64 lds_base;         /* from KFD */
        __u64 lds_limit;        /* from KFD */
        __u64 scratch_base;     /* from KFD */
        __u64 scratch_limit;    /* from KFD */
        __u64 gpuvm_base;       /* from KFD */
        __u64 gpuvm_limit;      /* from KFD */
        __u32 gpu_id;           /* from KFD */
        __u32 pad;
};
/*
 * AMDKFD_IOC_GET_PROCESS_APERTURES is deprecated. Use
 * AMDKFD_IOC_GET_PROCESS_APERTURES_NEW instead, which supports an
 * unlimited number of GPUs.
 */
#define NUM_OF_SUPPORTED_GPUS 7
struct kfd_ioctl_get_process_apertures_args {
        struct kfd_process_device_apertures
                        process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */

        /* from KFD, should be in the range [1 - NUM_OF_SUPPORTED_GPUS] */
        __u32 num_of_nodes;
        __u32 pad;
};
struct kfd_ioctl_get_process_apertures_new_args {
        /* User allocated. Pointer to struct kfd_process_device_apertures
         * filled in by Kernel
         */
        __u64 kfd_process_device_apertures_ptr;
        /* to KFD - indicates amount of memory present in
         *  kfd_process_device_apertures_ptr
         * from KFD - Number of entries filled by KFD.
         */
        __u32 num_of_nodes;
        __u32 pad;
};
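/*
 * Example (illustrative sketch): the two-call pattern this ioctl is
 * assumed to support — first ask how many entries exist, then allocate
 * and fetch them. kfd_fd is hypothetical.
 *
 *      struct kfd_ioctl_get_process_apertures_new_args args = {0};
 *      struct kfd_process_device_apertures *ap;
 *
 *      ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 *      ap = calloc(args.num_of_nodes, sizeof(*ap));
 *      args.kfd_process_device_apertures_ptr = (__u64)(uintptr_t)ap;
 *      ioctl(kfd_fd, AMDKFD_IOC_GET_PROCESS_APERTURES_NEW, &args);
 *      // ap[0..num_of_nodes-1] now describe each GPU's apertures
 */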
#define MAX_ALLOWED_NUM_POINTS    100
#define MAX_ALLOWED_AW_BUFF_SIZE 4096
#define MAX_ALLOWED_WAC_BUFF_SIZE  128

struct kfd_ioctl_dbg_register_args {
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_dbg_unregister_args {
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};
struct kfd_ioctl_dbg_address_watch_args {
        __u64 content_ptr;              /* a pointer to the actual content */
        __u32 gpu_id;                   /* to KFD */
        __u32 buf_size_in_bytes;        /* including gpu_id and buf_size */
};

struct kfd_ioctl_dbg_wave_control_args {
        __u64 content_ptr;              /* a pointer to the actual content */
        __u32 gpu_id;                   /* to KFD */
        __u32 buf_size_in_bytes;        /* including gpu_id and buf_size */
};
/* Matching HSA_EVENTTYPE */
#define KFD_IOC_EVENT_SIGNAL                    0
#define KFD_IOC_EVENT_NODECHANGE                1
#define KFD_IOC_EVENT_DEVICESTATECHANGE         2
#define KFD_IOC_EVENT_HW_EXCEPTION              3
#define KFD_IOC_EVENT_SYSTEM_EVENT              4
#define KFD_IOC_EVENT_DEBUG_EVENT               5
#define KFD_IOC_EVENT_PROFILE_EVENT             6
#define KFD_IOC_EVENT_QUEUE_EVENT               7
#define KFD_IOC_EVENT_MEMORY                    8

#define KFD_IOC_WAIT_RESULT_COMPLETE            0
#define KFD_IOC_WAIT_RESULT_TIMEOUT             1
#define KFD_IOC_WAIT_RESULT_FAIL                2

#define KFD_SIGNAL_EVENT_LIMIT                  4096

/* For kfd_event_data.hw_exception_data.reset_type. */
#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET        0
#define KFD_HW_EXCEPTION_PER_ENGINE_RESET       1

/* For kfd_event_data.hw_exception_data.reset_cause. */
#define KFD_HW_EXCEPTION_GPU_HANG       0
#define KFD_HW_EXCEPTION_ECC            1

/* For kfd_hsa_memory_exception_data.ErrorType */
#define KFD_MEM_ERR_NO_RAS              0
#define KFD_MEM_ERR_SRAM_ECC            1
#define KFD_MEM_ERR_POISON_CONSUMED     2
#define KFD_MEM_ERR_GPU_HANG            3
struct kfd_ioctl_create_event_args {
        __u64 event_page_offset;        /* from KFD */
        __u32 event_trigger_data;       /* from KFD - signal events only */
        __u32 event_type;               /* to KFD */
        __u32 auto_reset;               /* to KFD */
        __u32 node_id;                  /* to KFD - only valid for certain
                                           requests */
        __u32 event_id;                 /* from KFD */
        __u32 event_slot_index;         /* from KFD */
};
struct kfd_ioctl_destroy_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_set_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_reset_event_args {
        __u32 event_id;         /* to KFD */
        __u32 pad;
};
struct kfd_memory_exception_failure {
        __u32 NotPresent;       /* Page not present or supervisor privilege */
        __u32 ReadOnly;         /* Write access to a read-only page */
        __u32 NoExecute;        /* Execute access to a page marked NX */
        __u32 imprecise;        /* Can't determine the exact fault address */
};

/* memory exception data */
struct kfd_hsa_memory_exception_data {
        struct kfd_memory_exception_failure failure;
        __u64 va;
        __u32 gpu_id;
        __u32 ErrorType;        /* 0 = no RAS error,
                                 * 1 = ECC_SRAM,
                                 * 2 = Link_SYNFLOOD (poison),
                                 * 3 = GPU hang (not attributable to a specific cause),
                                 * other values reserved
                                 */
};
/* hw exception data */
struct kfd_hsa_hw_exception_data {
        __u32 reset_type;
        __u32 reset_cause;
        __u32 memory_lost;
        __u32 gpu_id;
};

/* Event data */
struct kfd_event_data {
        union {
                struct kfd_hsa_memory_exception_data memory_exception_data;
                struct kfd_hsa_hw_exception_data hw_exception_data;
        };                              /* From KFD */
        __u64 kfd_event_data_ext;       /* pointer to an extension structure
                                           for future exception types */
        __u32 event_id;                 /* to KFD */
        __u32 pad;
};
struct kfd_ioctl_wait_events_args {
        __u64 events_ptr;       /* points to struct
                                   kfd_event_data array, to KFD */
        __u32 num_events;       /* to KFD */
        __u32 wait_for_all;     /* to KFD */
        __u32 timeout;          /* to KFD */
        __u32 wait_result;      /* from KFD */
};
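/*
 * Example (illustrative sketch): creating a signal event and blocking
 * on it. kfd_fd is hypothetical; the timeout is assumed to be in
 * milliseconds.
 *
 *      struct kfd_ioctl_create_event_args cev = {0};
 *      struct kfd_event_data ev = {0};
 *      struct kfd_ioctl_wait_events_args wait = {0};
 *
 *      cev.event_type = KFD_IOC_EVENT_SIGNAL;
 *      cev.auto_reset = 1;
 *      ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &cev);
 *
 *      ev.event_id = cev.event_id;
 *      wait.events_ptr = (__u64)(uintptr_t)&ev;
 *      wait.num_events = 1;
 *      wait.wait_for_all = 1;
 *      wait.timeout = 1000;
 *      ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &wait);
 *      // wait.wait_result is KFD_IOC_WAIT_RESULT_COMPLETE if signaled
 */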
struct kfd_ioctl_set_scratch_backing_va_args {
        __u64 va_addr;          /* to KFD */
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};
struct kfd_ioctl_get_tile_config_args {
        /* to KFD: pointer to tile array */
        __u64 tile_config_ptr;
        /* to KFD: pointer to macro tile array */
        __u64 macro_tile_config_ptr;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
        __u32 num_tile_configs;
        /* to KFD: array size allocated by user mode
         * from KFD: array size filled by kernel
         */
        __u32 num_macro_tile_configs;

        __u32 gpu_id;           /* to KFD */
        __u32 gb_addr_config;   /* from KFD */
        __u32 num_banks;        /* from KFD */
        __u32 num_ranks;        /* from KFD */
        /* struct size can be extended later if needed
         * without breaking ABI compatibility
         */
};
struct kfd_ioctl_set_trap_handler_args {
        __u64 tba_addr;         /* to KFD */
        __u64 tma_addr;         /* to KFD */
        __u32 gpu_id;           /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_acquire_vm_args {
        __u32 drm_fd;           /* to KFD */
        __u32 gpu_id;           /* to KFD */
};
/* Allocation flags: memory types */
#define KFD_IOC_ALLOC_MEM_FLAGS_VRAM            (1 << 0)
#define KFD_IOC_ALLOC_MEM_FLAGS_GTT             (1 << 1)
#define KFD_IOC_ALLOC_MEM_FLAGS_USERPTR         (1 << 2)
#define KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL        (1 << 3)
#define KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP      (1 << 4)
/* Allocation flags: attributes/access options */
#define KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE        (1 << 31)
#define KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE      (1 << 30)
#define KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC          (1 << 29)
#define KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE   (1 << 28)
#define KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM   (1 << 27)
#define KFD_IOC_ALLOC_MEM_FLAGS_COHERENT        (1 << 26)
#define KFD_IOC_ALLOC_MEM_FLAGS_UNCACHED        (1 << 25)
/* Allocate memory for later SVM (shared virtual memory) mapping.
 *
 * @va_addr:     virtual address of the memory to be allocated;
 *               all later mappings on all GPUs will use this address
 * @size:        size in bytes
 * @handle:      buffer handle returned to user mode, used to refer to
 *               this allocation for mapping, unmapping and freeing
 * @mmap_offset: for CPU-mapping the allocation by mmapping a render node;
 *               for userptrs this is overloaded to specify the CPU address
 * @gpu_id:      device identifier
 * @flags:       memory type and attributes. See KFD_IOC_ALLOC_MEM_FLAGS above
 */
struct kfd_ioctl_alloc_memory_of_gpu_args {
        __u64 va_addr;          /* to KFD */
        __u64 size;             /* to KFD */
        __u64 handle;           /* from KFD */
        __u64 mmap_offset;      /* to KFD (userptr), from KFD (mmap offset) */
        __u32 gpu_id;           /* to KFD */
        __u32 flags;
};
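/*
 * Example (illustrative sketch): allocating writable VRAM at a chosen
 * virtual address. kfd_fd, gpu_id, va and alloc_size are hypothetical;
 * va is assumed to be page-aligned and inside the GPUVM aperture
 * reported by the aperture ioctls.
 *
 *      struct kfd_ioctl_alloc_memory_of_gpu_args args = {0};
 *
 *      args.va_addr = va;
 *      args.size = alloc_size;
 *      args.gpu_id = gpu_id;
 *      args.flags = KFD_IOC_ALLOC_MEM_FLAGS_VRAM |
 *                   KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
 *                   KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE;
 *
 *      if (ioctl(kfd_fd, AMDKFD_IOC_ALLOC_MEMORY_OF_GPU, &args) == 0)
 *              ; // args.handle identifies the buffer from now on
 */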
/* Free memory allocated with kfd_ioctl_alloc_memory_of_gpu
 *
 * @handle: memory handle returned by alloc
 */
struct kfd_ioctl_free_memory_of_gpu_args {
        __u64 handle;           /* to KFD */
};
/* Map memory to one or more GPUs
 *
 * @handle:               memory handle returned by alloc
 * @device_ids_array_ptr: array of gpu_ids (__u32 per device)
 * @n_devices:            number of devices in the array
 * @n_success:            number of devices mapped successfully
 *
 * @n_success tells the caller how many devices from the start of the
 * array have mapped the buffer successfully. It can be passed into a
 * subsequent retry call to skip those devices. For the first call the
 * caller should initialize it to 0.
 *
 * If the ioctl completes with return code 0 (success), n_success ==
 * n_devices.
 */
struct kfd_ioctl_map_memory_to_gpu_args {
        __u64 handle;                   /* to KFD */
        __u64 device_ids_array_ptr;     /* to KFD */
        __u32 n_devices;                /* to KFD */
        __u32 n_success;                /* to/from KFD */
};
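/*
 * Example (illustrative sketch): mapping a buffer to two GPUs with the
 * retry pattern documented above. n_success persists across retries so
 * already-mapped devices are skipped; kfd_fd, gpu_a, gpu_b, handle and
 * retry_is_appropriate() are hypothetical.
 *
 *      __u32 gpu_ids[2] = { gpu_a, gpu_b };
 *      struct kfd_ioctl_map_memory_to_gpu_args args = {0};
 *
 *      args.handle = handle;
 *      args.device_ids_array_ptr = (__u64)(uintptr_t)gpu_ids;
 *      args.n_devices = 2;
 *      args.n_success = 0;     // required for the first call
 *
 *      while (ioctl(kfd_fd, AMDKFD_IOC_MAP_MEMORY_TO_GPU, &args) != 0) {
 *              if (!retry_is_appropriate(errno))
 *                      break;  // only args.n_success devices are mapped
 *      }
 */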
/* Unmap memory from one or more GPUs
 *
 * same arguments as for mapping
 */
struct kfd_ioctl_unmap_memory_from_gpu_args {
        __u64 handle;                   /* to KFD */
        __u64 device_ids_array_ptr;     /* to KFD */
        __u32 n_devices;                /* to KFD */
        __u32 n_success;                /* to/from KFD */
};
/* Allocate GWS for a specific queue
 *
 * @queue_id:  ID of the queue that GWS is allocated for
 * @num_gws:   how many GWS to allocate
 * @first_gws: index of the first GWS allocated;
 *             only contiguous GWS allocations are supported
 */
struct kfd_ioctl_alloc_queue_gws_args {
        __u32 queue_id;         /* to KFD */
        __u32 num_gws;          /* to KFD */
        __u32 first_gws;        /* from KFD */
        __u32 pad;
};
struct kfd_ioctl_get_dmabuf_info_args {
        __u64 size;             /* from KFD */
        __u64 metadata_ptr;     /* to KFD */
        __u32 metadata_size;    /* to KFD (space allocated by user)
                                 * from KFD (actual metadata size)
                                 */
        __u32 gpu_id;           /* from KFD */
        __u32 flags;            /* from KFD (KFD_IOC_ALLOC_MEM_FLAGS) */
        __u32 dmabuf_fd;        /* to KFD */
        __u32 pad;
};

struct kfd_ioctl_import_dmabuf_args {
        __u64 va_addr;          /* to KFD */
        __u64 handle;           /* from KFD */
        __u32 gpu_id;           /* to KFD */
        __u32 dmabuf_fd;        /* to KFD */
};
/*
 * KFD SMI (System Management Interface) events
 */
enum kfd_smi_event {
        KFD_SMI_EVENT_NONE = 0,    /* not used */
        KFD_SMI_EVENT_VMFAULT = 1, /* events start counting at 1 */
        KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
        KFD_SMI_EVENT_GPU_PRE_RESET = 3,
        KFD_SMI_EVENT_GPU_POST_RESET = 4,
};

#define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))

struct kfd_ioctl_smi_events_args {
        __u32 gpuid;            /* to KFD */
        __u32 anon_fd;          /* from KFD */
};
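/*
 * Example (illustrative sketch): subscribing to VM fault events. The
 * assumption here is that the returned anon_fd accepts a __u64 event
 * mask via write() and delivers text event records via read(); kfd_fd
 * and gpu_id are hypothetical.
 *
 *      struct kfd_ioctl_smi_events_args args = {0};
 *      __u64 mask = KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_VMFAULT);
 *      char buf[128];
 *
 *      args.gpuid = gpu_id;
 *      ioctl(kfd_fd, AMDKFD_IOC_SMI_EVENTS, &args);
 *      write(args.anon_fd, &mask, sizeof(mask));
 *      read(args.anon_fd, buf, sizeof(buf)); // blocks until an event fires
 */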
/* Register offset inside the remapped mmio page
 */
enum kfd_mmio_remap {
        KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL = 0,
        KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL = 4,
};
/* Guarantee host access to memory */
#define KFD_IOCTL_SVM_FLAG_HOST_ACCESS 0x00000001
/* Fine grained coherency between all devices with access */
#define KFD_IOCTL_SVM_FLAG_COHERENT 0x00000002
/* Use any GPU in same hive as preferred device */
#define KFD_IOCTL_SVM_FLAG_HIVE_LOCAL 0x00000004
/* GPUs only read, allows replication */
#define KFD_IOCTL_SVM_FLAG_GPU_RO 0x00000008
/* Allow execution on GPU */
#define KFD_IOCTL_SVM_FLAG_GPU_EXEC 0x00000010
/* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
#define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY 0x00000020
/**
 * kfd_ioctl_svm_op - SVM ioctl operations
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR: Modify one or more attributes
 * @KFD_IOCTL_SVM_OP_GET_ATTR: Query one or more attributes
 */
enum kfd_ioctl_svm_op {
        KFD_IOCTL_SVM_OP_SET_ATTR,
        KFD_IOCTL_SVM_OP_GET_ATTR
};
/**
 * kfd_ioctl_svm_location - Enum for preferred and prefetch locations
 *
 * GPU IDs are used to specify GPUs as preferred and prefetch locations.
 * The definitions below are used for system memory or for leaving the
 * preferred location unspecified.
 */
enum kfd_ioctl_svm_location {
        KFD_IOCTL_SVM_LOCATION_SYSMEM = 0,
        KFD_IOCTL_SVM_LOCATION_UNDEFINED = 0xffffffff
};
/**
 * kfd_ioctl_svm_attr_type - SVM attribute types
 *
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC: gpuid of the preferred location, 0 for
 *                                    system memory
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC: gpuid of the prefetch location, 0 for
 *                                   system memory. Setting this triggers an
 *                                   immediate prefetch (migration).
 * @KFD_IOCTL_SVM_ATTR_ACCESS:
 * @KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
 * @KFD_IOCTL_SVM_ATTR_NO_ACCESS: specify memory access for the gpuid given
 *                                by the attribute value
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS: bitmask of flags to set (see
 *                                KFD_IOCTL_SVM_FLAG_...)
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS: bitmask of flags to clear
 * @KFD_IOCTL_SVM_ATTR_GRANULARITY: migration granularity
 *                                  (log2 num pages)
 */
enum kfd_ioctl_svm_attr_type {
        KFD_IOCTL_SVM_ATTR_PREFERRED_LOC,
        KFD_IOCTL_SVM_ATTR_PREFETCH_LOC,
        KFD_IOCTL_SVM_ATTR_ACCESS,
        KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE,
        KFD_IOCTL_SVM_ATTR_NO_ACCESS,
        KFD_IOCTL_SVM_ATTR_SET_FLAGS,
        KFD_IOCTL_SVM_ATTR_CLR_FLAGS,
        KFD_IOCTL_SVM_ATTR_GRANULARITY
};
/**
 * kfd_ioctl_svm_attribute - Attributes as pairs of type and value
 *
 * The meaning of the @value depends on the attribute type.
 *
 * @type: attribute type (see enum @kfd_ioctl_svm_attr_type)
 * @value: attribute value
 */
struct kfd_ioctl_svm_attribute {
        __u32 type;
        __u32 value;
};
/**
 * kfd_ioctl_svm_args - Arguments for SVM ioctl
 *
 * @op specifies the operation to perform (see enum
 * @kfd_ioctl_svm_op). @start_addr and @size are common for all
 * operations.
 *
 * A variable number of attributes can be given in @attrs.
 * @nattr specifies the number of attributes. New attributes can be
 * added in the future without breaking the ABI. If unknown attributes
 * are given, the function returns -EINVAL.
 *
 * @KFD_IOCTL_SVM_OP_SET_ATTR sets attributes for a virtual address
 * range. It may overlap existing virtual address ranges. If it does,
 * the existing ranges will be split such that the attribute changes
 * only apply to the specified address range.
 *
 * @KFD_IOCTL_SVM_OP_GET_ATTR computes the intersection of attributes
 * over all memory in the given range and returns the result as the
 * attribute value. If different pages have different preferred or
 * prefetch locations, 0xffffffff will be returned for
 * @KFD_IOCTL_SVM_ATTR_PREFERRED_LOC or
 * @KFD_IOCTL_SVM_ATTR_PREFETCH_LOC respectively. For
 * @KFD_IOCTL_SVM_ATTR_SET_FLAGS, flags of all pages will be
 * aggregated by bitwise AND. That means a flag will be set in the
 * output if that flag is set for all pages in the range. For
 * @KFD_IOCTL_SVM_ATTR_CLR_FLAGS, flags of all pages will be
 * aggregated by bitwise NOR. That means a flag will be set in the
 * output if that flag is clear for all pages in the range.
 * The minimum migration granularity throughout the range will be
 * returned for @KFD_IOCTL_SVM_ATTR_GRANULARITY.
 *
 * Querying of accessibility attributes works by initializing the
 * attribute type to @KFD_IOCTL_SVM_ATTR_ACCESS and the value to the
 * GPUID being queried. Multiple attributes can be given to allow
 * querying multiple GPUIDs. The ioctl function overwrites the
 * attribute type to indicate the access for the specified GPU.
 */
struct kfd_ioctl_svm_args {
        __u64 start_addr;
        __u64 size;
        __u32 op;
        __u32 nattr;
        /* Variable length array of attributes */
        struct kfd_ioctl_svm_attribute attrs[0];
};
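/*
 * Example (illustrative sketch): setting a preferred location and the
 * read-mostly flag on a range. Because attrs[] is variable length, the
 * args struct is allocated with extra space for the attributes;
 * kfd_fd, gpu_id, range_start and range_size are hypothetical.
 *
 *      size_t sz = sizeof(struct kfd_ioctl_svm_args) +
 *                  2 * sizeof(struct kfd_ioctl_svm_attribute);
 *      struct kfd_ioctl_svm_args *args = calloc(1, sz);
 *
 *      args->start_addr = range_start;
 *      args->size = range_size;
 *      args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
 *      args->nattr = 2;
 *      args->attrs[0].type = KFD_IOCTL_SVM_ATTR_PREFERRED_LOC;
 *      args->attrs[0].value = gpu_id;
 *      args->attrs[1].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
 *      args->attrs[1].value = KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY;
 *
 *      ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
 */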
/**
 * kfd_ioctl_set_xnack_mode_args - Arguments for set_xnack_mode
 *
 * @xnack_enabled: [in/out] Whether to enable XNACK mode for this process
 *
 * @xnack_enabled indicates whether recoverable page faults should be
 * enabled for the current process. 0 means disabled, positive means
 * enabled, negative means leave unchanged. If enabled, virtual address
 * translations on GFXv9 and later AMD GPUs can return XNACK and retry
 * the access until a valid PTE is available. This is used to implement
 * device page faults.
 *
 * On output, @xnack_enabled returns the (new) current mode (0 or
 * positive). Therefore, a negative input value can be used to query
 * the current mode without changing it.
 *
 * The XNACK mode fundamentally changes the way SVM managed memory works
 * in the driver, with subtle effects on application performance and
 * functionality.
 *
 * Enabling XNACK mode requires shader programs to be compiled
 * differently. Furthermore, not all GPUs support changing the mode
 * per-process. Therefore changing the mode is only allowed while no
 * user mode queues exist in the process. This ensures that no shader
 * code is running that may be compiled for the wrong mode. GPUs
 * that cannot change to the requested mode will prevent the mode
 * switch. All GPUs used by the process must be in the same XNACK mode.
 *
 * GFXv8 or older GPUs do not support 48 bit virtual addresses or SVM.
 * Therefore those GPUs are not considered for the XNACK mode switch.
 *
 * Return: 0 on success, -errno on failure
 */
struct kfd_ioctl_set_xnack_mode_args {
        __s32 xnack_enabled;
};
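/*
 * Example (illustrative sketch): querying the current XNACK mode
 * without changing it, using the documented negative-input convention.
 * kfd_fd is hypothetical.
 *
 *      struct kfd_ioctl_set_xnack_mode_args args = { .xnack_enabled = -1 };
 *
 *      if (ioctl(kfd_fd, AMDKFD_IOC_SET_XNACK_MODE, &args) == 0)
 *              ; // args.xnack_enabled holds the current mode (0 or positive)
 */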
#define AMDKFD_IOCTL_BASE 'K'
#define AMDKFD_IO(nr)                   _IO(AMDKFD_IOCTL_BASE, nr)
#define AMDKFD_IOR(nr, type)            _IOR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOW(nr, type)            _IOW(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOWR(nr, type)           _IOWR(AMDKFD_IOCTL_BASE, nr, type)
#define AMDKFD_IOC_GET_VERSION                  \
                AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)

#define AMDKFD_IOC_CREATE_QUEUE                 \
                AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)

#define AMDKFD_IOC_DESTROY_QUEUE                \
                AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)

#define AMDKFD_IOC_SET_MEMORY_POLICY            \
                AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)

#define AMDKFD_IOC_GET_CLOCK_COUNTERS           \
                AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)

#define AMDKFD_IOC_GET_PROCESS_APERTURES        \
                AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)

#define AMDKFD_IOC_UPDATE_QUEUE                 \
                AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)

#define AMDKFD_IOC_CREATE_EVENT                 \
                AMDKFD_IOWR(0x08, struct kfd_ioctl_create_event_args)

#define AMDKFD_IOC_DESTROY_EVENT                \
                AMDKFD_IOW(0x09, struct kfd_ioctl_destroy_event_args)

#define AMDKFD_IOC_SET_EVENT                    \
                AMDKFD_IOW(0x0A, struct kfd_ioctl_set_event_args)

#define AMDKFD_IOC_RESET_EVENT                  \
                AMDKFD_IOW(0x0B, struct kfd_ioctl_reset_event_args)

#define AMDKFD_IOC_WAIT_EVENTS                  \
                AMDKFD_IOWR(0x0C, struct kfd_ioctl_wait_events_args)

#define AMDKFD_IOC_DBG_REGISTER                 \
                AMDKFD_IOW(0x0D, struct kfd_ioctl_dbg_register_args)

#define AMDKFD_IOC_DBG_UNREGISTER               \
                AMDKFD_IOW(0x0E, struct kfd_ioctl_dbg_unregister_args)

#define AMDKFD_IOC_DBG_ADDRESS_WATCH            \
                AMDKFD_IOW(0x0F, struct kfd_ioctl_dbg_address_watch_args)

#define AMDKFD_IOC_DBG_WAVE_CONTROL             \
                AMDKFD_IOW(0x10, struct kfd_ioctl_dbg_wave_control_args)

#define AMDKFD_IOC_SET_SCRATCH_BACKING_VA       \
                AMDKFD_IOWR(0x11, struct kfd_ioctl_set_scratch_backing_va_args)

#define AMDKFD_IOC_GET_TILE_CONFIG              \
                AMDKFD_IOWR(0x12, struct kfd_ioctl_get_tile_config_args)

#define AMDKFD_IOC_SET_TRAP_HANDLER             \
                AMDKFD_IOW(0x13, struct kfd_ioctl_set_trap_handler_args)
#define AMDKFD_IOC_GET_PROCESS_APERTURES_NEW    \
                AMDKFD_IOWR(0x14,               \
                        struct kfd_ioctl_get_process_apertures_new_args)
#define AMDKFD_IOC_ACQUIRE_VM                   \
                AMDKFD_IOW(0x15, struct kfd_ioctl_acquire_vm_args)

#define AMDKFD_IOC_ALLOC_MEMORY_OF_GPU          \
                AMDKFD_IOWR(0x16, struct kfd_ioctl_alloc_memory_of_gpu_args)

#define AMDKFD_IOC_FREE_MEMORY_OF_GPU           \
                AMDKFD_IOW(0x17, struct kfd_ioctl_free_memory_of_gpu_args)

#define AMDKFD_IOC_MAP_MEMORY_TO_GPU            \
                AMDKFD_IOWR(0x18, struct kfd_ioctl_map_memory_to_gpu_args)

#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU        \
                AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)

#define AMDKFD_IOC_SET_CU_MASK                  \
                AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)

#define AMDKFD_IOC_GET_QUEUE_WAVE_STATE         \
                AMDKFD_IOWR(0x1B, struct kfd_ioctl_get_queue_wave_state_args)

#define AMDKFD_IOC_GET_DMABUF_INFO              \
                AMDKFD_IOWR(0x1C, struct kfd_ioctl_get_dmabuf_info_args)

#define AMDKFD_IOC_IMPORT_DMABUF                \
                AMDKFD_IOWR(0x1D, struct kfd_ioctl_import_dmabuf_args)

#define AMDKFD_IOC_ALLOC_QUEUE_GWS             \
                AMDKFD_IOWR(0x1E, struct kfd_ioctl_alloc_queue_gws_args)

#define AMDKFD_IOC_SMI_EVENTS                   \
                AMDKFD_IOWR(0x1F, struct kfd_ioctl_smi_events_args)

#define AMDKFD_IOC_SVM  AMDKFD_IOWR(0x20, struct kfd_ioctl_svm_args)

#define AMDKFD_IOC_SET_XNACK_MODE               \
                AMDKFD_IOWR(0x21, struct kfd_ioctl_set_xnack_mode_args)

#define AMDKFD_COMMAND_START            0x01
#define AMDKFD_COMMAND_END              0x22

#endif