/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
#include <linux/device_cgroup.h>
#include <drm/drm_file.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <kgd_kfd_interface.h>
#include <linux/swap.h>
#include "amd_shared.h"

#define KFD_MAX_RING_ENTRY_SIZE 8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16
/* Use upper bits of mmap offset to store KFD driver specific information.
 * BITS[63:62] - Encode MMAP type
 * BITS[61:46] - Encode gpu_id; identifies which GPU the offset belongs to
 * BITS[45:0]  - MMAP offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 * defines are w.r.t. PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT	62
#define KFD_MMAP_TYPE_MASK	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL	(0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS	(0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM	(0x1ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_MMIO	(0x0ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT 46
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
				<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
				& KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GET_GPU_ID(offset)    ((offset & KFD_MMAP_GPU_ID_MASK) \
				>> KFD_MMAP_GPU_ID_SHIFT)
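
/*
 * Illustrative sketch, not part of the driver API: how a doorbell mmap
 * offset is composed from the macros above and decoded again. The
 * kfd_example_* names are hypothetical. Note that vm_pgoff is in pages,
 * so the driver works on (vm_pgoff << PAGE_SHIFT) when applying the masks.
 */
static inline u64 kfd_example_make_doorbell_offset(u32 gpu_id)
{
	/* Type in BITS[63:62], gpu_id in BITS[61:46], low bits left zero */
	return KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);
}

static inline u32 kfd_example_offset_to_gpu_id(u64 offset)
{
	/* Recover the gpu_id field encoded by KFD_MMAP_GPU_ID() */
	return KFD_MMAP_GET_GPU_ID(offset);
}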
/*
 * When working with the cp scheduler we should assign the HIQ manually or via
 * the amdgpu driver to a fixed hqd slot. Here are the fixed HIQ hqd slot
 * definitions for Kaveri. In Kaveri only the first ME's queues participate
 * in the cp scheduling; with that in mind we set the HIQ slot in the
 * second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0
/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct)	\
	((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
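
/*
 * Usage sketch (illustrative): the macro derives both the cast and the
 * allocation size from the pointer it receives, so the type is named once:
 *
 *	struct kfd_process *p = kfd_alloc_struct(p);
 *	if (!p)
 *		return ERR_PTR(-ENOMEM);
 */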
#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for daisy chaining a user-mode trap handler.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
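
/*
 * Layout sketch (illustrative): within the two-page CWSR buffer the trap
 * handler code (TBA) occupies the first page and the trap memory area
 * (TMA) starts one page in, e.g.:
 *
 *	tba_addr = cwsr_base;
 *	tma_addr = cwsr_base + KFD_CWSR_TMA_OFFSET;
 */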
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE		\
	(KFD_MAX_NUM_OF_PROCESSES *			\
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)

#define KFD_KERNEL_QUEUE_SIZE 2048

#define KFD_UNMAP_LATENCY_MS	(4000)
/*
 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the
 * same SDMA engine on SOC15, which has 8-byte doorbells for SDMA.
 * A distance of 512 8-byte doorbells (i.e. one page away) ensures that the
 * SDMA RLC (2*i+1) doorbells (in terms of the lower 12 bit address) lie
 * exactly in the OFFSET and SIZE set in registers like
 * BIF_SDMA0_DOORBELL_RANGE.
 */
#define KFD_QUEUE_DOORBELL_MIRROR_OFFSET 512
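
/*
 * Worked example (illustrative): SOC15 SDMA doorbells are 8 bytes, so a
 * distance of 512 doorbells is 512 * 8 = 4096 bytes, exactly one 4K page.
 * Doorbell (2*i+1) therefore has the same lower 12 address bits as
 * doorbell (2*i), keeping both inside the window programmed into
 * BIF_SDMA0_DOORBELL_RANGE.
 */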
/*
 * Kernel module parameter to specify the maximum number of supported queues
 * per device.
 */
extern int max_num_of_queues_per_device;

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum number of processes per
 * HW scheduler.
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to HSA process on
 * unhandled exception.
 */
extern int send_sigterm;

/*
 * Kernel module parameter to simulate a large-BAR machine on machines that
 * do not have a large BAR.
 */
extern int debug_largebar;

/*
 * Ignore CRAT table during KFD initialization, can be used to work around
 * broken CRAT tables on some AMD systems.
 */
extern int ignore_crat;

/* Set sh_mem_config.retry_disable on Vega10 */
extern int amdgpu_noretry;

/* Halt if HWS hang is detected */
extern int halt_if_hws_hang;

/* Whether the MEC FW supports GWS barriers */
extern bool hws_gws_support;

/* Queue preemption timeout in ms */
extern int queue_preemption_timeout_ms;
enum cache_policy {
	cache_policy_coherent,
	cache_policy_noncoherent
};

#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)
struct kfd_event_interrupt_class {
	bool (*interrupt_isr)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
			bool *patched_flag);
	void (*interrupt_wq)(struct kfd_dev *dev,
			const uint32_t *ih_ring_entry);
};
struct kfd_device_info {
	enum amd_asic_type asic_family;
	const char *asic_name;
	const struct kfd_event_interrupt_class *event_interrupt_class;
	unsigned int max_pasid_bits;
	unsigned int max_no_of_hqd;
	unsigned int doorbell_size;
	size_t ih_ring_entry_size;
	uint8_t num_of_watch_points;
	uint16_t mqd_size_aligned;
	bool supports_cwsr;
	bool needs_iommu_device;
	bool needs_pci_atomics;
	unsigned int num_sdma_engines;
	unsigned int num_xgmi_sdma_engines;
	unsigned int num_sdma_queues_per_engine;
};

struct kfd_mem_obj {
	uint32_t range_start;
	uint32_t range_end;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	void *gtt_mem;
};
struct kfd_vmid_info {
	uint32_t first_vmid_kfd;
	uint32_t last_vmid_kfd;
	uint32_t vmid_num_kfd;
};
struct kfd_dev {
	struct kgd_dev *kgd;

	const struct kfd_device_info *device_info;
	struct pci_dev *pdev;
	struct drm_device *ddev;

	unsigned int id;		/* topology stub index */

	phys_addr_t doorbell_base;	/* Start of actual doorbells used by
					 * KFD. It is aligned for mapping
					 * into user mode
					 */
	size_t doorbell_base_dw_offset;	/* Offset from the start of the PCI
					 * doorbell BAR to the first KFD
					 * doorbell in dwords. GFX reserves
					 * the segment before this offset.
					 */
	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
					   * page used by kernel queue
					   */

	struct kgd2kfd_shared_resources shared_resources;
	struct kfd_vmid_info vm_info;

	const struct kfd2kgd_calls *kfd2kgd;
	struct mutex doorbell_mutex;
	DECLARE_BITMAP(doorbell_available_index,
			KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	void *gtt_mem;
	uint64_t gtt_start_gpu_addr;
	void *gtt_start_cpu_ptr;
	void *gtt_sa_bitmap;
	struct mutex gtt_sa_lock;
	unsigned int gtt_sa_chunk_size;
	unsigned int gtt_sa_num_of_chunks;

	/* Interrupts */
	struct kfifo ih_fifo;
	struct workqueue_struct *ih_wq;
	struct work_struct interrupt_work;
	spinlock_t interrupt_lock;

	/* QCM Device instance */
	struct device_queue_manager *dqm;

	bool init_complete;
	/*
	 * Interrupts of interest to KFD are copied
	 * from the HW ring into a SW ring.
	 */
	bool interrupts_active;

	/* Debug manager */
	struct kfd_dbgmgr *dbgmgr;

	/* Firmware versions */
	uint16_t mec_fw_version;
	uint16_t sdma_fw_version;

	/* Maximum process number mapped to HW scheduler */
	unsigned int max_proc_per_quantum;

	/* CWSR */
	bool cwsr_enabled;
	const void *cwsr_isa;
	unsigned int cwsr_isa_size;

	/* xGMI */
	uint64_t hive_id;

	bool pci_atomic_requested;

	/* SRAM ECC flag */
	atomic_t sram_ecc_flag;

	/* Compute Profile ref. count */
	atomic_t compute_profile;

	/* Global GWS resource shared between processes */
	void *gws;
};
enum kfd_mempool {
	KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
	KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
	KFD_MEMPOOL_FRAMEBUFFER = 3,
};
/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);
/**
 * enum kfd_unmap_queues_filter
 *
 * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts single queue.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 *						running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to
 *						the specified process.
 */
enum kfd_unmap_queues_filter {
	KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
	KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
	KFD_UNMAP_QUEUES_FILTER_BY_PASID
};
/**
 * enum kfd_queue_type
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 */
enum kfd_queue_type  {
	KFD_QUEUE_TYPE_COMPUTE,
	KFD_QUEUE_TYPE_SDMA,
	KFD_QUEUE_TYPE_HIQ,
	KFD_QUEUE_TYPE_DIQ,
	KFD_QUEUE_TYPE_SDMA_XGMI
};
enum kfd_queue_format {
	KFD_QUEUE_FORMAT_PM4,
	KFD_QUEUE_FORMAT_AQL
};

enum KFD_QUEUE_PRIORITY {
	KFD_QUEUE_PRIORITY_MINIMUM = 0,
	KFD_QUEUE_PRIORITY_MAXIMUM = 15
};
/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to 15, where 15 is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented and currently a zero in
 * this field marks the queue as inactive.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp read from the ring buffer. This field is updated automatically by the
 * H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
 * buffer. This field should mirror @write_ptr and the user should update it
 * after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue can
 * access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @vmid: If the scheduling mode is no cp scheduling, the field defines the
 * vmid used by this queue.
 *
 * This structure represents the queue properties for each queue, no matter
 * if it's a user mode or kernel mode queue.
 */
struct queue_properties {
	enum kfd_queue_type type;
	enum kfd_queue_format format;
	unsigned int queue_id;
	uint64_t queue_address;
	uint64_t queue_size;
	uint32_t priority;
	uint32_t queue_percent;
	uint32_t *read_ptr;
	uint32_t *write_ptr;
	void __iomem *doorbell_ptr;
	uint32_t doorbell_off;
	bool is_interop;
	bool is_evicted;
	bool is_active;
	/* Not relevant for user mode queues in cp scheduling */
	unsigned int vmid;
	/* Relevant only for sdma queues */
	uint32_t sdma_engine_id;
	uint32_t sdma_queue_id;
	uint32_t sdma_vm_addr;
	/* Relevant only for VI */
	uint64_t eop_ring_buffer_address;
	uint32_t eop_ring_buffer_size;
	uint64_t ctx_save_restore_area_address;
	uint32_t ctx_save_restore_area_size;
	uint32_t ctl_stack_size;
	uint64_t tba_addr;
	uint64_t tma_addr;
	/* Relevant for CU */
	uint32_t cu_mask_count; /* Must be a multiple of 32 */
	uint32_t *cu_mask;
};
#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
			    (q).queue_address != 0 &&	\
			    (q).queue_percent > 0 &&	\
			    !(q).is_evicted)
/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD.
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should execute on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * @gws: Pointing to gws kgd_mem if this is a gws control queue; NULL
 * otherwise.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 */
struct queue {
	struct list_head list;
	void *mqd;
	struct kfd_mem_obj *mqd_mem_obj;
	uint64_t gart_mqd_addr;
	struct queue_properties properties;

	uint32_t mec;
	uint32_t pipe;
	uint32_t queue;

	unsigned int sdma_id;
	unsigned int doorbell_id;

	struct kfd_process	*process;
	struct kfd_dev		*device;
	void *gws;

	/* procfs */
	struct kobject kobj;
};
/*
 * Please read the kfd_mqd_manager.h description.
 */
enum KFD_MQD_TYPE {
	KFD_MQD_TYPE_HIQ = 0,		/* for hiq */
	KFD_MQD_TYPE_CP,		/* for cp queues and diq */
	KFD_MQD_TYPE_SDMA,		/* for sdma queues */
	KFD_MQD_TYPE_DIQ,		/* for diq */
	KFD_MQD_TYPE_MAX
};
enum KFD_PIPE_PRIORITY {
	KFD_PIPE_PRIORITY_CS_LOW = 0,
	KFD_PIPE_PRIORITY_CS_MEDIUM,
	KFD_PIPE_PRIORITY_CS_HIGH
};
struct scheduling_resources {
	unsigned int vmid_mask;
	enum kfd_queue_type type;
	uint64_t queue_mask;
	uint64_t gws_mask;
	uint32_t oac_mask;
	uint32_t gds_heap_base;
	uint32_t gds_heap_size;
};
struct process_queue_manager {
	/* data */
	struct kfd_process	*process;
	struct list_head	queues;
	unsigned long		*queue_slot_bitmap;
};
struct qcm_process_device {
	/* The Device Queue Manager that owns this data */
	struct device_queue_manager *dqm;
	struct process_queue_manager *pqm;
	/* Queues list */
	struct list_head queues_list;
	struct list_head priv_queue_list;

	unsigned int queue_count;
	unsigned int vmid;
	bool is_debug;
	unsigned int evicted; /* eviction counter, 0=active */

	/* This flag tells if we should reset all wavefronts on
	 * process termination
	 */
	bool reset_wavefronts;

	/*
	 * All the memory management data should be here too
	 */
	uint64_t gds_context_area;
	/* Contains page table flags such as AMDGPU_PTE_VALID since gfx9 */
	uint64_t page_table_base;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;
	uint32_t sh_mem_ape1_base;
	uint32_t sh_mem_ape1_limit;
	uint32_t num_gws;
	uint32_t sh_hidden_private_base;

	/* CWSR memory */
	void *cwsr_kaddr;
	uint64_t cwsr_base;
	uint64_t tba_addr;
	uint64_t tma_addr;

	/* IB memory */
	uint64_t ib_base;
	void *ib_kaddr;

	/* doorbell resources per process per device */
	unsigned long *doorbell_bitmap;
};
/* KFD Memory Eviction */

/* Approx. wait time before attempting to restore evicted BOs */
#define PROCESS_RESTORE_TIME_MS 100
/* Approx. back off time if restore fails due to lack of memory */
#define PROCESS_BACK_OFF_TIME_MS 100
/* Approx. time before evicting the process again */
#define PROCESS_ACTIVE_TIME_MS 10
/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
	(((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
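
/*
 * Round-trip sketch (illustrative): a buffer handle packs the gpu_id and
 * the per-device IDR handle, and the GET macros recover each half:
 *
 *	uint64_t handle = MAKE_HANDLE(gpu_id, idr_handle);
 *	WARN_ON(GET_GPU_ID(handle) != gpu_id);
 *	WARN_ON(GET_IDR_HANDLE(handle) != idr_handle);
 */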
enum kfd_pdd_bound {
	PDD_UNBOUND = 0,
	PDD_BOUND,
	PDD_BOUND_SUSPENDED,
};

/* Data that is per-process-per device. */
struct kfd_process_device {
	/*
	 * List of all per-device data for a process.
	 * Starts from kfd_process.per_device_data.
	 */
	struct list_head per_device_list;

	/* The device that owns this data. */
	struct kfd_dev *dev;

	/* The process that owns this kfd_process_device. */
	struct kfd_process *process;

	/* per-process-per device QCM data structure */
	struct qcm_process_device qpd;
	/* Apertures */
	uint64_t lds_base;
	uint64_t lds_limit;
	uint64_t gpuvm_base;
	uint64_t gpuvm_limit;
	uint64_t scratch_base;
	uint64_t scratch_limit;

	/* VM context for GPUVM allocations */
	struct file *drm_file;
	void *vm;

	/* GPUVM allocations storage */
	struct idr alloc_idr;

	/* Flag used to tell if the pdd has dequeued from the dqm.
	 * This is used to prevent dev->dqm->ops.process_termination() from
	 * being called twice when it is already called in IOMMU callback
	 * function.
	 */
	bool already_dequeued;

	/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
	enum kfd_pdd_bound bound;
};
#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
/* Process data */
struct kfd_process {
	/*
	 * kfd_process are stored in an mm_struct*->kfd_process*
	 * hash table (kfd_processes in kfd_process.c)
	 */
	struct hlist_node kfd_processes;

	/*
	 * Opaque pointer to mm_struct. We don't hold a reference to
	 * it so it should never be dereferenced from here. This is
	 * only used for looking up processes by their mm.
	 */
	void *mm;

	struct kref ref;
	struct work_struct release_work;

	struct mutex mutex;

	/*
	 * In any process, the thread that started main() is the lead
	 * thread and outlives the rest.
	 * It is here because amd_iommu_bind_pasid wants a task_struct.
	 * It can also be used for safely getting a reference to the
	 * mm_struct of the process.
	 */
	struct task_struct *lead_thread;

	/* We want to receive a notification when the mm_struct is destroyed */
	struct mmu_notifier mmu_notifier;

	unsigned int pasid;
	unsigned int doorbell_index;

	/*
	 * List of kfd_process_device structures,
	 * one for each device the process is using.
	 */
	struct list_head per_device_data;

	struct process_queue_manager pqm;

	/* Is the user space process 32 bit? */
	bool is_32bit_user_mode;

	/* Event-related data */
	struct mutex event_mutex;
	/* Event ID allocator and lookup */
	struct idr event_idr;
	/* Event page */
	struct kfd_signal_page *signal_page;
	size_t signal_mapped_size;
	size_t signal_event_count;
	bool signal_event_limit_reached;

	/* Information used for memory eviction */
	void *kgd_process_info;
	/* Eviction fence that is attached to all the BOs of this process. The
	 * fence will be triggered during eviction and a new one will be
	 * created during restore.
	 */
	struct dma_fence *ef;

	/* Work items for evicting and restoring BOs */
	struct delayed_work eviction_work;
	struct delayed_work restore_work;
	/* seqno of the last scheduled eviction */
	unsigned int last_eviction_seqno;
	/* Approx. the last timestamp (in jiffies) when the process was
	 * restored after an eviction
	 */
	unsigned long last_restore_timestamp;

	/* Kobj for our procfs */
	struct kobject *kobj;
	struct kobject *kobj_queues;
	struct attribute attr_pasid;
};
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;
/**
 * Ioctl function type.
 *
 * \param filep pointer to file structure.
 * \param p amdkfd process pointer.
 * \param data pointer to arg that was copied from user.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
				void *data);

struct amdkfd_ioctl_desc {
	unsigned int cmd;
	int flags;
	amdkfd_ioctl_t *func;
	unsigned int cmd_drv;
	const char *name;
};
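
/*
 * Shape sketch (illustrative, hypothetical handler name): every ioctl
 * handler matches amdkfd_ioctl_t and receives the argument buffer after
 * it has already been copied in from user space.
 */
static inline int kfd_example_ioctl_noop(struct file *filep,
					 struct kfd_process *p, void *data)
{
	/* A real handler would validate *data and operate on process p. */
	return 0;
}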
bool kfd_dev_is_large_bar(struct kfd_dev *dev);

int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
				struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
						struct kfd_process *p);

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma);
/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
					int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle);
/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);
/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
		      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
					struct kfd_process *process,
					unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
					struct kfd_process *process);
int kfd_alloc_process_doorbells(struct kfd_process *process);
void kfd_free_process_doorbells(struct kfd_process *process);
/* GTT Sub-Allocator */

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);
extern struct device *kfd_device;

/* KFD's procfs */
void kfd_procfs_init(void);
void kfd_procfs_shutdown(void);
int kfd_procfs_add_queue(struct queue *q);
void kfd_procfs_del_queue(struct queue *q);

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
		uint32_t proximity_domain);
struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_device_by_kgd(const struct kgd_dev *kgd);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);
/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_dev *dev,
				const uint32_t *ih_ring_entry,
				uint32_t *patched_ihre, bool *flag);
/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
					enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
/* Process Queue Manager */
struct process_queue_node {
	struct queue *q;
	struct kernel_queue *kq;
	struct list_head process_queue_list;
};
void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
			struct kfd_dev *dev,
			struct file *f,
			struct queue_properties *properties,
			unsigned int *qid,
			uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
			struct queue_properties *p);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
			void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
						unsigned int qid);
int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size);
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
			      unsigned int fence_value,
			      unsigned int timeout_ms);

/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT   (10)
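
/*
 * Protocol sketch (illustrative): the fence defines above are used by
 * pairing a query_status packet, which makes the scheduler write the
 * fence value, with a polling wait on the CPU side:
 *
 *	*fence_addr = KFD_FENCE_INIT;
 *	pm_send_query_status(pm, fence_gpu_addr, KFD_FENCE_COMPLETED);
 *	amdkfd_fence_wait_timeout(fence_addr, KFD_FENCE_COMPLETED,
 *				  queue_preemption_timeout_ms);
 */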
struct packet_manager {
	struct device_queue_manager *dqm;
	struct kernel_queue *priv_queue;
	struct mutex lock;
	bool allocated;
	struct kfd_mem_obj *ib_buffer_obj;
	unsigned int ib_size_bytes;
	bool is_over_subscription;

	const struct packet_manager_funcs *pmf;
};
struct packet_manager_funcs {
	/* Support ASIC-specific packet formats for PM4 packets */
	int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
			struct qcm_process_device *qpd);
	int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain);
	int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res);
	int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
			struct queue *q, bool is_static);
	int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);
	int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint32_t fence_value);
	int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);

	/* Packet sizes */
	int map_process_size;
	int runlist_size;
	int set_resources_size;
	int map_queues_size;
	int unmap_queues_size;
	int query_status_size;
	int release_mem_size;
};
extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;

int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
void pm_uninit(struct packet_manager *pm, bool hanging);
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
				uint32_t fence_value);

int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_unmap_queues_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine);

void pm_release_ib(struct packet_manager *pm);
/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);

uint64_t kfd_get_number_elems(struct kfd_dev *kfd);

/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;

extern const struct kfd_device_global_init_class device_global_init_class_cik;
void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
		unsigned int pasid, unsigned long address,
		bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(unsigned int pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
		       uint64_t size);
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
				struct kfd_vm_fault_info *info);

void kfd_signal_reset_event(struct kfd_dev *dev);
void kfd_flush_tlb(struct kfd_process_device *pdd);

int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);

bool kfd_is_locked(void);

/* Compute profile */
void kfd_inc_compute_active(struct kfd_dev *dev);
void kfd_dec_compute_active(struct kfd_dev *dev);
/* Cgroup Support */
/* Check with device cgroup if @kfd device is accessible */
static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
{
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
	struct drm_device *ddev = kfd->ddev;

	return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
					  ddev->render->index,
					  DEVCG_ACC_WRITE | DEVCG_ACC_READ);
#else
	return 0;
#endif
}
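
/*
 * Usage sketch (illustrative): callers gate device access on the cgroup
 * check, e.g.
 *
 *	if (kfd_devcgroup_check_permission(dev))
 *		return -EPERM;
 */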
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

int kfd_debugfs_hang_hws(struct kfd_dev *dev);
int pm_debugfs_hang_hws(struct packet_manager *pm);
int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);
#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif /* KFD_PRIV_H_INCLUDED */