/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */
#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <kgd_kfd_interface.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"

extern uint64_t amdgpu_amdkfd_total_mem_size;

struct amdgpu_device;
struct kfd_bo_va_list {
	struct list_head bo_list;
	struct amdgpu_bo_va *bo_va;
	void *kgd_dev;
	bool is_mapped;
	uint64_t va;
	uint64_t pte_flags;
};

struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct list_head bo_va_list;
	/* protected by amdkfd_process_info.lock */
	struct ttm_validate_buffer validate_list;
	struct ttm_validate_buffer resv_list;
	uint32_t domain;
	unsigned int mapped_to_gpu_memory;
	uint64_t va;

	uint32_t alloc_flags;

	atomic_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;

	bool aql_queue;
	bool is_imported;
};
/* KFD Memory Eviction */
struct amdgpu_amdkfd_fence {
	struct dma_fence base;
	struct mm_struct *mm;
	spinlock_t lock;
	char timeline_name[TASK_COMM_LEN];
};
struct amdgpu_kfd_dev {
	struct kfd_dev *dev;
	uint64_t vram_used;
};
enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
						struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
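/*
 * Illustrative sketch, not part of this header: amdkfd is expected to
 * create one eviction fence per process on a fresh dma-fence context
 * (the actual call site lives in amdgpu_amdkfd_gpuvm.c), roughly:
 *
 *	struct amdgpu_amdkfd_fence *ef =
 *		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
 *					   current->mm);
 */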
struct amdkfd_process_info {
	/* List head of all VMs that belong to a KFD process */
	struct list_head vm_list_head;
	/* List head for all KFD BOs that belong to a KFD process. */
	struct list_head kfd_bo_list;
	/* List of userptr BOs that are valid or invalid */
	struct list_head userptr_valid_list;
	struct list_head userptr_inval_list;
	/* Lock to protect kfd_bo_list */
	struct mutex lock;

	/* Number of VMs */
	unsigned int n_vms;
	/* Eviction Fence */
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	atomic_t evicted_bos;
	struct delayed_work restore_userptr_work;
	struct pid *pid;
};
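/*
 * Note (descriptive comment, not in the original header): evicted_bos
 * counts userptr BOs invalidated by the MMU notifier, and
 * restore_userptr_work is the delayed worker that revalidates them,
 * allowing the process' queues to resume once the count drops to zero.
 */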
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini(struct amdgpu_device *adev);
int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd);
int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid);
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid);

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);

void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit);
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
int amdgpu_amdkfd_alloc_gws(struct kgd_dev *kgd, size_t size, void **mem_obj);
void amdgpu_amdkfd_free_gws(struct kgd_dev *kgd, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
				      enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
				      struct kfd_local_mem_info *mem_info);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
void amdgpu_amdkfd_get_cu_info(struct kgd_dev *kgd, struct kfd_cu_info *cu_info);
int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
				  struct kgd_dev **dmabuf_kgd,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
/* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
 * this is called in hqd_load functions, so it should never fault in
 * the first place. This resolves a circular lock dependency involving
 * four locks, including the DQM lock and mmap_lock.
 */
#define read_user_wptr(mmptr, wptr, dst)				\
	({								\
		bool valid = false;					\
		if ((mmptr) && (wptr)) {				\
			pagefault_disable();				\
			if ((mmptr) == current->mm) {			\
				valid = !get_user((dst), (wptr));	\
			} else if (current->flags & PF_KTHREAD) {	\
				kthread_use_mm(mmptr);			\
				valid = !get_user((dst), (wptr));	\
				kthread_unuse_mm(mmptr);		\
			}						\
			pagefault_enable();				\
		}							\
		valid;							\
	})
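/*
 * Usage sketch (illustrative only; "mm" and "wptr" stand for the arguments
 * an hqd_load callback receives): read the user-mode write pointer before
 * programming the hardware queue descriptor:
 *
 *	uint32_t wptr_val = 0;
 *	if (read_user_wptr(mm, wptr, wptr_val))
 *		... program the HQD with wptr_val ...
 *
 * The expression evaluates to true only if the read completed without
 * taking a page fault.
 */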
/* GPUVM API */
int amdgpu_amdkfd_gpuvm_create_process_vm(struct kgd_dev *kgd, u32 pasid,
					void **vm, void **process_info,
					struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
					struct file *filp, u32 pasid,
					void **vm, void **process_info,
					struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_amdkfd_gpuvm_destroy_process_vm(struct kgd_dev *kgd, void *vm);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct kgd_dev *kgd, void *vm);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *vm);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct kgd_dev *kgd, uint64_t va, uint64_t size,
		void *vm, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct kgd_dev *kgd, struct kgd_mem *mem, void *vm);
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
		struct kgd_mem *mem, void **kptr, uint64_t *size);
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
					    struct dma_fence **ef);

int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
					  struct kfd_vm_fault_info *info);

int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
				      struct dma_buf *dmabuf,
				      uint64_t va, void *vm,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset);

void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);

int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
				struct tile_config *config);
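/*
 * Typical call sequence (a hedged sketch of how amdkfd drives the GPUVM
 * API above; arguments and error handling omitted or simplified):
 *
 *	struct kgd_mem *mem;
 *	uint64_t offset;
 *
 *	amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kgd, va, size, vm, &mem,
 *						&offset, flags);
 *	amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, vm);
 *	amdgpu_amdkfd_gpuvm_sync_memory(kgd, mem, true);
 *	...
 *	amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kgd, mem, vm);
 *	amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kgd, mem, NULL);
 */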
/* KGD2KFD callbacks */
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
			      unsigned int asic_type, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
int kgd2kfd_quiesce_mm(struct mm_struct *mm);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);

#endif /* AMDGPU_AMDKFD_H_INCLUDED */