/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"
#include "gfx_v8_0.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"
#include "vid.h"
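
/*
 * Dequeue request types written to CP_HQD_DEQUEUE_REQUEST when preempting
 * an HQD: either drain the queue's remaining work or reset its waves.
 */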
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config,
		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
		uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);
static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm);
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
		uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int cntl_val,
					unsigned int addr_hi,
					unsigned int addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);

/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_local_mem_info = get_local_mem_info,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.alloc_pasid = amdgpu_pasid_alloc,
	.free_pasid = amdgpu_pasid_free,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid =
			get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid =
			get_atc_vmid_pasid_mapping_valid,
	.get_fw_version = get_fw_version,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = get_tile_config,
	.get_cu_info = get_cu_info,
	.get_vram_usage = amdgpu_amdkfd_get_vram_usage,
	.create_process_vm = amdgpu_amdkfd_gpuvm_create_process_vm,
	.acquire_process_vm = amdgpu_amdkfd_gpuvm_acquire_process_vm,
	.destroy_process_vm = amdgpu_amdkfd_gpuvm_destroy_process_vm,
	.release_process_vm = amdgpu_amdkfd_gpuvm_release_process_vm,
	.get_process_page_dir = amdgpu_amdkfd_gpuvm_get_process_page_dir,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
	.alloc_memory_of_gpu = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu,
	.free_memory_of_gpu = amdgpu_amdkfd_gpuvm_free_memory_of_gpu,
	.map_memory_to_gpu = amdgpu_amdkfd_gpuvm_map_memory_to_gpu,
	.unmap_memory_to_gpu = amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu,
	.sync_memory = amdgpu_amdkfd_gpuvm_sync_memory,
	.map_gtt_bo_to_kernel = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel,
	.restore_process_bos = amdgpu_amdkfd_gpuvm_restore_process_bos,
	.invalidate_tlbs = invalidate_tlbs,
	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
	.submit_ib = amdgpu_amdkfd_submit_ib,
	.get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
	.gpu_recover = amdgpu_amdkfd_gpu_reset,
	.set_compute_idle = amdgpu_amdkfd_set_compute_idle
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}
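
/*
 * SRBM_GFX_CNTL selects which MEC/pipe/queue/VMID aperture subsequent
 * CP register reads and writes decode to; srbm_mutex serializes all
 * users of that index register.
 */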
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}
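
/*
 * pipe_id is a KFD-relative pipe index spanning all compute MECs;
 * MEID 0 is the graphics ME, so compute MECs start at MEID 1.
 */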
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}
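
/* Program per-VMID shader memory configuration and aperture bases. */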
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}
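
/* Enable timestamp and bad-opcode interrupts for the given compute pipe. */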
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}
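
/*
 * Per-queue (RLC) SDMA registers are laid out in identical banks:
 * engine 1's bank sits SDMA1_REGISTER_OFFSET dwords above engine 0's,
 * and consecutive queues are KFD_VI_SDMA_QUEUE_OFFSET dwords apart,
 * e.g. engine 1, queue 2 decodes to
 * SDMA1_REGISTER_OFFSET + 2 * KFD_VI_SDMA_QUEUE_OFFSET.
 */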
static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
		m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
	pr_debug("kfd: sdma base address: 0x%x\n", retval);

	return retval;
}

static inline struct vi_mqd *get_mqd(void *mqd)
{
	return (struct vi_mqd *)mqd;
}

static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct vi_sdma_mqd *)mqd;
}
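
/*
 * Load a memory queue descriptor (MQD) into the hardware queue descriptor
 * (HQD) registers of the selected pipe/queue, then activate the queue.
 */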
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0*/
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(mmRLC_CP_SCHEDULERS);
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(mmRLC_CP_SCHEDULERS, value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
	}

	for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_ptr may take the mm->mmap_sem.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}
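
/*
 * Snapshot the HQD registers of the selected pipe/queue into a
 * caller-freed array of (register byte offset, value) pairs.
 */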
static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS (54+4)
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_DONES; reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
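
/*
 * Restore an SDMA queue: disable the ring buffer, wait for the engine
 * to idle, clear the RESUME_CTX bit, re-arm the doorbell and ring
 * pointers from the MQD, then re-enable the ring buffer.
 */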
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	unsigned long end_jiffies;
	uint32_t sdma_base_addr;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}
	if (m->sdma_engine_id) {
		data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
	} else {
		data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
		data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
				RESUME_CTX, 0);
		WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
	}

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr);

	if (read_user_wptr(mm, wptr, data))
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data);
	else
		WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
				m->sdmax_rlcx_virtual_addr);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
		queue_id * KFD_VI_SDMA_QUEUE_OFFSET;
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4+2+3+7)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_CSA_ADDR_LO; reg <= mmSDMA0_RLC0_CSA_ADDR_HI;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_DUMMY_REG;
	     reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL;
	     reg++)
		DUMP_REG(sdma_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
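
/*
 * A queue occupies its HQD slot if the HQD is active and its ring base
 * matches queue_address (both shifted right by 8, i.e. 256-byte aligned).
 */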
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}
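
/*
 * Preempt an active HQD: pick a drain or reset dequeue request, work
 * around unsafe IQ-timer windows, then poll CP_HQD_ACTIVE until the
 * queue is off the hardware or the timeout expires.
 */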
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;
	struct vi_mqd *m = get_mqd(mqd);

	if (adev->in_gpu_reset)
		return -EIO;

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			break;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;

		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}
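
/*
 * Stop an SDMA queue: disable the ring buffer, wait for the context to
 * idle, clear the doorbell, and save the final read pointer back into
 * the MQD.
 */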
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_sdma_mqd *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies))
			return -ETIME;
		usleep_range(500, 1000);
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR);

	return 0;
}

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
								uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int cntl_val,
					unsigned int addr_hi,
					unsigned int addr_lo)
{
	return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}
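
/*
 * Report the loaded microcode version for the requested engine, taken
 * from the common header of the firmware image held by amdgpu.
 */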
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.pfp_fw->data;
		break;

	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.me_fw->data;
		break;

	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.ce_fw->data;
		break;

	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec_fw->data;
		break;

	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec2_fw->data;
		break;

	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.rlc_fw->data;
		break;

	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
					adev->sdma.instance[0].fw->data;
		break;

	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
					adev->sdma.instance[1].fw->data;
		break;

	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bit in use */
	return hdr->common.ucode_version;
}

static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID\n");
		return;
	}
	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8, page_table_base);
}
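
/*
 * Flush the GPU TLB for the KFD VMID currently mapped to the given
 * pasid by writing VM_INVALIDATE_REQUEST, then read
 * VM_INVALIDATE_RESPONSE to make sure the request has landed.
 */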
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	int vmid;
	unsigned int tmp;

	if (adev->in_gpu_reset)
		return -EIO;

	for (vmid = 0; vmid < 16; vmid++) {
		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
			continue;

		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}

static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("non kfd vmid %d\n", vmid);
		return -EINVAL;
	}

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
	RREG32(mmVM_INVALIDATE_RESPONSE);
	return 0;
}