/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/mmu_context.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_10_3_0_offset.h"
#include "gc/gc_10_3_0_sh_mask.h"
#include "navi10_enum.h"
#include "oss/osssys_5_0_0_offset.h"
#include "oss/osssys_5_0_0_sh_mask.h"
#include "soc15_common.h"
#include "v10_structs.h"
#include "nv.h"
#include "nvd.h"
#include "gfxhub_v2_1.h"
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES,
	SAVE_WAVES
};
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

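/*
 * lock_srbm()/unlock_srbm() bracket accesses to the banked GC registers:
 * nv_grbm_select() points the register aperture at one MEC/pipe/queue/VMID
 * instance, and adev->srbm_mutex serializes the selection against other
 * users. acquire_queue()/release_queue() below are thin wrappers for the
 * common "select a queue, VMID 0" case.
 */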
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	mutex_lock(&adev->srbm_mutex);
	nv_grbm_select(adev, mec, pipe, queue, vmid);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	nv_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static uint64_t get_queue_mask(struct amdgpu_device *adev,
			       uint32_t pipe_id, uint32_t queue_id)
{
	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
			queue_id;

	return 1ull << bit;
}

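/*
 * For example, with adev->gfx.mec.num_queue_per_pipe == 8 (the usual value
 * for this generation), pipe 1/queue 2 maps to bit 10, i.e. a mask of 0x400.
 * hqd_load_v10_3() writes this mask to CP_PQ_WPTR_POLL_CNTL1 to enable WPTR
 * polling for exactly that queue.
 */
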
static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void program_sh_mem_settings_v10_3(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
	/* APE1 no longer exists on GFX9 and later ASICs */

	unlock_srbm(kgd);
}

/* ATC is defeatured on Sienna_Cichlid */
static int set_pasid_vmid_mapping_v10_3(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT;

	/* Mapping vmid to pasid also for IH block */
	pr_debug("mapping vmid %d -> pasid %d in IH block for GFX client\n",
		vmid, pasid);
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, value);

	return 0;
}

static int init_interrupts_v10_3(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t sdma_engine_reg_base = 0;
	uint32_t sdma_rlc_reg_offset;

	switch (engine_id) {
	default:
		dev_warn(adev->dev,
			 "Invalid sdma engine id (%d), using engine id 0\n",
			 engine_id);
		fallthrough;
	case 0:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
				mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
		break;
	case 1:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA1, 0,
				mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
		break;
	case 2:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA2, 0,
				mmSDMA2_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
		break;
	case 3:
		sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA3, 0,
				mmSDMA3_RLC0_RB_CNTL) - mmSDMA2_RLC0_RB_CNTL;
		break;
	}

	sdma_rlc_reg_offset = sdma_engine_reg_base
		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
		 queue_id, sdma_rlc_reg_offset);

	return sdma_rlc_reg_offset;
}

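/*
 * The subtraction above converts an absolute RLC0_RB_CNTL offset into a
 * per-engine base, so that base + queue_id * (mmSDMA0_RLC1_RB_CNTL -
 * mmSDMA0_RLC0_RB_CNTL) + mmSDMA0_RLC0_<reg> addresses <reg> of the chosen
 * queue on that engine. This relies on every engine using the same register
 * layout and per-queue stride within its RLC blocks.
 */
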
static inline struct v10_compute_mqd *get_mqd(void *mqd)
{
	return (struct v10_compute_mqd *)mqd;
}

static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v10_sdma_mqd *)mqd;
}

static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_compute_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, hqd_base, data;

	m = get_mqd(mqd);

	pr_debug("Load hqd of pipe %d queue %d\n", pipe_id, queue_id);
	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0*/
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS));
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_CP_SCHEDULERS), value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;
	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);

	for (reg = hqd_base;
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		WREG32(reg, mqd_hqd[reg - hqd_base]);

	/* Activate doorbell logic before triggering WPTR poll. */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);

	if (wptr) {
		/* Don't read wptr with get_user because the user
		 * context may not be accessible (if this function
		 * runs in a work queue). Instead trigger a one-shot
		 * polling read from memory in the CP. This assumes
		 * that wptr is GPU-accessible in the queue's VMID via
		 * ATC or SVM. WPTR==RPTR before starting the poll so
		 * the CP starts fetching new commands from the right
		 * place.
		 *
		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
		 * tricky. Assume that the queue didn't overflow. The
		 * number of valid bits in the 32-bit RPTR depends on
		 * the queue size. The remaining bits are taken from
		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
		 * queue size.
		 */
		uint32_t queue_size =
			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);

		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
			guessed_wptr += queue_size;
		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;
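		/*
		 * Worked example (illustrative): with a 0x1000-dword
		 * queue, saved WPTR_LO = 0x2ff0 and RPTR = 0x10, the
		 * low bits of the guess come from the RPTR (0x010) and
		 * the upper bits from the saved WPTR (0x2000), giving
		 * guessed_wptr = 0x2010: WPTR == RPTR modulo the queue
		 * size, so the CP resumes fetching at the read pointer
		 * once the one-shot poll below picks up the real WPTR.
		 */
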
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
		       lower_32_bits(guessed_wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
		       upper_32_bits(guessed_wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
		       lower_32_bits((uint64_t)wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
		       upper_32_bits((uint64_t)wptr));
		pr_debug("%s setting CP_PQ_WPTR_POLL_CNTL1 to %x\n", __func__,
			 (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
	}

	/* Start the EOP fetcher */
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);

	release_queue(kgd);

	return 0;
}

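/*
 * hiq_mqd_load_v10_3 below maps the HIQ through the KIQ rather than by
 * programming the HQD registers directly: it reserves 7 ring dwords (one
 * PACKET3_MAP_QUEUES header plus six payload dwords) and lets the KIQ
 * firmware activate the queue described by the MQD.
 */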
static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd,
			    uint32_t pipe_id, uint32_t queue_id,
			    uint32_t doorbell_off)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	struct v10_compute_mqd *m;
	uint32_t mec, pipe;
	int r;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
		 mec, pipe, queue_id);

	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, 7);
	if (r) {
		pr_err("Failed to alloc KIQ (%d).\n", r);
		goto out_unlock;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
			  PACKET3_MAP_QUEUES_PIPE(pipe) |
			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
	amdgpu_ring_commit(kiq_ring);

out_unlock:
	spin_unlock(&adev->gfx.kiq.ring_lock);
	release_queue(kgd);

	return r;
}

static int hqd_dump_v10_3(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

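/*
 * hqd_sdma_load_v10_3 below follows the usual SDMA RLC bring-up order:
 * disable the ring buffer, wait for the engine context to report idle,
 * program the doorbell and pointers (with MINOR_PTR_UPDATE held at 1 around
 * the WPTR writes), and only then set RB_ENABLE again.
 */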
static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
				m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

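/*
 * The redefined HQD_N_REGS (19+6+7+10) below is the total of the four
 * register ranges dumped, presumably the RB/doorbell, status/CSA, IB and
 * MIDCMD groups respectively, all relative to the engine's RLC base.
 */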
static int hqd_sdma_dump_v10_3(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
			engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	*dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
		   high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

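/*
 * hqd_destroy_v10_3 preempts a compute queue by writing a dequeue request
 * (drain or reset, per the enum at the top of this file) to
 * CP_HQD_DEQUEUE_REQUEST and then polling CP_HQD_ACTIVE until the hardware
 * clears the ACTIVE bit or the caller-supplied timeout (in ms) expires.
 */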
static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	enum hqd_dequeue_request_type type;
	unsigned long end_jiffies;
	uint32_t temp;
	struct v10_compute_mqd *m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD15(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue pipe %d queue %d preemption failed\n",
					pipe_id, queue_id);
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v10_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}

static int address_watch_disable_v10_3(struct kgd_dev *kgd)
{
	return 0;
}

static int address_watch_execute_v10_3(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}

static int wave_control_execute_v10_3(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), gfx_index_val);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SA_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX), data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t address_watch_get_offset_v10_3(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t vmid,
		uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/* SDMA is on gfxhub as well for Navi1* series */
	gfxhub_v2_1_setup_vm_pt_regs(adev, vmid, page_table_base);
}

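/*
 * The debug-trap helpers below program the SPI_GDBG_* registers. The common
 * pattern is to raise SPI_GDBG_WAVE_CNTL.STALL_RA to hold back new wave
 * launches while SPI_GDBG_TRAP_MASK is rewritten, then release the stall.
 */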
uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
				uint32_t trap_debug_wave_launch_mode,
				uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;
	uint32_t orig_wave_cntl_value;
	uint32_t orig_stall_vmid;

	mutex_lock(&adev->grbm_idx_mutex);

	orig_wave_cntl_value = RREG32(SOC15_REG_OFFSET(GC,
				0,
				mmSPI_GDBG_WAVE_CNTL));
	orig_stall_vmid = REG_GET_FIELD(orig_wave_cntl_value,
			SPI_GDBG_WAVE_CNTL,
			STALL_VMID);

	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);

	data = 0;
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), orig_stall_vmid);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

uint32_t disable_debug_trap_v10_3(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), 0);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

uint32_t set_wave_launch_trap_override_v10_3(struct kgd_dev *kgd,
						uint32_t trap_override,
						uint32_t trap_mask)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 1);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);

	data = 0;
	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK,
			EXCP_EN, trap_mask);
	data = REG_SET_FIELD(data, SPI_GDBG_TRAP_MASK,
			REPLACE, trap_override);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_TRAP_MASK), data);

	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL, STALL_RA, 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

uint32_t set_wave_launch_mode_v10_3(struct kgd_dev *kgd,
					uint8_t wave_launch_mode,
					uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;
	bool is_stall_mode;
	bool is_mode_set;

	is_stall_mode = (wave_launch_mode == 4);
	is_mode_set = (wave_launch_mode != 0 && wave_launch_mode != 4);

	mutex_lock(&adev->grbm_idx_mutex);

	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
			VMID_MASK, is_mode_set ? 1 << vmid : 0);
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL2,
			MODE, is_mode_set ? wave_launch_mode : 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL2), data);

	data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL,
			STALL_VMID, is_stall_mode ? 1 << vmid : 0);
	data = REG_SET_FIELD(data, SPI_GDBG_WAVE_CNTL,
			STALL_RA, is_stall_mode ? 1 : 0);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL), data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

/* get_iq_wait_times_v10_3: Returns the mmCP_IQ_WAIT_TIME1/2 values
 * The values read are:
 *	ib_offload_wait_time     -- Wait Count for Indirect Buffer Offloads.
 *	atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads.
 *	wrm_offload_wait_time    -- Wait Count for WAIT_REG_MEM Offloads.
 *	gws_wait_time            -- Wait Count for Global Wave Syncs.
 *	que_sleep_wait_time      -- Wait Count for Queue Sleep.
 *	sch_wave_wait_time       -- Wait Count for Scheduling Wave Message.
 *	sem_rearm_wait_time      -- Wait Count for Semaphore re-arm.
 *	deq_retry_wait_time      -- Wait Count for Dequeue Retry.
 */
void get_iq_wait_times_v10_3(struct kgd_dev *kgd,
					uint32_t *wait_times)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	*wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2));
}

void build_grace_period_packet_info_v10_3(struct kgd_dev *kgd,
						uint32_t wait_times,
						uint32_t grace_period,
						uint32_t *reg_offset,
						uint32_t *reg_data)
{
	*reg_data = wait_times;

	*reg_data = REG_SET_FIELD(*reg_data,
			CP_IQ_WAIT_TIME2,
			SCH_WAVE,
			grace_period);

	*reg_offset = mmCP_IQ_WAIT_TIME2;
}

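/*
 * Illustrative pairing of the two helpers above (caller-side sketch; the
 * variable names are only for the example):
 *
 *	uint32_t wait_times, reg_offset, reg_data;
 *
 *	get_iq_wait_times_v10_3(kgd, &wait_times);
 *	build_grace_period_packet_info_v10_3(kgd, wait_times, grace_period,
 *					     &reg_offset, &reg_data);
 *
 * reg_data then holds the current CP_IQ_WAIT_TIME2 value with only the
 * SCH_WAVE field replaced by grace_period, ready to be written back to
 * reg_offset (e.g. by a packet the KFD scheduler builds).
 */
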
const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
	.program_sh_mem_settings = program_sh_mem_settings_v10_3,
	.set_pasid_vmid_mapping = set_pasid_vmid_mapping_v10_3,
	.init_interrupts = init_interrupts_v10_3,
	.hqd_load = hqd_load_v10_3,
	.hiq_mqd_load = hiq_mqd_load_v10_3,
	.hqd_sdma_load = hqd_sdma_load_v10_3,
	.hqd_dump = hqd_dump_v10_3,
	.hqd_sdma_dump = hqd_sdma_dump_v10_3,
	.hqd_is_occupied = hqd_is_occupied_v10_3,
	.hqd_sdma_is_occupied = hqd_sdma_is_occupied_v10_3,
	.hqd_destroy = hqd_destroy_v10_3,
	.hqd_sdma_destroy = hqd_sdma_destroy_v10_3,
	.address_watch_disable = address_watch_disable_v10_3,
	.address_watch_execute = address_watch_execute_v10_3,
	.wave_control_execute = wave_control_execute_v10_3,
	.address_watch_get_offset = address_watch_get_offset_v10_3,
	.get_atc_vmid_pasid_mapping_info = NULL,
	.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
	.get_hive_id = amdgpu_amdkfd_get_hive_id,
	.enable_debug_trap = enable_debug_trap_v10_3,
	.disable_debug_trap = disable_debug_trap_v10_3,
	.set_wave_launch_trap_override = set_wave_launch_trap_override_v10_3,
	.set_wave_launch_mode = set_wave_launch_mode_v10_3,
	.get_iq_wait_times = get_iq_wait_times_v10_3,
	.build_grace_period_packet_info = build_grace_period_packet_info_v10_3,
};