/*
 * Copyright 2014-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "vega10_enum.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "sdma1/sdma1_4_0_sh_mask.h"
#include "athub/athub_1_0_offset.h"
#include "athub/athub_1_0_sh_mask.h"
#include "oss/osssys_4_0_offset.h"
#include "oss/osssys_4_0_sh_mask.h"
#include "soc15_common.h"
#include "v9_structs.h"
#include "soc15.h"
#include "soc15d.h"
#include "mmhub_v1_0.h"
#include "gfxhub_v1_0.h"
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	mutex_lock(&adev->srbm_mutex);
	soc15_grbm_select(adev, mec, pipe, queue, vmid);
}
static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	soc15_grbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}
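
/*
 * acquire_queue()/release_queue() bracket direct HQD register access:
 * they select the given MEC/pipe/queue in GRBM (holding srbm_mutex via
 * lock_srbm()) so that the mmCP_HQD_* registers touched in between refer
 * to exactly that hardware queue.
 */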
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}
static uint64_t get_queue_mask(struct amdgpu_device *adev,
			       uint32_t pipe_id, uint32_t queue_id)
{
	unsigned int bit = pipe_id * adev->gfx.mec.num_queue_per_pipe +
			queue_id;

	return 1ull << bit;
}
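
/*
 * Example: with the usual eight queues per pipe on GFX9, pipe_id 1 and
 * queue_id 2 select bit 10, so get_queue_mask() returns 0x400.  The mask
 * is written to mmCP_PQ_WPTR_POLL_CNTL1 in kgd_gfx_v9_hqd_load() to arm
 * the WPTR poll for that one queue.
 */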
static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}
void kgd_gfx_v9_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config);
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases);
	/* APE1 no longer exists on GFX9 */

	unlock_srbm(kgd);
}
int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	/*
	 * Need to do this twice, once for gfx and once for mmhub.
	 * For ATC, add 16 to the VMID for mmhub; the IH block uses
	 * different registers.
	 * ATC_VMID0..15 registers are separate from ATC_VMID16..31.
	 */
	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid,
	       pasid_mapping);

	while (!(RREG32(SOC15_REG_OFFSET(ATHUB, 0,
			mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << vmid)))
		cpu_relax();

	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid,
	       pasid_mapping);

	WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID16_PASID_MAPPING) + vmid,
	       pasid_mapping);

	while (!(RREG32(SOC15_REG_OFFSET(ATHUB, 0,
			mmATC_VMID_PASID_MAPPING_UPDATE_STATUS)) &
		 (1U << (vmid + 16))))
		cpu_relax();

	WREG32(SOC15_REG_OFFSET(ATHUB, 0,
				mmATC_VMID_PASID_MAPPING_UPDATE_STATUS),
	       1U << (vmid + 16));

	/* Mapping vmid to pasid also for IH block */
	WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid,
	       pasid_mapping);

	return 0;
}
/* TODO - RING0 form of field is obsolete, seems to date back to SI
 * but still works
 */
int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec, pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(SOC15_REG_OFFSET(GC, 0, mmCPC_INT_CNTL),
		CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
		CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);
	return 0;
}
static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
				unsigned int engine_id,
				unsigned int queue_id)
{
	uint32_t sdma_engine_reg_base[2] = {
		SOC15_REG_OFFSET(SDMA0, 0,
				 mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL,
		SOC15_REG_OFFSET(SDMA1, 0,
				 mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL
	};
	uint32_t retval = sdma_engine_reg_base[engine_id]
		+ queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL);

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id,
			queue_id, retval);

	return retval;
}
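
/*
 * Example: engine_id 1 and queue_id 2 yield the SDMA1 register base plus
 * twice the per-queue stride (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL),
 * i.e. the offset of SDMA1's RLC2 queue registers.
 */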
static inline struct v9_mqd *get_mqd(void *mqd)
{
	return (struct v9_mqd *)mqd;
}
static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v9_sdma_mqd *)mqd;
}
int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, hqd_base, data;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;
	hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);

	for (reg = hqd_base;
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		WREG32_RLC(reg, mqd_hqd[reg - hqd_base]);

	/* Activate doorbell logic before triggering WPTR poll. */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data);

	if (wptr) {
		/* Don't read wptr with get_user because the user
		 * context may not be accessible (if this function
		 * runs in a work queue). Instead trigger a one-shot
		 * polling read from memory in the CP. This assumes
		 * that wptr is GPU-accessible in the queue's VMID via
		 * ATC or SVM. WPTR==RPTR before starting the poll so
		 * the CP starts fetching new commands from the right
		 * place.
		 *
		 * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit
		 * tricky. Assume that the queue didn't overflow. The
		 * number of valid bits in the 32-bit RPTR depends on
		 * the queue size. The remaining bits are taken from
		 * the saved 64-bit WPTR. If the WPTR wrapped, add the
		 * queue size.
		 */
		uint32_t queue_size =
			2 << REG_GET_FIELD(m->cp_hqd_pq_control,
					   CP_HQD_PQ_CONTROL, QUEUE_SIZE);
		uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);

		if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
			guessed_wptr += queue_size;
		guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);
		guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32;

		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO),
		       lower_32_bits(guessed_wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI),
		       upper_32_bits(guessed_wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR),
		       lower_32_bits((uintptr_t)wptr));
		WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI),
		       upper_32_bits((uintptr_t)wptr));
		WREG32(SOC15_REG_OFFSET(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1),
		       (uint32_t)get_queue_mask(adev, pipe_id, queue_id));
	}

	/* Start the EOP fetcher */
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR),
	       REG_SET_FIELD(m->cp_hqd_eop_rptr,
			     CP_HQD_EOP_RPTR, INIT_FETCHER, 1));

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data);

	release_queue(kgd);

	return 0;
}
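
/*
 * Note on ordering in kgd_gfx_v9_hqd_load(): the MQD contents and the
 * doorbell/WPTR-poll setup are programmed first and mmCP_HQD_ACTIVE is
 * written last, so the CP only takes ownership of the queue once its
 * complete HQD state is in place.
 */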
int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd,
			    uint32_t pipe_id, uint32_t queue_id,
			    uint32_t doorbell_off)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	struct v9_mqd *m;
	uint32_t mec, pipe;
	int r;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
		 mec, pipe, queue_id);

	spin_lock(&adev->gfx.kiq.ring_lock);
	r = amdgpu_ring_alloc(kiq_ring, 7);
	if (r) {
		pr_err("Failed to alloc KIQ (%d).\n", r);
		goto out_unlock;
	}

	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5));
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_QUEUE_SEL(0) | /* Queue_Sel */
			  PACKET3_MAP_QUEUES_VMID(m->cp_hqd_vmid) | /* VMID */
			  PACKET3_MAP_QUEUES_QUEUE(queue_id) |
			  PACKET3_MAP_QUEUES_PIPE(pipe) |
			  PACKET3_MAP_QUEUES_ME((mec - 1)) |
			  PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /* queue_type: normal compute queue */
			  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
			  PACKET3_MAP_QUEUES_ENGINE_SEL(1) | /* engine_sel: hiq */
			  PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
	amdgpu_ring_write(kiq_ring,
			  PACKET3_MAP_QUEUES_DOORBELL_OFFSET(doorbell_off));
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_mqd_base_addr_hi);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_lo);
	amdgpu_ring_write(kiq_ring, m->cp_hqd_pq_wptr_poll_addr_hi);
	amdgpu_ring_commit(kiq_ring);

out_unlock:
	spin_unlock(&adev->gfx.kiq.ring_lock);
	release_queue(kgd);

	return r;
}
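
/*
 * Unlike kgd_gfx_v9_hqd_load(), the HIQ above is mapped by submitting a
 * MAP_QUEUES packet on the KIQ ring rather than by programming the HQD
 * registers directly; the CP firmware performs the actual mapping.
 */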
int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS 56
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR);
	     reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
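
/*
 * Each DUMP_REG() entry records the register's byte address (its dword
 * offset shifted left by two) together with its current value, so the
 * caller receives HQD_N_REGS {address, value} pairs in *dump.
 */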
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	unsigned long end_jiffies;
	uint32_t data;
	uint64_t data64;
	uint64_t __user *wptr64 = (uint64_t __user *)wptr;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET,
	       m->sdmax_rlcx_doorbell_offset);

	data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdmax_rlcx_rb_rptr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI,
				m->sdmax_rlcx_rb_rptr_hi);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1);
	if (read_user_wptr(mm, wptr64, data64)) {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       lower_32_bits(data64));
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       upper_32_bits(data64));
	} else {
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdmax_rlcx_rb_rptr);
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI,
		       m->sdmax_rlcx_rb_rptr_hi);
	}
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdmax_rlcx_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdmax_rlcx_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdmax_rlcx_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}
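
/*
 * When the user-space write pointer cannot be read through the supplied mm
 * (read_user_wptr() returns false), the ring is restarted above with WPTR
 * equal to the saved RPTR, so the queue resumes with no new work pending.
 */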
static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev,
			engine_id, queue_id);
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+6+7+10)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN;
	     reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);
	for (reg = mmSDMA0_RLC0_MIDCMD_DATA0;
	     reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++)
		DUMP_REG(sdma_rlc_reg_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE)) &&
		    high == RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_BASE_HI)))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}
int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	enum hqd_dequeue_request_type type;
	unsigned long end_jiffies;
	uint32_t temp;
	struct v9_mqd *m = get_mqd(mqd);

	if (adev->in_gpu_reset)
		return -EIO;

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE));
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}
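
/*
 * The dequeue request written above asks the CP either to drain the pipe
 * or to reset the waves (see enum hqd_dequeue_request_type); the loop then
 * polls CP_HQD_ACTIVE until the queue deactivates or the caller's timeout
 * (utimeout, in milliseconds) expires.
 */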
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct v9_sdma_mqd *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id,
					    m->sdma_queue_id);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);
	m->sdmax_rlcx_rb_rptr_hi =
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI);

	return 0;
}
bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		     + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}
int kgd_gfx_v9_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}
int kgd_gfx_v9_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}
int kgd_gfx_v9_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_CMD), sq_cmd);

	/* Restore broadcast mode so later GRBM accesses hit all SEs/SHs. */
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}
static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
			uint32_t vmid, uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID %u\n",
		       vmid);
		return;
	}

	mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
	gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base);
}
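
/*
 * Both hubs are programmed above because a KFD VMID translates through the
 * GFX hub (shader engines) as well as the MM hub (SDMA and other multimedia
 * clients), and both need the same page table base.
 */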
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
	.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
	.init_interrupts = kgd_gfx_v9_init_interrupts,
	.hqd_load = kgd_gfx_v9_hqd_load,
	.hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_gfx_v9_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_gfx_v9_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_gfx_v9_address_watch_disable,
	.address_watch_execute = kgd_gfx_v9_address_watch_execute,
	.wave_control_execute = kgd_gfx_v9_wave_control_execute,
	.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_info =
			kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
	.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
	.get_hive_id = amdgpu_amdkfd_get_hive_id,
	.get_unique_id = amdgpu_amdkfd_get_unique_id,
};