/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include "amdgpu_trace.h"

const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	DMA0_REGISTER_OFFSET,
	DMA1_REGISTER_OFFSET
};

static void si_dma_set_ring_funcs(struct amdgpu_device *adev);
static void si_dma_set_buffer_funcs(struct amdgpu_device *adev);
static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev);
static void si_dma_set_irq_funcs(struct amdgpu_device *adev);
static uint64_t si_dma_ring_get_rptr(struct amdgpu_ring *ring)
	return ring->adev->wb.wb[ring->rptr_offs >> 2];
static uint64_t si_dma_ring_get_wptr(struct amdgpu_ring *ring)
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;
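	/* DMA_RB_WPTR holds the byte offset in bits [17:2]; mask and shift
	 * it down to a dword index
	 */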
	return (RREG32(DMA_RB_WPTR + sdma_offsets[me]) & 0x3fffc) >> 2;
static void si_dma_ring_set_wptr(struct amdgpu_ring *ring)
	struct amdgpu_device *adev = ring->adev;
	u32 me = (ring == &adev->sdma.instance[0].ring) ? 0 : 1;

	WREG32(DMA_RB_WPTR + sdma_offsets[me],
	       (lower_32_bits(ring->wptr) << 2) & 0x3fffc);
static void si_dma_ring_emit_ib(struct amdgpu_ring *ring,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib,
				uint32_t flags)
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	/* The indirect buffer packet must end on an 8 DW boundary in the
	 * DMA ring. Pad as necessary with NOPs.
	 */
	while ((lower_32_bits(ring->wptr) & 7) != 5)
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
	amdgpu_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vmid, 0));
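	/* IB payload: base address (the mask enforces 32-byte alignment),
	 * then the size in dwords packed above address bits 39:32
	 */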
	amdgpu_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
	amdgpu_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
/**
 * si_dma_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address the fence value is written to
 * @seq: sequence number
 * @flags: fence related flags
 *
 * Add a DMA fence packet to the ring to write the fence seq number,
 * plus a DMA trap packet to generate an interrupt if needed (SI).
 */
static void si_dma_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				   unsigned flags)
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;

	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
	amdgpu_ring_write(ring, seq);
	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0, 0));
		amdgpu_ring_write(ring, addr & 0xfffffffc);
		amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xff));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0, 0));
static void si_dma_stop(struct amdgpu_device *adev)
	struct amdgpu_ring *ring;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
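		/* halt the engine by clearing the ring buffer enable bit */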
		rb_cntl = RREG32(DMA_RB_CNTL + sdma_offsets[i]);
		rb_cntl &= ~DMA_RB_ENABLE;
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, false);
static int si_dma_start(struct amdgpu_device *adev)
	struct amdgpu_ring *ring;
	u32 rb_cntl, dma_cntl, ib_cntl, rb_bufsz;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;

		WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + sdma_offsets[i], 0);
		WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = rb_bufsz << 1;
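		/* byte swapping is only needed on big-endian hosts; in the
		 * full driver the next line sits under #ifdef __BIG_ENDIAN
		 */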
		rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;

		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(DMA_RB_RPTR + sdma_offsets[i], 0);
		WREG32(DMA_RB_WPTR + sdma_offsets[i], 0);
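		/* point rptr writeback at this ring's slot in the wb buffer so
		 * the CPU can poll ring progress without an MMIO read
		 */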
		rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);

		WREG32(DMA_RB_RPTR_ADDR_LO + sdma_offsets[i], lower_32_bits(rptr_addr));
		WREG32(DMA_RB_RPTR_ADDR_HI + sdma_offsets[i], upper_32_bits(rptr_addr) & 0xFF);

		rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
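		/* DMA_RB_BASE takes a 256-byte-aligned address, hence the >> 8 */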
		WREG32(DMA_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);

		ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
		ib_cntl |= DMA_IB_SWAP_ENABLE;

		WREG32(DMA_IB_CNTL + sdma_offsets[i], ib_cntl);
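		/* mask the context-empty interrupt; completion is signaled by
		 * the trap packets emitted with each fence instead
		 */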
		dma_cntl = RREG32(DMA_CNTL + sdma_offsets[i]);
		dma_cntl &= ~CTXEMPTY_INT_ENABLE;
		WREG32(DMA_CNTL + sdma_offsets[i], dma_cntl);

		WREG32(DMA_RB_WPTR + sdma_offsets[i], lower_32_bits(ring->wptr) << 2);
		WREG32(DMA_RB_CNTL + sdma_offsets[i], rb_cntl | DMA_RB_ENABLE);

		ring->sched.ready = true;

		r = amdgpu_ring_test_helper(ring);

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
/**
 * si_dma_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (SI).
 * Returns 0 for success, error for failure.
 */
static int si_dma_ring_test_ring(struct amdgpu_ring *ring)
	struct amdgpu_device *adev = ring->adev;

	r = amdgpu_device_wb_get(adev, &index);

	gpu_addr = adev->wb.gpu_addr + (index * 4);
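	/* seed the writeback slot with a dummy value, then have the engine
	 * overwrite it with 0xDEADBEEF
	 */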
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 4);

	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)

	if (i >= adev->usec_timeout)

	amdgpu_device_wb_free(adev, index);
/**
 * si_dma_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Test a simple IB in the DMA ring (SI).
 * Returns 0 on success, error on failure.
 */
static int si_dma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
	struct amdgpu_device *adev = ring->adev;
	struct dma_fence *f = NULL;

	r = amdgpu_device_wb_get(adev, &index);

	gpu_addr = adev->wb.gpu_addr + (index * 4);

	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256,
			  AMDGPU_IB_POOL_DIRECT, &ib);

	ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, 1);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
	ib.ptr[3] = 0xDEADBEEF;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);

	r = dma_fence_wait_timeout(f, false, timeout);

	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)

	amdgpu_ib_free(adev, &ib, NULL);

	amdgpu_device_wb_free(adev, index);
/**
 * si_dma_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using DMA (SI).
 */
static void si_dma_vm_copy_pte(struct amdgpu_ib *ib,
			       uint64_t pe, uint64_t src,
			       unsigned count)
	unsigned bytes = count * 8;
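	/* each PTE is 8 bytes; the COPY packet header is followed by
	 * dst lo, src lo, dst hi, src hi
	 */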
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, bytes);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src) & 0xff;
/**
 * si_dma_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using DMA (SI).
 */
static void si_dma_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				uint64_t value, unsigned count,
				uint32_t incr)
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
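	/* each 64-bit entry takes two dwords, low half first */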
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
/**
 * si_dma_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (SI).
 */
static void si_dma_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
				  uint64_t addr, unsigned count,
				  uint32_t incr, uint64_t flags)

	if (flags & AMDGPU_PTE_VALID)
		value = addr;
	else
		value = 0;

	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
	ib->ptr[ib->length_dw++] = pe; /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = value; /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(value);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
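	/* one PTE_PDE packet programs ndw / 2 entries; step the base address
	 * past the pages it covered
	 */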
	addr += (ndw / 2) * incr;
/**
 * si_dma_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 */
static void si_dma_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
	while (ib->length_dw & 0x7)
		ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
/**
 * si_dma_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (SI).
 */
static void si_dma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0) |
			  (1 << 27)); /* Poll memory */
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, (0xff << 16) | upper_32_bits(addr)); /* retry, addr_hi */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, seq); /* value */
	amdgpu_ring_write(ring, (3 << 28) | 0x20); /* func(equal) | poll interval */
/**
 * si_dma_ring_emit_vm_flush - si vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB using sDMA (SI).
 */
static void si_dma_ring_emit_vm_flush(struct amdgpu_ring *ring,
				      unsigned vmid, uint64_t pd_addr)
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for invalidate to complete */
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
	amdgpu_ring_write(ring, VM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0xff << 16); /* retry */
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, 0); /* value */
	amdgpu_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
static void si_dma_ring_emit_wreg(struct amdgpu_ring *ring,
				  uint32_t reg, uint32_t val)
	amdgpu_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
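	/* 0xf << 16 is presumably a byte-enable field selecting all four
	 * byte lanes of the register
	 */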
	amdgpu_ring_write(ring, (0xf << 16) | reg);
	amdgpu_ring_write(ring, val);
static int si_dma_early_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->sdma.num_instances = 2;

	si_dma_set_ring_funcs(adev);
	si_dma_set_buffer_funcs(adev);
	si_dma_set_vm_pte_funcs(adev);
	si_dma_set_irq_funcs(adev);
static int si_dma_sw_init(void *handle)
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* DMA0 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 224,
			      &adev->sdma.trap_irq);

	/* DMA1 trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 244,
			      &adev->sdma.trap_irq);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
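		/* SI predates SDMA doorbells; the wptr is always written via MMIO */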
		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1,
				     AMDGPU_RING_PRIO_DEFAULT);
static int si_dma_sw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);
static int si_dma_hw_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_start(adev);

static int si_dma_hw_fini(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
static int si_dma_suspend(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_fini(adev);

static int si_dma_resume(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return si_dma_hw_init(adev);
static bool si_dma_is_idle(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(SRBM_STATUS2);

	if (tmp & (DMA_BUSY_MASK | DMA1_BUSY_MASK))

static int si_dma_wait_for_idle(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (si_dma_is_idle(handle))
static int si_dma_soft_reset(void *handle)
	DRM_INFO("si_dma_soft_reset: not implemented\n");
static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
				     struct amdgpu_irq_src *src,
				     enum amdgpu_interrupt_state state)

	case AMDGPU_SDMA_IRQ_INSTANCE0:
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA0_REGISTER_OFFSET, sdma_cntl);
	case AMDGPU_SDMA_IRQ_INSTANCE1:
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl &= ~TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
			sdma_cntl |= TRAP_ENABLE;
			WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, sdma_cntl);
static int si_dma_process_trap_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
	if (entry->src_id == 224)
		amdgpu_fence_process(&adev->sdma.instance[0].ring);
	else
		amdgpu_fence_process(&adev->sdma.instance[1].ring);
static int si_dma_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
	u32 orig, data, offset;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	enable = (state == AMD_CG_STATE_GATE);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data &= ~MEM_POWER_OVERRIDE;
			WREG32(DMA_POWER_CNTL + offset, data);
			WREG32(DMA_CLK_CTRL + offset, 0x00000100);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			if (i == 0)
				offset = DMA0_REGISTER_OFFSET;
			else
				offset = DMA1_REGISTER_OFFSET;
			orig = data = RREG32(DMA_POWER_CNTL + offset);
			data |= MEM_POWER_OVERRIDE;
			WREG32(DMA_POWER_CNTL + offset, data);

			orig = data = RREG32(DMA_CLK_CTRL + offset);
			WREG32(DMA_CLK_CTRL + offset, data);
static int si_dma_set_powergating_state(void *handle,
					enum amd_powergating_state state)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
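	/* magic PGFSM power-gating sequence, apparently carried over from the
	 * original radeon driver; the values are otherwise undocumented
	 */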
	WREG32(DMA_PGFSM_WRITE, 0x00002000);
	WREG32(DMA_PGFSM_CONFIG, 0x100010ff);

	for (tmp = 0; tmp < 5; tmp++)
		WREG32(DMA_PGFSM_WRITE, 0);
static const struct amd_ip_funcs si_dma_ip_funcs = {
	.early_init = si_dma_early_init,
	.sw_init = si_dma_sw_init,
	.sw_fini = si_dma_sw_fini,
	.hw_init = si_dma_hw_init,
	.hw_fini = si_dma_hw_fini,
	.suspend = si_dma_suspend,
	.resume = si_dma_resume,
	.is_idle = si_dma_is_idle,
	.wait_for_idle = si_dma_wait_for_idle,
	.soft_reset = si_dma_soft_reset,
	.set_clockgating_state = si_dma_set_clockgating_state,
	.set_powergating_state = si_dma_set_powergating_state,
static const struct amdgpu_ring_funcs si_dma_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.nop = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0),
	.support_64bit_ptrs = false,
	.get_rptr = si_dma_ring_get_rptr,
	.get_wptr = si_dma_ring_get_wptr,
	.set_wptr = si_dma_ring_set_wptr,
	.emit_frame_size =
		3 + 3 + /* hdp flush / invalidate */
		6 + /* si_dma_ring_emit_pipeline_sync */
		SI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* si_dma_ring_emit_vm_flush */
		9 + 9 + 9, /* si_dma_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 3, /* si_dma_ring_emit_ib */
	.emit_ib = si_dma_ring_emit_ib,
	.emit_fence = si_dma_ring_emit_fence,
	.emit_pipeline_sync = si_dma_ring_emit_pipeline_sync,
	.emit_vm_flush = si_dma_ring_emit_vm_flush,
	.test_ring = si_dma_ring_test_ring,
	.test_ib = si_dma_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = si_dma_ring_pad_ib,
	.emit_wreg = si_dma_ring_emit_wreg,
static void si_dma_set_ring_funcs(struct amdgpu_device *adev)
	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &si_dma_ring_funcs;

static const struct amdgpu_irq_src_funcs si_dma_trap_irq_funcs = {
	.set = si_dma_set_trap_irq_state,
	.process = si_dma_process_trap_irq,

static void si_dma_set_irq_funcs(struct amdgpu_device *adev)
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &si_dma_trap_irq_funcs;
/**
 * si_dma_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 * @tmz: is this a secure operation
 *
 * Copy GPU buffers using the DMA engine (SI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void si_dma_emit_copy_buffer(struct amdgpu_ib *ib,
				    uint64_t src_offset,
				    uint64_t dst_offset,
				    uint32_t byte_count,
				    bool tmz)
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_COPY,
					      1, 0, 0, byte_count);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) & 0xff;
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset) & 0xff;
/**
 * si_dma_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (SI).
 */
static void si_dma_emit_fill_buffer(struct amdgpu_ib *ib,
				    uint32_t src_data,
				    uint64_t dst_offset,
				    uint32_t byte_count)
	ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_CONSTANT_FILL,
					      0, 0, 0, byte_count / 4);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset) << 16;
static const struct amdgpu_buffer_funcs si_dma_buffer_funcs = {
	.copy_max_bytes = 0xffff8,
	.emit_copy_buffer = si_dma_emit_copy_buffer,
	.fill_max_bytes = 0xffff8,
	.emit_fill_buffer = si_dma_emit_fill_buffer,

static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
	adev->mman.buffer_funcs = &si_dma_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
	.copy_pte_num_dw = 5,
	.copy_pte = si_dma_vm_copy_pte,
	.write_pte = si_dma_vm_write_pte,
	.set_pte_pde = si_dma_vm_set_pte_pde,

static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev)
	adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->vm_manager.vm_pte_scheds[i] =
			&adev->sdma.instance[i].ring.sched;
	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
const struct amdgpu_ip_block_version si_dma_ip_block =
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.funcs = &si_dma_ip_funcs,