/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/pci.h>
26 #include "amdgpu_atomfirmware.h"
27 #include "gmc_v10_0.h"

#include "hdp/hdp_5_0_0_offset.h"
#include "hdp/hdp_5_0_0_sh_mask.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "mmhub/mmhub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_sh_mask.h"
#include "athub/athub_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_offset.h"
#include "dcn/dcn_2_0_0_sh_mask.h"
#include "oss/osssys_5_0_0_offset.h"
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
#include "navi10_enum.h"

#include "soc15.h"
#include "soc15_common.h"

#include "nbio_v2_3.h"

#include "gfxhub_v2_0.h"
#include "mmhub_v2_0.h"
#include "athub_v2_0.h"

/* XXX Move this macro to navi10 header file, which is like vid.h for VI. */
#define AMDGPU_NUM_OF_VMIDS			8
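
/*
 * Each hub exposes 16 VMIDs; VMID 0 is reserved for the kernel, and the
 * remaining IDs are split between the graphics/compute driver and amdkfd
 * (see gmc_v10_0_sw_init()), hence 8 IDs per VM ID manager.
 */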

static const struct soc15_reg_golden golden_settings_navi10_hdp[] =
{
	/* TODO add golden setting for hdp */
};

static int
gmc_v10_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *src, unsigned type,
				   enum amdgpu_interrupt_state state)
{
	struct amdgpu_vmhub *hub;
	u32 tmp, reg, bits[AMDGPU_MAX_VMHUBS], i;

	bits[AMDGPU_GFXHUB_0] = GCVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		GCVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	bits[AMDGPU_MMHUB_0] = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;
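
	/*
	 * Toggle the fault-interrupt enables in all 16 VM context control
	 * registers of both hubs; context 0 is the kernel/GART context,
	 * contexts 1-15 back userspace VMs.
	 */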
	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits[AMDGPU_MMHUB_0];
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp &= ~bits[AMDGPU_GFXHUB_0];
			WREG32(reg, tmp);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* MM HUB */
		hub = &adev->vmhub[AMDGPU_MMHUB_0];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits[AMDGPU_MMHUB_0];
			WREG32(reg, tmp);
		}

		/* GFX HUB */
		hub = &adev->vmhub[AMDGPU_GFXHUB_0];
		for (i = 0; i < 16; i++) {
			reg = hub->vm_context0_cntl + i;
			tmp = RREG32(reg);
			tmp |= bits[AMDGPU_GFXHUB_0];
			WREG32(reg, tmp);
		}
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       struct amdgpu_iv_entry *entry)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[entry->vmid_src];
	uint32_t status = 0;
	u64 addr;
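
	/*
	 * Reassemble the 48-bit faulting address: src_data[0] carries the
	 * page address bits 43:12 and the low nibble of src_data[1]
	 * supplies bits 47:44.
	 */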
	addr = (u64)entry->src_data[0] << 12;
	addr |= ((u64)entry->src_data[1] & 0xf) << 44;

	if (!amdgpu_sriov_vf(adev)) {
		/*
		 * Issue a dummy read to wait for the status register to
		 * be updated to avoid reading an incorrect value due to
		 * the new fast GRBM interface.
		 */
		if (entry->vmid_src == AMDGPU_GFXHUB_0)
			RREG32(hub->vm_l2_pro_fault_status);

		status = RREG32(hub->vm_l2_pro_fault_status);
		WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
	}

	if (printk_ratelimit()) {
		struct amdgpu_task_info task_info;

		memset(&task_info, 0, sizeof(struct amdgpu_task_info));
		amdgpu_vm_get_task_info(adev, entry->pasid, &task_info);
161 "[%s] page fault (src_id:%u ring:%u vmid:%u pasid:%u, "
162 "for process %s pid %d thread %s pid %d)\n",
163 entry->vmid_src ? "mmhub" : "gfxhub",
164 entry->src_id, entry->ring_id, entry->vmid,
165 entry->pasid, task_info.process_name, task_info.tgid,
166 task_info.task_name, task_info.pid);
167 dev_err(adev->dev, " in page starting at address 0x%016llx from client %d\n",
168 addr, entry->client_id);
		if (!amdgpu_sriov_vf(adev)) {
			dev_err(adev->dev,
				"GCVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
				status);
			dev_err(adev->dev, "\t Faulty UTCL2 client ID: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, CID));
			dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, MORE_FAULTS));
			dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, WALKER_ERROR));
			dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, PERMISSION_FAULTS));
			dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, MAPPING_ERROR));
			dev_err(adev->dev, "\t RW: 0x%lx\n",
				REG_GET_FIELD(status,
				GCVM_L2_PROTECTION_FAULT_STATUS, RW));
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs gmc_v10_0_irq_funcs = {
	.set = gmc_v10_0_vm_fault_interrupt_state,
	.process = gmc_v10_0_process_interrupt,
};

static void gmc_v10_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;
}

static uint32_t gmc_v10_0_get_invalidate_req(unsigned int vmid,
					     uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid */
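	/*
	 * A legacy invalidation drops the L1 PTEs plus the L2 PTE and
	 * PDE0..PDE2 entries for this VMID, so the next translation
	 * re-walks the whole page-table hierarchy.
	 */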
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, GCVM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

/**
 * gmc_v10_0_use_invalidate_semaphore - decide whether to use the
 * invalidation semaphore
 *
 * @adev: amdgpu_device pointer
 * @vmhub: vmhub type
 *
 * The semaphore handshake is only needed on the MM hubs and only on
 * bare metal; see the power-gating comment in gmc_v10_0_flush_vm_hub().
 */
static bool gmc_v10_0_use_invalidate_semaphore(struct amdgpu_device *adev,
					       uint32_t vmhub)
{
	return ((vmhub == AMDGPU_MMHUB_0 ||
		 vmhub == AMDGPU_MMHUB_1) &&
		(!amdgpu_sriov_vf(adev)));
}
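
/*
 * Each VMID has an ATC_VMIDn_PASID_MAPPING register; the VALID bit
 * indicates whether the VMID currently carries a PASID at all.
 */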
static bool gmc_v10_0_get_atc_vmid_pasid_mapping_info(
					struct amdgpu_device *adev,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;

	value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING)
		       + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

/*
 * GART
 * VMID 0 holds the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
				   unsigned int vmhub, uint32_t flush_type)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub);
	struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
	u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
	u32 tmp;
	/* Use register 17 for GART */
	const unsigned eng = 17;
	unsigned int i;

	spin_lock(&adev->gmc.invalidate_lock);
	/*
	 * The gpuvm invalidate acknowledge state may be lost across a
	 * power-gating off cycle, so acquire the semaphore before the
	 * invalidation and release it afterwards to avoid entering a
	 * power-gated state while the flush is in flight.
	 */

	/* TODO: the semaphore still needs debugging before it can be used for the GFX hub as well. */
	if (use_semaphore) {
		for (i = 0; i < adev->usec_timeout; i++) {
			/* a read return value of 1 means semaphore acquire */
			tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng);
			if (tmp & 0x1)
				break;
			udelay(1);
		}

		if (i >= adev->usec_timeout)
			DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
	}

	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);

	/*
	 * Issue a dummy read to wait for the ACK register to be cleared
	 * to avoid a false ACK due to the new fast GRBM interface.
	 */
	if (vmhub == AMDGPU_GFXHUB_0)
		RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);

	/* Wait for ACK with a delay. */
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
		tmp &= 1 << vmid;
		if (tmp)
			break;

		udelay(1);
	}

	/* TODO: the semaphore still needs debugging before it can be used for the GFX hub as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation;
		 * writing 0 releases the semaphore
		 */
		WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0);

	spin_unlock(&adev->gmc.invalidate_lock);

	if (i < adev->usec_timeout)
		return;

	DRM_ERROR("Timeout waiting for VM flush ACK!\n");
}

/**
 * gmc_v10_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: the flush type
 *
 * Flush the TLB for the requested page table.
 */
static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				    uint32_t vmhub, uint32_t flush_type)
{
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
	struct dma_fence *fence;
	struct amdgpu_job *job;
	int r;

	/* flush hdp cache */
	adev->nbio.funcs->hdp_flush(adev, NULL);

	mutex_lock(&adev->mman.gtt_window_lock);

	if (vmhub == AMDGPU_MMHUB_0) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_MMHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	BUG_ON(vmhub != AMDGPU_GFXHUB_0);

	if (!adev->mman.buffer_funcs_enabled ||
	    !adev->ib_pool_ready ||
	    adev->in_gpu_reset ||
	    !ring->sched.ready) {
		gmc_v10_0_flush_vm_hub(adev, vmid, AMDGPU_GFXHUB_0, 0);
		mutex_unlock(&adev->mman.gtt_window_lock);
		return;
	}

	/* The SDMA on Navi has a bug which can theoretically result in memory
	 * corruption if an invalidation happens at the same time as a VA
	 * translation. Avoid this by doing the invalidation from the SDMA
	 * itself.
	 */
	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_VM, &job);
	if (r)
		goto error_alloc;

	job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
	job->vm_needs_flush = true;
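	/*
	 * The IB itself is a single NOP: vm_needs_flush makes the scheduler
	 * emit the actual TLB flush from the SDMA ring before the job runs.
	 */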
	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	r = amdgpu_job_submit(job, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
	if (r)
		goto error_submit;

	mutex_unlock(&adev->mman.gtt_window_lock);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

	return;

error_submit:
	amdgpu_job_free(job);

error_alloc:
	mutex_unlock(&adev->mman.gtt_window_lock);
	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
}

/**
 * gmc_v10_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: the flush type
 * @all_hub: flush all hubs
 *
 * Flush the TLB for the requested pasid.
 */
static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub)
{
	int vmid, i;
	signed long r;
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	if (amdgpu_emu_mode == 0 && ring->sched.ready) {
		spin_lock(&adev->gfx.kiq.ring_lock);
		/* 2 dwords flush + 8 dwords fence */
		amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
		kiq->pmf->kiq_invalidate_tlbs(ring,
					pasid, flush_type, all_hub);
		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
		if (r) {
			amdgpu_ring_undo(ring);
			spin_unlock(&adev->gfx.kiq.ring_lock);
			return -ETIME;
		}

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		if (r < 1) {
			DRM_ERROR("wait for kiq fence error: %ld.\n", r);
			return -ETIME;
		}

		return 0;
	}
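
	/*
	 * No KIQ available (e.g. emulation mode): look the pasid up in the
	 * ATC VMID-to-PASID mapping registers and flush the matching VMID
	 * directly.
	 */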
	for (vmid = 1; vmid < 16; vmid++) {
		ret = gmc_v10_0_get_atc_vmid_pasid_mapping_info(adev, vmid,
				&queried_pasid);
		if (ret && queried_pasid == pasid) {
			if (all_hub) {
				for (i = 0; i < adev->num_vmhubs; i++)
					gmc_v10_0_flush_gpu_tlb(adev, vmid,
							i, flush_type);
			} else {
				gmc_v10_0_flush_gpu_tlb(adev, vmid,
						AMDGPU_GFXHUB_0, flush_type);
			}
			break;
		}
	}

	return 0;
}

static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					     unsigned vmid, uint64_t pd_addr)
{
	bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(ring->adev, ring->funcs->vmhub);
	struct amdgpu_vmhub *hub = &ring->adev->vmhub[ring->funcs->vmhub];
	uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0);
	unsigned eng = ring->vm_inv_eng;

	/*
	 * The gpuvm invalidate acknowledge state may be lost across a
	 * power-gating off cycle, so emit a semaphore acquire before the
	 * invalidation and a release after it to avoid entering a
	 * power-gated state in between.
	 */

	/* TODO: the semaphore still needs debugging before it can be used for the GFX hub as well. */
	if (use_semaphore)
		/* a read return value of 1 means semaphore acquire */
		amdgpu_ring_emit_reg_wait(ring,
					  hub->vm_inv_eng0_sem + eng, 0x1, 0x1);

	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid),
			      lower_32_bits(pd_addr));
	amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_hi32 + (2 * vmid),
			      upper_32_bits(pd_addr));

	amdgpu_ring_emit_reg_write_reg_wait(ring, hub->vm_inv_eng0_req + eng,
					    hub->vm_inv_eng0_ack + eng,
					    req, 1 << vmid);

	/* TODO: the semaphore still needs debugging before it can be used for the GFX hub as well. */
	if (use_semaphore)
		/*
		 * add semaphore release after invalidation;
		 * writing 0 releases the semaphore
		 */
		amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0);

	return pd_addr;
}
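
/*
 * Write the vmid-to-pasid mapping into the IH lookup table so that
 * interrupt vectors such as page faults can be attributed to the right
 * process; the GFX and MM hubs each have their own LUT.
 */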
static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
					 unsigned pasid)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t reg;

	if (ring->funcs->vmhub == AMDGPU_GFXHUB_0)
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid;
	else
		reg = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid;

	amdgpu_ring_emit_wreg(ring, reg, pasid);
}

/*
 * PTE format on NAVI 10:
 * 47:12 4k physical page base address
 *
 * PDE format on NAVI 10:
 * 63:59 block fragment size
 * 47:6 physical base address of PD or PTE
 */

static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags)
{
	switch (flags) {
	case AMDGPU_VM_MTYPE_DEFAULT:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_NC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	case AMDGPU_VM_MTYPE_WC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC);
	case AMDGPU_VM_MTYPE_CC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC);
	case AMDGPU_VM_MTYPE_UC:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC);
	default:
		return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC);
	}
}

static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level,
				 uint64_t *addr, uint64_t *flags)
{
	if (!(*flags & AMDGPU_PDE_PTE) && !(*flags & AMDGPU_PTE_SYSTEM))
		*addr = adev->vm_manager.vram_base_offset + *addr -
			adev->gmc.vram_start;
	BUG_ON(*addr & 0xFFFF00000000003FULL);

	if (!adev->gmc.translate_further)
		return;

	if (level == AMDGPU_VM_PDB1) {
		/* Set the block fragment size */
		if (!(*flags & AMDGPU_PDE_PTE))
			*flags |= AMDGPU_PDE_BFS(0x9);

	} else if (level == AMDGPU_VM_PDB0) {
		if (*flags & AMDGPU_PDE_PTE)
			*flags &= ~AMDGPU_PDE_PTE;
		else
			*flags |= AMDGPU_PTE_TF;
	}
}
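
/*
 * A PRT mapping is encoded below as a snooped, logged system page with
 * the VALID bit cleared, so it is never treated as an ordinary
 * translation.
 */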
static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev,
				 struct amdgpu_bo_va_mapping *mapping,
				 uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE;

	*flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK;
	*flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK);

	if (mapping->flags & AMDGPU_PTE_PRT) {
		*flags |= AMDGPU_PTE_PRT;
		*flags |= AMDGPU_PTE_SNOOPED;
		*flags |= AMDGPU_PTE_LOG;
		*flags |= AMDGPU_PTE_SYSTEM;
		*flags &= ~AMDGPU_PTE_VALID;
	}
}

static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v10_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping,
	.map_mtype = gmc_v10_0_map_mtype,
	.get_vm_pde = gmc_v10_0_get_vm_pde,
	.get_vm_pte = gmc_v10_0_get_vm_pte
};

static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	if (adev->gmc.gmc_funcs == NULL)
		adev->gmc.gmc_funcs = &gmc_v10_0_gmc_funcs;
}

static int gmc_v10_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_set_gmc_funcs(adev);
	gmc_v10_0_set_irq_funcs(adev);

	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start = 0x1000000000000000ULL;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;

	return 0;
}

static int gmc_v10_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	amdgpu_bo_late_init(adev);

	r = amdgpu_gmc_allocate_vm_inv_eng(adev);
	if (r)
		return r;

	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
					struct amdgpu_gmc *mc)
{
	u64 base = 0;

	base = gfxhub_v2_0_get_fb_location(adev);

	amdgpu_gmc_vram_location(adev, &adev->gmc, base);
	amdgpu_gmc_gart_location(adev, mc);

	/* base offset of vram pages */
	adev->vm_manager.vram_base_offset = gfxhub_v2_0_get_mc_fb_offset(adev);
}

/**
 * gmc_v10_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space.
 * Returns 0 for success.
 */
static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
{
	/* Could aper size report 0 ? */
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

	/* size in MB */
	adev->gmc.mc_vram_size =
		adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = adev->gmc.mc_vram_size;
	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size)
		adev->gmc.visible_vram_size = adev->gmc.real_vram_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_NAVI10:
		case CHIP_NAVI14:
		case CHIP_NAVI12:
		default:
			adev->gmc.gart_size = 512ULL << 20;
			break;
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	gmc_v10_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

static int gmc_v10_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "NAVI10 PCIE GART already initialized\n");
		return 0;
	}

	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;

	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = AMDGPU_PTE_MTYPE_NV10(MTYPE_UC) |
				    AMDGPU_PTE_EXECUTABLE;

	return amdgpu_gart_table_vram_alloc(adev);
}

static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
	unsigned size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */
	} else {
		u32 viewport;
		u32 pitch;

		viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
		pitch = RREG32_SOC15(DCE, 0, mmHUBPREQ0_DCSURF_SURFACE_PITCH);
		size = (REG_GET_FIELD(viewport,
				      HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
			REG_GET_FIELD(pitch, HUBPREQ0_DCSURF_SURFACE_PITCH, PITCH) *
			4);
	}

	/* return 0 if the pre-OS buffer uses up most of vram */
	if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) {
		DRM_ERROR("Warning: pre-OS buffer uses most of vram, be aware of gart table overwrite\n");
		return 0;
	}

	return size;
}

static int gmc_v10_0_sw_init(void *handle)
{
	int r, vram_width = 0, vram_type = 0, vram_vendor = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfxhub_v2_0_init(adev);
	mmhub_v2_0_init(adev);

	spin_lock_init(&adev->gmc.invalidate_lock);

	r = amdgpu_atomfirmware_get_vram_info(adev,
					      &vram_width, &vram_type, &vram_vendor);
	if (!amdgpu_emu_mode)
		adev->gmc.vram_width = vram_width;
	else
		adev->gmc.vram_width = 1 * 128; /* numchan * chansize */

	adev->gmc.vram_type = vram_type;
	adev->gmc.vram_vendor = vram_vendor;
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		adev->num_vmhubs = 2;
		/*
		 * To fulfill 4-level page support,
		 * vm size is 256TB (48bit), maximum size of Navi10/Navi14/Navi12,
		 * block size 512 (9bit)
		 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		break;
	default:
		break;
	}

	/* This interrupt is VMC page fault. */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC,
			      VMC_1_0__SRCID__VM_FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_UTCL2,
			      UTCL2_1_0__SRCID__FAULT,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	/*
	 * Set the internal MC address mask. This is the max address of the
	 * GPU's internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffffULL; /* 48 bit MC */
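
	/*
	 * DMA to system memory is limited to a 44-bit address mask here,
	 * even though the MC can address 48 bits internally.
	 */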
	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
	if (r) {
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
		return r;
	}

	r = gmc_v10_0_mc_init(adev);
	if (r)
		return r;

	adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev);

	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v10_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.id_mgr[AMDGPU_GFXHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;
	adev->vm_manager.id_mgr[AMDGPU_MMHUB_0].num_ids = AMDGPU_NUM_OF_VMIDS;

	amdgpu_vm_manager_init(adev);

	return 0;
}

/**
 * gmc_v10_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup.
 */
static void gmc_v10_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}

static int gmc_v10_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_vm_manager_fini(adev);
	gmc_v10_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}

static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		break;
	default:
		break;
	}
}

/**
 * gmc_v10_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 */
static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
{
	int r;
	bool value;
	u32 tmp;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}

	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;

	r = gfxhub_v2_0_gart_enable(adev);
	if (r)
		return r;

	r = mmhub_v2_0_gart_enable(adev);
	if (r)
		return r;

	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);

	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);

	/* Flush HDP after it is initialized */
	adev->nbio.funcs->hdp_flush(adev, NULL);

	value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ?
		false : true;

	gfxhub_v2_0_set_fault_enable_default(adev, value);
	mmhub_v2_0_set_fault_enable_default(adev, value);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_MMHUB_0, 0);
	gmc_v10_0_flush_gpu_tlb(adev, 0, AMDGPU_GFXHUB_0, 0);

	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->gmc.gart_size >> 20),
		 (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo));

	adev->gart.ready = true;

	return 0;
}

static int gmc_v10_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* The sequence of these two function calls matters. */
	gmc_v10_0_init_golden_registers(adev);

	r = gmc_v10_0_gart_enable(adev);
	if (r)
		return r;

	return 0;
}

/**
 * gmc_v10_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables.
 */
static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
	gfxhub_v2_0_gart_disable(adev);
	mmhub_v2_0_gart_disable(adev);
	amdgpu_gart_table_vram_unpin(adev);
}

static int gmc_v10_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		/* full access mode, so don't touch any GMC register */
		DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
		return 0;
	}

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v10_0_gart_disable(adev);

	return 0;
}

static int gmc_v10_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v10_0_hw_fini(adev);

	return 0;
}

static int gmc_v10_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v10_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v10_0_is_idle(void *handle)
{
	/* MC is always ready in GMC v10. */
	return true;
}

static int gmc_v10_0_wait_for_idle(void *handle)
{
	/* There is no need to wait for MC idle in GMC v10. */
	return 0;
}

static int gmc_v10_0_soft_reset(void *handle)
{
	return 0;
}

static int gmc_v10_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = mmhub_v2_0_set_clockgating(adev, state);
	if (r)
		return r;

	return athub_v2_0_set_clockgating(adev, state);
}

static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mmhub_v2_0_get_clockgating(adev, flags);

	athub_v2_0_get_clockgating(adev, flags);
}

static int gmc_v10_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs gmc_v10_0_ip_funcs = {
	.name = "gmc_v10_0",
	.early_init = gmc_v10_0_early_init,
	.late_init = gmc_v10_0_late_init,
	.sw_init = gmc_v10_0_sw_init,
	.sw_fini = gmc_v10_0_sw_fini,
	.hw_init = gmc_v10_0_hw_init,
	.hw_fini = gmc_v10_0_hw_fini,
	.suspend = gmc_v10_0_suspend,
	.resume = gmc_v10_0_resume,
	.is_idle = gmc_v10_0_is_idle,
	.wait_for_idle = gmc_v10_0_wait_for_idle,
	.soft_reset = gmc_v10_0_soft_reset,
	.set_clockgating_state = gmc_v10_0_set_clockgating_state,
	.set_powergating_state = gmc_v10_0_set_powergating_state,
	.get_clockgating_state = gmc_v10_0_get_clockgating_state,
};

const struct amdgpu_ip_block_version gmc_v10_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 10,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v10_0_ip_funcs,
};